// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
#define MLXSW_SP1_FWREV_SUBMINOR 2406
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
#define MLXSW_SP2_FWREV_SUBMINOR 2406

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
#define MLXSW_SP3_FWREV_SUBMINOR 2406

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP3_FWREV_MINOR,
	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
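
/* Tx header. The MLXSW_ITEM32() macros below generate typed accessors
 * (mlxsw_tx_hdr_<field>_set()) for the fields of the Tx header that the
 * driver prepends to every packet handed to the device; the header is
 * filled in by mlxsw_sp_txhdr_construct() further down.
 */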
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch
 * partition, where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID
 * mapping. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
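
/* Prepend the Tx header to an skb. Packets injected by the driver are
 * always sent as control packets: they use the control TClass and are
 * directed straight to the egress port given in tx_info->local_port,
 * bypassing the forwarding pipeline.
 */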
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
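
/* Set the administrative (up/down) state of the port via the PAOS
 * register. Called from ndo_open()/ndo_stop() and around loopback
 * reconfiguration.
 */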
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
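
/* Select which VLAN EtherType the port uses on egress (SPEVET
 * register). The hardware encodes the EtherType as a "sver" index:
 * 0 for ETH_P_8021Q, 1 for ETH_P_8021AD.
 */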
int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
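
/* Read the port-to-module mapping from the PMLP register and validate
 * it: the port must use a single module, with a power-of-2 width and
 * sequential, symmetric Rx/Tx lanes; any other layout is rejected as an
 * unsupported configuration.
 */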
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
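
/* Transmit path: make room for the Tx header, pad the frame to the
 * minimum Ethernet size, prepend the Tx header and hand the skb to the
 * core for emission. Dropped packets are accounted in tx_dropped;
 * NETDEV_TX_BUSY is returned only when the transmit queue is full.
 */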
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
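
/* Aggregate the per-CPU software counters kept for traffic that goes
 * through the CPU. These are reported as the IFLA_OFFLOAD_XSTATS_CPU_HIT
 * offload statistics below.
 */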
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
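
/* Periodic work item that refreshes the cached hardware statistics
 * while the carrier is up, so that ndo_get_stats64() can serve them
 * from memory in atomic context.
 */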
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}
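
/* Create a {port, VID} entry: install the VID membership in hardware
 * first (untagged for the default VID), then track it on the port's
 * vlans_list. Returns -EEXIST if the VID is already configured.
 */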
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
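
/* NETIF_F_HW_TC may only be cleared while no tc filters are offloaded
 * on the port; clearing it bumps the ingress and egress flow blocks'
 * disable count so that subsequent filters are rejected.
 */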
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
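
/* ndo_eth_ioctl() handler. Only the hardware timestamping ioctls are
 * supported; everything else is -EOPNOTSUPP.
 */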
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}
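
/* The QEEC register configures a single element in the port's egress
 * scheduling hierarchy (port -> group -> subgroup -> TC). The helpers
 * below set an element's DWRR weight, its max shaper and its min
 * shaper, respectively.
 */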
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
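
/* Set up the default egress scheduling for a new port: a linear
 * hierarchy with every TC linked to a subgroup, all max shapers
 * disabled, multicast TCs (8..15) given a minimum shaper, and all
 * switch priorities mapped to traffic class 0.
 */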
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it. Note that this disables ptps (PTP shaper), but that is
	 * intended for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
						    &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	struct net_device *dev;
	bool splittable;
	int err;

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / lanes,
				   splittable, lanes,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
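
	/* From here on the port is initialized in hardware step by step;
	 * each step has a matching teardown in the error path below and
	 * in mlxsw_sp_port_remove().
	 */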
	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}
	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
	 * only packets with 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}
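
/* Tear down a port in roughly the reverse order of
 * mlxsw_sp_port_create(). The delayed works are cancelled first and the
 * netdev is unregistered (which invokes ndo_stop()) before the hardware
 * resources are released.
 */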
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
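
/* Cache the module mapping of every local port at driver init. Entries
 * are only allocated for ports with a non-zero width; this cache is
 * what port creation and split/unsplit use to (re)instantiate ports.
 */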
sizeof(port_mapping), 1872 GFP_KERNEL); 1873 if (!mlxsw_sp->port_mapping[i]) { 1874 err = -ENOMEM; 1875 goto err_port_module_info_dup; 1876 } 1877 } 1878 return 0; 1879 1880 err_port_module_info_get: 1881 err_port_module_info_dup: 1882 for (i--; i >= 1; i--) 1883 kfree(mlxsw_sp->port_mapping[i]); 1884 kfree(mlxsw_sp->port_mapping); 1885 return err; 1886 } 1887 1888 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 1889 { 1890 int i; 1891 1892 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 1893 kfree(mlxsw_sp->port_mapping[i]); 1894 kfree(mlxsw_sp->port_mapping); 1895 } 1896 1897 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width) 1898 { 1899 u8 offset = (local_port - 1) % max_width; 1900 1901 return local_port - offset; 1902 } 1903 1904 static int 1905 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 1906 struct mlxsw_sp_port_mapping *port_mapping, 1907 unsigned int count, u8 offset) 1908 { 1909 struct mlxsw_sp_port_mapping split_port_mapping; 1910 int err, i; 1911 1912 split_port_mapping = *port_mapping; 1913 split_port_mapping.width /= count; 1914 for (i = 0; i < count; i++) { 1915 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, 1916 base_port, &split_port_mapping); 1917 if (err) 1918 goto err_port_create; 1919 split_port_mapping.lane += split_port_mapping.width; 1920 } 1921 1922 return 0; 1923 1924 err_port_create: 1925 for (i--; i >= 0; i--) 1926 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 1927 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 1928 return err; 1929 } 1930 1931 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 1932 u8 base_port, 1933 unsigned int count, u8 offset) 1934 { 1935 struct mlxsw_sp_port_mapping *port_mapping; 1936 int i; 1937 1938 /* Go over original unsplit ports in the gap and recreate them. 
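 *
 * As an illustration (hypothetical values, not taken from the code):
 * with base_port = 1, count = 2 and offset = 2, local ports 1..4 are
 * scanned, and every port that still has a port_mapping recorded from
 * before the split is recreated as an unsplit port.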
 */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}

static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
				       unsigned int count,
				       unsigned int max_width)
{
	enum mlxsw_res_id local_ports_in_x_res_id;
	int split_width = max_width / count;

	if (split_width == 1)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
	else if (split_width == 2)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
	else if (split_width == 4)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
	else
		return -EINVAL;

	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
		return -EINVAL;
	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
}

static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
		return mlxsw_sp->ports[local_port];
	return NULL;
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* A port that does not use the maximal width of its module is
	 * itself the result of a split and cannot be split further.
	 */
	if (mlxsw_sp_port->mapping.width != max_width) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* The local port and the base port may differ only when a
	 * maximal split is being done.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* The base port is expected to exist, and so is the port in
		 * the middle of the cluster in case of a maximal split count.
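		 *
		 * As an illustrative layout: for a maximal split with
		 * count = 4 and offset = 1, i == 0 (the base port) and
		 * i == count / 2 (the middle port) are skipped because they
		 * may legitimately exist; any other occupied local port in
		 * the range makes the requested split configuration invalid.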
2029 */ 2030 if (i == 0 || (count == max_width && i == count / 2)) 2031 continue; 2032 2033 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { 2034 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2035 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); 2036 return -EINVAL; 2037 } 2038 } 2039 2040 port_mapping = mlxsw_sp_port->mapping; 2041 2042 for (i = 0; i < count; i++) 2043 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 2044 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 2045 2046 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, 2047 count, offset); 2048 if (err) { 2049 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2050 goto err_port_split_create; 2051 } 2052 2053 return 0; 2054 2055 err_port_split_create: 2056 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 2057 return err; 2058 } 2059 2060 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 2061 struct netlink_ext_ack *extack) 2062 { 2063 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2064 struct mlxsw_sp_port *mlxsw_sp_port; 2065 unsigned int count; 2066 int max_width; 2067 u8 base_port; 2068 int offset; 2069 int i; 2070 2071 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2072 if (!mlxsw_sp_port) { 2073 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2074 local_port); 2075 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2076 return -EINVAL; 2077 } 2078 2079 if (!mlxsw_sp_port->split) { 2080 netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); 2081 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2082 return -EINVAL; 2083 } 2084 2085 max_width = mlxsw_core_module_max_width(mlxsw_core, 2086 mlxsw_sp_port->mapping.module); 2087 if (max_width < 0) { 2088 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); 2089 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); 2090 return max_width; 2091 } 2092 2093 count = max_width / mlxsw_sp_port->mapping.width; 2094 2095 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 2096 if (WARN_ON(offset < 0)) { 2097 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 2098 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 2099 return -EINVAL; 2100 } 2101 2102 base_port = mlxsw_sp_port->split_base_local_port; 2103 2104 for (i = 0; i < count; i++) 2105 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 2106 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 2107 2108 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 2109 2110 return 0; 2111 } 2112 2113 static void 2114 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 2115 { 2116 int i; 2117 2118 for (i = 0; i < TC_MAX_QUEUE; i++) 2119 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 2120 } 2121 2122 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2123 char *pude_pl, void *priv) 2124 { 2125 struct mlxsw_sp *mlxsw_sp = priv; 2126 struct mlxsw_sp_port *mlxsw_sp_port; 2127 enum mlxsw_reg_pude_oper_status status; 2128 unsigned int max_ports; 2129 u8 local_port; 2130 2131 max_ports = mlxsw_core_max_ports(mlxsw_sp->core); 2132 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2133 2134 if (WARN_ON_ONCE(local_port >= max_ports)) 2135 return; 2136 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2137 if (!mlxsw_sp_port) 2138 return; 2139 2140 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2141 
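	/* Link-up turns the carrier on and schedules the PTP shaper work;
	 * link-down turns the carrier off and wipes the cached per-TC
	 * backlog counters, which no longer describe queued traffic once
	 * the port is down.
	 */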
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}

static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u8 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
2248 _is_ctrl, SP_##_trap_group, DISCARD) 2249 2250 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2251 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2252 2253 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2254 /* Events */ 2255 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2256 /* L2 traps */ 2257 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2258 /* L3 traps */ 2259 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2260 false), 2261 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2262 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2263 false), 2264 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2265 ROUTER_EXP, false), 2266 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2267 ROUTER_EXP, false), 2268 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2269 ROUTER_EXP, false), 2270 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2271 ROUTER_EXP, false), 2272 /* Multicast Router Traps */ 2273 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2274 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2275 /* NVE traps */ 2276 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), 2277 }; 2278 2279 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2280 /* Events */ 2281 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2282 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2283 }; 2284 2285 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2286 { 2287 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2288 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2289 enum mlxsw_reg_qpcr_ir_units ir_units; 2290 int max_cpu_policers; 2291 bool is_bytes; 2292 u8 burst_size; 2293 u32 rate; 2294 int i, err; 2295 2296 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2297 return -EIO; 2298 2299 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2300 2301 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2302 for (i = 0; i < max_cpu_policers; i++) { 2303 is_bytes = false; 2304 switch (i) { 2305 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2306 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2307 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2308 rate = 1024; 2309 burst_size = 7; 2310 break; 2311 default: 2312 continue; 2313 } 2314 2315 __set_bit(i, mlxsw_sp->trap->policers_usage); 2316 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2317 burst_size); 2318 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2319 if (err) 2320 return err; 2321 } 2322 2323 return 0; 2324 } 2325 2326 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2327 { 2328 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2329 enum mlxsw_reg_htgt_trap_group i; 2330 int max_cpu_policers; 2331 int max_trap_groups; 2332 u8 priority, tc; 2333 u16 policer_id; 2334 int err; 2335 2336 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2337 return -EIO; 2338 2339 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2340 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2341 2342 for (i = 0; i < max_trap_groups; i++) { 2343 policer_id = i; 2344 switch (i) { 2345 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2346 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2347 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2348 priority = 1; 2349 tc = 1; 2350 break; 2351 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2352 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2353 tc = 
MLXSW_REG_HTGT_DEFAULT_TC; 2354 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2355 break; 2356 default: 2357 continue; 2358 } 2359 2360 if (max_cpu_policers <= policer_id && 2361 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2362 return -EIO; 2363 2364 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2365 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2366 if (err) 2367 return err; 2368 } 2369 2370 return 0; 2371 } 2372 2373 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp, 2374 const struct mlxsw_listener listeners[], 2375 size_t listeners_count) 2376 { 2377 int i; 2378 int err; 2379 2380 for (i = 0; i < listeners_count; i++) { 2381 err = mlxsw_core_trap_register(mlxsw_sp->core, 2382 &listeners[i], 2383 mlxsw_sp); 2384 if (err) 2385 goto err_listener_register; 2386 2387 } 2388 return 0; 2389 2390 err_listener_register: 2391 for (i--; i >= 0; i--) { 2392 mlxsw_core_trap_unregister(mlxsw_sp->core, 2393 &listeners[i], 2394 mlxsw_sp); 2395 } 2396 return err; 2397 } 2398 2399 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp, 2400 const struct mlxsw_listener listeners[], 2401 size_t listeners_count) 2402 { 2403 int i; 2404 2405 for (i = 0; i < listeners_count; i++) { 2406 mlxsw_core_trap_unregister(mlxsw_sp->core, 2407 &listeners[i], 2408 mlxsw_sp); 2409 } 2410 } 2411 2412 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2413 { 2414 struct mlxsw_sp_trap *trap; 2415 u64 max_policers; 2416 int err; 2417 2418 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2419 return -EIO; 2420 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2421 trap = kzalloc(struct_size(trap, policers_usage, 2422 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2423 if (!trap) 2424 return -ENOMEM; 2425 trap->max_policers = max_policers; 2426 mlxsw_sp->trap = trap; 2427 2428 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2429 if (err) 2430 goto err_cpu_policers_set; 2431 2432 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2433 if (err) 2434 goto err_trap_groups_set; 2435 2436 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener, 2437 ARRAY_SIZE(mlxsw_sp_listener)); 2438 if (err) 2439 goto err_traps_register; 2440 2441 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners, 2442 mlxsw_sp->listeners_count); 2443 if (err) 2444 goto err_extra_traps_init; 2445 2446 return 0; 2447 2448 err_extra_traps_init: 2449 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2450 ARRAY_SIZE(mlxsw_sp_listener)); 2451 err_traps_register: 2452 err_trap_groups_set: 2453 err_cpu_policers_set: 2454 kfree(trap); 2455 return err; 2456 } 2457 2458 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2459 { 2460 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners, 2461 mlxsw_sp->listeners_count); 2462 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2463 ARRAY_SIZE(mlxsw_sp_listener)); 2464 kfree(mlxsw_sp->trap); 2465 } 2466 2467 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2468 2469 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2470 { 2471 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2472 u32 seed; 2473 int err; 2474 2475 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2476 MLXSW_SP_LAG_SEED_INIT); 2477 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2478 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2479 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2480 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2481 MLXSW_REG_SLCR_LAG_HASH_SIP | 2482 MLXSW_REG_SLCR_LAG_HASH_DIP | 2483 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2484 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2485 
MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2486 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2487 if (err) 2488 return err; 2489 2490 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 2491 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2492 return -EIO; 2493 2494 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 2495 sizeof(struct mlxsw_sp_upper), 2496 GFP_KERNEL); 2497 if (!mlxsw_sp->lags) 2498 return -ENOMEM; 2499 2500 return 0; 2501 } 2502 2503 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2504 { 2505 kfree(mlxsw_sp->lags); 2506 } 2507 2508 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 2509 { 2510 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2511 int err; 2512 2513 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 2514 MLXSW_REG_HTGT_INVALID_POLICER, 2515 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2516 MLXSW_REG_HTGT_DEFAULT_TC); 2517 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2518 if (err) 2519 return err; 2520 2521 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE, 2522 MLXSW_REG_HTGT_INVALID_POLICER, 2523 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2524 MLXSW_REG_HTGT_DEFAULT_TC); 2525 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2526 if (err) 2527 return err; 2528 2529 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE, 2530 MLXSW_REG_HTGT_INVALID_POLICER, 2531 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2532 MLXSW_REG_HTGT_DEFAULT_TC); 2533 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2534 if (err) 2535 return err; 2536 2537 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE, 2538 MLXSW_REG_HTGT_INVALID_POLICER, 2539 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2540 MLXSW_REG_HTGT_DEFAULT_TC); 2541 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2542 } 2543 2544 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2545 .clock_init = mlxsw_sp1_ptp_clock_init, 2546 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2547 .init = mlxsw_sp1_ptp_init, 2548 .fini = mlxsw_sp1_ptp_fini, 2549 .receive = mlxsw_sp1_ptp_receive, 2550 .transmitted = mlxsw_sp1_ptp_transmitted, 2551 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2552 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 2553 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2554 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2555 .get_stats_count = mlxsw_sp1_get_stats_count, 2556 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2557 .get_stats = mlxsw_sp1_get_stats, 2558 }; 2559 2560 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2561 .clock_init = mlxsw_sp2_ptp_clock_init, 2562 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2563 .init = mlxsw_sp2_ptp_init, 2564 .fini = mlxsw_sp2_ptp_fini, 2565 .receive = mlxsw_sp2_ptp_receive, 2566 .transmitted = mlxsw_sp2_ptp_transmitted, 2567 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2568 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2569 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2570 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2571 .get_stats_count = mlxsw_sp2_get_stats_count, 2572 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2573 .get_stats = mlxsw_sp2_get_stats, 2574 }; 2575 2576 struct mlxsw_sp_sample_trigger_node { 2577 struct mlxsw_sp_sample_trigger trigger; 2578 struct mlxsw_sp_sample_params params; 2579 struct rhash_head ht_node; 2580 struct rcu_head rcu; 2581 refcount_t refcount; 2582 }; 2583 2584 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { 2585 .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), 2586 .head_offset = 
offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), 2587 .key_len = sizeof(struct mlxsw_sp_sample_trigger), 2588 .automatic_shrinking = true, 2589 }; 2590 2591 static void 2592 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, 2593 const struct mlxsw_sp_sample_trigger *trigger) 2594 { 2595 memset(key, 0, sizeof(*key)); 2596 key->type = trigger->type; 2597 key->local_port = trigger->local_port; 2598 } 2599 2600 /* RCU read lock must be held */ 2601 struct mlxsw_sp_sample_params * 2602 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, 2603 const struct mlxsw_sp_sample_trigger *trigger) 2604 { 2605 struct mlxsw_sp_sample_trigger_node *trigger_node; 2606 struct mlxsw_sp_sample_trigger key; 2607 2608 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2609 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, 2610 mlxsw_sp_sample_trigger_ht_params); 2611 if (!trigger_node) 2612 return NULL; 2613 2614 return &trigger_node->params; 2615 } 2616 2617 static int 2618 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, 2619 const struct mlxsw_sp_sample_trigger *trigger, 2620 const struct mlxsw_sp_sample_params *params) 2621 { 2622 struct mlxsw_sp_sample_trigger_node *trigger_node; 2623 int err; 2624 2625 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); 2626 if (!trigger_node) 2627 return -ENOMEM; 2628 2629 trigger_node->trigger = *trigger; 2630 trigger_node->params = *params; 2631 refcount_set(&trigger_node->refcount, 1); 2632 2633 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, 2634 &trigger_node->ht_node, 2635 mlxsw_sp_sample_trigger_ht_params); 2636 if (err) 2637 goto err_rhashtable_insert; 2638 2639 return 0; 2640 2641 err_rhashtable_insert: 2642 kfree(trigger_node); 2643 return err; 2644 } 2645 2646 static void 2647 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, 2648 struct mlxsw_sp_sample_trigger_node *trigger_node) 2649 { 2650 rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, 2651 &trigger_node->ht_node, 2652 mlxsw_sp_sample_trigger_ht_params); 2653 kfree_rcu(trigger_node, rcu); 2654 } 2655 2656 int 2657 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, 2658 const struct mlxsw_sp_sample_trigger *trigger, 2659 const struct mlxsw_sp_sample_params *params, 2660 struct netlink_ext_ack *extack) 2661 { 2662 struct mlxsw_sp_sample_trigger_node *trigger_node; 2663 struct mlxsw_sp_sample_trigger key; 2664 2665 ASSERT_RTNL(); 2666 2667 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2668 2669 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2670 &key, 2671 mlxsw_sp_sample_trigger_ht_params); 2672 if (!trigger_node) 2673 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, 2674 params); 2675 2676 if (trigger_node->trigger.local_port) { 2677 NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port"); 2678 return -EINVAL; 2679 } 2680 2681 if (trigger_node->params.psample_group != params->psample_group || 2682 trigger_node->params.truncate != params->truncate || 2683 trigger_node->params.rate != params->rate || 2684 trigger_node->params.trunc_size != params->trunc_size) { 2685 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); 2686 return -EINVAL; 2687 } 2688 2689 refcount_inc(&trigger_node->refcount); 2690 2691 return 0; 2692 } 2693 2694 void 2695 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, 2696 const struct mlxsw_sp_sample_trigger *trigger) 2697 { 2698 struct mlxsw_sp_sample_trigger_node *trigger_node; 
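	/* Dropping the last reference removes the node from the hashtable;
	 * RCU readers may still hold a pointer to it until a grace period
	 * elapses, which is why the fini helper above frees it with
	 * kfree_rcu().
	 */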
2699 struct mlxsw_sp_sample_trigger key; 2700 2701 ASSERT_RTNL(); 2702 2703 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2704 2705 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2706 &key, 2707 mlxsw_sp_sample_trigger_ht_params); 2708 if (!trigger_node) 2709 return; 2710 2711 if (!refcount_dec_and_test(&trigger_node->refcount)) 2712 return; 2713 2714 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); 2715 } 2716 2717 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2718 unsigned long event, void *ptr); 2719 2720 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96 2721 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128 2722 #define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789 2723 2724 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) 2725 { 2726 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 2727 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; 2728 mutex_init(&mlxsw_sp->parsing.lock); 2729 } 2730 2731 static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) 2732 { 2733 mutex_destroy(&mlxsw_sp->parsing.lock); 2734 } 2735 2736 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2737 const struct mlxsw_bus_info *mlxsw_bus_info, 2738 struct netlink_ext_ack *extack) 2739 { 2740 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2741 int err; 2742 2743 mlxsw_sp->core = mlxsw_core; 2744 mlxsw_sp->bus_info = mlxsw_bus_info; 2745 2746 mlxsw_sp_parsing_init(mlxsw_sp); 2747 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 2748 2749 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2750 if (err) { 2751 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 2752 return err; 2753 } 2754 2755 err = mlxsw_sp_kvdl_init(mlxsw_sp); 2756 if (err) { 2757 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 2758 return err; 2759 } 2760 2761 err = mlxsw_sp_fids_init(mlxsw_sp); 2762 if (err) { 2763 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 2764 goto err_fids_init; 2765 } 2766 2767 err = mlxsw_sp_policers_init(mlxsw_sp); 2768 if (err) { 2769 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); 2770 goto err_policers_init; 2771 } 2772 2773 err = mlxsw_sp_traps_init(mlxsw_sp); 2774 if (err) { 2775 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 2776 goto err_traps_init; 2777 } 2778 2779 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 2780 if (err) { 2781 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 2782 goto err_devlink_traps_init; 2783 } 2784 2785 err = mlxsw_sp_buffers_init(mlxsw_sp); 2786 if (err) { 2787 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 2788 goto err_buffers_init; 2789 } 2790 2791 err = mlxsw_sp_lag_init(mlxsw_sp); 2792 if (err) { 2793 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 2794 goto err_lag_init; 2795 } 2796 2797 /* Initialize SPAN before router and switchdev, so that those components 2798 * can call mlxsw_sp_span_respin(). 
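 * Respin re-resolves the SPAN (mirroring) agents after a topology
 * change, so the SPAN data structures must already exist by the time
 * router or switchdev event handlers run.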
 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize the netdevice notifier after the router and SPAN are
	 * initialized, so that the event handler can use router structures
	 * and call SPAN respin.
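	 *
	 * The teardown paths below mirror this order: the notifier is
	 * unregistered before the PTP, router and SPAN state it may
	 * reference is finalized.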
2867 */ 2868 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 2869 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2870 &mlxsw_sp->netdevice_nb); 2871 if (err) { 2872 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 2873 goto err_netdev_notifier; 2874 } 2875 2876 err = mlxsw_sp_dpipe_init(mlxsw_sp); 2877 if (err) { 2878 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 2879 goto err_dpipe_init; 2880 } 2881 2882 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 2883 if (err) { 2884 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 2885 goto err_port_module_info_init; 2886 } 2887 2888 err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, 2889 &mlxsw_sp_sample_trigger_ht_params); 2890 if (err) { 2891 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); 2892 goto err_sample_trigger_init; 2893 } 2894 2895 err = mlxsw_sp_ports_create(mlxsw_sp); 2896 if (err) { 2897 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 2898 goto err_ports_create; 2899 } 2900 2901 return 0; 2902 2903 err_ports_create: 2904 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 2905 err_sample_trigger_init: 2906 mlxsw_sp_port_module_info_fini(mlxsw_sp); 2907 err_port_module_info_init: 2908 mlxsw_sp_dpipe_fini(mlxsw_sp); 2909 err_dpipe_init: 2910 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2911 &mlxsw_sp->netdevice_nb); 2912 err_netdev_notifier: 2913 if (mlxsw_sp->clock) 2914 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 2915 err_ptp_init: 2916 if (mlxsw_sp->clock) 2917 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 2918 err_ptp_clock_init: 2919 mlxsw_sp_router_fini(mlxsw_sp); 2920 err_router_init: 2921 mlxsw_sp_acl_fini(mlxsw_sp); 2922 err_acl_init: 2923 mlxsw_sp_nve_fini(mlxsw_sp); 2924 err_nve_init: 2925 mlxsw_sp_afa_fini(mlxsw_sp); 2926 err_afa_init: 2927 mlxsw_sp_counter_pool_fini(mlxsw_sp); 2928 err_counter_pool_init: 2929 mlxsw_sp_switchdev_fini(mlxsw_sp); 2930 err_switchdev_init: 2931 mlxsw_sp_span_fini(mlxsw_sp); 2932 err_span_init: 2933 mlxsw_sp_lag_fini(mlxsw_sp); 2934 err_lag_init: 2935 mlxsw_sp_buffers_fini(mlxsw_sp); 2936 err_buffers_init: 2937 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 2938 err_devlink_traps_init: 2939 mlxsw_sp_traps_fini(mlxsw_sp); 2940 err_traps_init: 2941 mlxsw_sp_policers_fini(mlxsw_sp); 2942 err_policers_init: 2943 mlxsw_sp_fids_fini(mlxsw_sp); 2944 err_fids_init: 2945 mlxsw_sp_kvdl_fini(mlxsw_sp); 2946 mlxsw_sp_parsing_fini(mlxsw_sp); 2947 return err; 2948 } 2949 2950 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 2951 const struct mlxsw_bus_info *mlxsw_bus_info, 2952 struct netlink_ext_ack *extack) 2953 { 2954 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2955 2956 mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; 2957 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 2958 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 2959 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 2960 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 2961 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 2962 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 2963 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 2964 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 2965 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 2966 mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; 2967 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 2968 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 2969 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 2970 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; 
2971 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; 2972 mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; 2973 mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; 2974 mlxsw_sp->listeners = mlxsw_sp1_listener; 2975 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 2976 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 2977 2978 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 2979 } 2980 2981 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 2982 const struct mlxsw_bus_info *mlxsw_bus_info, 2983 struct netlink_ext_ack *extack) 2984 { 2985 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2986 2987 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 2988 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 2989 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 2990 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 2991 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 2992 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 2993 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 2994 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 2995 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 2996 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 2997 mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; 2998 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 2999 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3000 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 3001 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3002 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3003 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3004 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3005 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 3006 3007 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3008 } 3009 3010 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 3011 const struct mlxsw_bus_info *mlxsw_bus_info, 3012 struct netlink_ext_ack *extack) 3013 { 3014 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3015 3016 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3017 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3018 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3019 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3020 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3021 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3022 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3023 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3024 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3025 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3026 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3027 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3028 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3029 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3030 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3031 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3032 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3033 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3034 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3035 3036 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3037 } 3038 3039 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3040 { 3041 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3042 3043 mlxsw_sp_ports_remove(mlxsw_sp); 3044 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3045 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3046 mlxsw_sp_dpipe_fini(mlxsw_sp); 3047 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3048 &mlxsw_sp->netdevice_nb); 3049 if (mlxsw_sp->clock) { 3050 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3051 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 
3052 } 3053 mlxsw_sp_router_fini(mlxsw_sp); 3054 mlxsw_sp_acl_fini(mlxsw_sp); 3055 mlxsw_sp_nve_fini(mlxsw_sp); 3056 mlxsw_sp_afa_fini(mlxsw_sp); 3057 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3058 mlxsw_sp_switchdev_fini(mlxsw_sp); 3059 mlxsw_sp_span_fini(mlxsw_sp); 3060 mlxsw_sp_lag_fini(mlxsw_sp); 3061 mlxsw_sp_buffers_fini(mlxsw_sp); 3062 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3063 mlxsw_sp_traps_fini(mlxsw_sp); 3064 mlxsw_sp_policers_fini(mlxsw_sp); 3065 mlxsw_sp_fids_fini(mlxsw_sp); 3066 mlxsw_sp_kvdl_fini(mlxsw_sp); 3067 mlxsw_sp_parsing_fini(mlxsw_sp); 3068 } 3069 3070 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 3071 * 802.1Q FIDs 3072 */ 3073 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 3074 VLAN_VID_MASK - 1) 3075 3076 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3077 .used_max_mid = 1, 3078 .max_mid = MLXSW_SP_MID_MAX, 3079 .used_flood_tables = 1, 3080 .used_flood_mode = 1, 3081 .flood_mode = 3, 3082 .max_fid_flood_tables = 3, 3083 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3084 .used_max_ib_mc = 1, 3085 .max_ib_mc = 0, 3086 .used_max_pkey = 1, 3087 .max_pkey = 0, 3088 .used_kvd_sizes = 1, 3089 .kvd_hash_single_parts = 59, 3090 .kvd_hash_double_parts = 41, 3091 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3092 .swid_config = { 3093 { 3094 .used_type = 1, 3095 .type = MLXSW_PORT_SWID_TYPE_ETH, 3096 } 3097 }, 3098 }; 3099 3100 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3101 .used_max_mid = 1, 3102 .max_mid = MLXSW_SP_MID_MAX, 3103 .used_flood_tables = 1, 3104 .used_flood_mode = 1, 3105 .flood_mode = 3, 3106 .max_fid_flood_tables = 3, 3107 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3108 .used_max_ib_mc = 1, 3109 .max_ib_mc = 0, 3110 .used_max_pkey = 1, 3111 .max_pkey = 0, 3112 .used_kvh_xlt_cache_mode = 1, 3113 .kvh_xlt_cache_mode = 1, 3114 .swid_config = { 3115 { 3116 .used_type = 1, 3117 .type = MLXSW_PORT_SWID_TYPE_ETH, 3118 } 3119 }, 3120 }; 3121 3122 static void 3123 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3124 struct devlink_resource_size_params *kvd_size_params, 3125 struct devlink_resource_size_params *linear_size_params, 3126 struct devlink_resource_size_params *hash_double_size_params, 3127 struct devlink_resource_size_params *hash_single_size_params) 3128 { 3129 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3130 KVD_SINGLE_MIN_SIZE); 3131 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3132 KVD_DOUBLE_MIN_SIZE); 3133 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3134 u32 linear_size_min = 0; 3135 3136 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3137 MLXSW_SP_KVD_GRANULARITY, 3138 DEVLINK_RESOURCE_UNIT_ENTRY); 3139 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3140 kvd_size - single_size_min - 3141 double_size_min, 3142 MLXSW_SP_KVD_GRANULARITY, 3143 DEVLINK_RESOURCE_UNIT_ENTRY); 3144 devlink_resource_size_params_init(hash_double_size_params, 3145 double_size_min, 3146 kvd_size - single_size_min - 3147 linear_size_min, 3148 MLXSW_SP_KVD_GRANULARITY, 3149 DEVLINK_RESOURCE_UNIT_ENTRY); 3150 devlink_resource_size_params_init(hash_single_size_params, 3151 single_size_min, 3152 kvd_size - double_size_min - 3153 linear_size_min, 3154 MLXSW_SP_KVD_GRANULARITY, 3155 DEVLINK_RESOURCE_UNIT_ENTRY); 3156 } 3157 3158 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3159 { 3160 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3161 
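	/* The KVD is exposed as a devlink resource tree: the total size at
	 * the root, with the linear part and the two hash parts registered
	 * as children that user space can repartition; the initial sizes
	 * are taken from mlxsw_sp1_config_profile.
	 */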
struct devlink_resource_size_params hash_single_size_params; 3162 struct devlink_resource_size_params hash_double_size_params; 3163 struct devlink_resource_size_params linear_size_params; 3164 struct devlink_resource_size_params kvd_size_params; 3165 u32 kvd_size, single_size, double_size, linear_size; 3166 const struct mlxsw_config_profile *profile; 3167 int err; 3168 3169 profile = &mlxsw_sp1_config_profile; 3170 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3171 return -EIO; 3172 3173 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3174 &linear_size_params, 3175 &hash_double_size_params, 3176 &hash_single_size_params); 3177 3178 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3179 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3180 kvd_size, MLXSW_SP_RESOURCE_KVD, 3181 DEVLINK_RESOURCE_ID_PARENT_TOP, 3182 &kvd_size_params); 3183 if (err) 3184 return err; 3185 3186 linear_size = profile->kvd_linear_size; 3187 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 3188 linear_size, 3189 MLXSW_SP_RESOURCE_KVD_LINEAR, 3190 MLXSW_SP_RESOURCE_KVD, 3191 &linear_size_params); 3192 if (err) 3193 return err; 3194 3195 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 3196 if (err) 3197 return err; 3198 3199 double_size = kvd_size - linear_size; 3200 double_size *= profile->kvd_hash_double_parts; 3201 double_size /= profile->kvd_hash_double_parts + 3202 profile->kvd_hash_single_parts; 3203 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 3204 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 3205 double_size, 3206 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3207 MLXSW_SP_RESOURCE_KVD, 3208 &hash_double_size_params); 3209 if (err) 3210 return err; 3211 3212 single_size = kvd_size - double_size - linear_size; 3213 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 3214 single_size, 3215 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3216 MLXSW_SP_RESOURCE_KVD, 3217 &hash_single_size_params); 3218 if (err) 3219 return err; 3220 3221 return 0; 3222 } 3223 3224 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3225 { 3226 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3227 struct devlink_resource_size_params kvd_size_params; 3228 u32 kvd_size; 3229 3230 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3231 return -EIO; 3232 3233 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3234 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 3235 MLXSW_SP_KVD_GRANULARITY, 3236 DEVLINK_RESOURCE_UNIT_ENTRY); 3237 3238 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3239 kvd_size, MLXSW_SP_RESOURCE_KVD, 3240 DEVLINK_RESOURCE_ID_PARENT_TOP, 3241 &kvd_size_params); 3242 } 3243 3244 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 3245 { 3246 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3247 struct devlink_resource_size_params span_size_params; 3248 u32 max_span; 3249 3250 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 3251 return -EIO; 3252 3253 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 3254 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 3255 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3256 3257 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 3258 max_span, MLXSW_SP_RESOURCE_SPAN, 3259 DEVLINK_RESOURCE_ID_PARENT_TOP, 3260 &span_size_params); 3261 } 3262 3263 static int mlxsw_sp1_resources_register(struct mlxsw_core 
*mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the linear
	 * part. It is split into the single and double sizes according to
	 * the parts ratio from the profile; e.g. with the 59:41
	 * single:double ratio of mlxsw_sp1_config_profile, the double part
	 * receives 41/100 of the remaining entries. Both sizes must be
	 * multiples of the granularity from the profile. In case the user
	 * provided the sizes, they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check that the results are legal.
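	 * That is, the computed single and double sizes may not fall below
	 * the firmware-reported minima, and the linear part may not exceed
	 * the total KVD size.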
*/ 3366 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3367 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3368 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3369 return -EIO; 3370 3371 return 0; 3372 } 3373 3374 static int 3375 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 3376 struct devlink_param_gset_ctx *ctx) 3377 { 3378 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3379 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3380 3381 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 3382 return 0; 3383 } 3384 3385 static int 3386 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 3387 struct devlink_param_gset_ctx *ctx) 3388 { 3389 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3390 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3391 3392 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 3393 } 3394 3395 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 3396 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3397 "acl_region_rehash_interval", 3398 DEVLINK_PARAM_TYPE_U32, 3399 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3400 mlxsw_sp_params_acl_region_rehash_intrvl_get, 3401 mlxsw_sp_params_acl_region_rehash_intrvl_set, 3402 NULL), 3403 }; 3404 3405 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 3406 { 3407 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3408 union devlink_param_value value; 3409 int err; 3410 3411 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 3412 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3413 if (err) 3414 return err; 3415 3416 value.vu32 = 0; 3417 devlink_param_driverinit_value_set(devlink, 3418 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3419 value); 3420 return 0; 3421 } 3422 3423 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 3424 { 3425 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3426 mlxsw_sp2_devlink_params, 3427 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3428 } 3429 3430 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3431 struct sk_buff *skb, u8 local_port) 3432 { 3433 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3434 3435 skb_pull(skb, MLXSW_TXHDR_LEN); 3436 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3437 } 3438 3439 static struct mlxsw_driver mlxsw_sp1_driver = { 3440 .kind = mlxsw_sp1_driver_name, 3441 .priv_size = sizeof(struct mlxsw_sp), 3442 .fw_req_rev = &mlxsw_sp1_fw_rev, 3443 .fw_filename = MLXSW_SP1_FW_FILENAME, 3444 .init = mlxsw_sp1_init, 3445 .fini = mlxsw_sp_fini, 3446 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3447 .port_split = mlxsw_sp_port_split, 3448 .port_unsplit = mlxsw_sp_port_unsplit, 3449 .sb_pool_get = mlxsw_sp_sb_pool_get, 3450 .sb_pool_set = mlxsw_sp_sb_pool_set, 3451 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3452 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3453 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3454 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3455 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3456 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3457 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3458 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3459 .trap_init = mlxsw_sp_trap_init, 3460 .trap_fini = mlxsw_sp_trap_fini, 3461 .trap_action_set = mlxsw_sp_trap_action_set, 3462 .trap_group_init = 
mlxsw_sp_trap_group_init, 3463 .trap_group_set = mlxsw_sp_trap_group_set, 3464 .trap_policer_init = mlxsw_sp_trap_policer_init, 3465 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3466 .trap_policer_set = mlxsw_sp_trap_policer_set, 3467 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3468 .txhdr_construct = mlxsw_sp_txhdr_construct, 3469 .resources_register = mlxsw_sp1_resources_register, 3470 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3471 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3472 .txhdr_len = MLXSW_TXHDR_LEN, 3473 .profile = &mlxsw_sp1_config_profile, 3474 .res_query_enabled = true, 3475 .fw_fatal_enabled = true, 3476 .temp_warn_enabled = true, 3477 }; 3478 3479 static struct mlxsw_driver mlxsw_sp2_driver = { 3480 .kind = mlxsw_sp2_driver_name, 3481 .priv_size = sizeof(struct mlxsw_sp), 3482 .fw_req_rev = &mlxsw_sp2_fw_rev, 3483 .fw_filename = MLXSW_SP2_FW_FILENAME, 3484 .init = mlxsw_sp2_init, 3485 .fini = mlxsw_sp_fini, 3486 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3487 .port_split = mlxsw_sp_port_split, 3488 .port_unsplit = mlxsw_sp_port_unsplit, 3489 .sb_pool_get = mlxsw_sp_sb_pool_get, 3490 .sb_pool_set = mlxsw_sp_sb_pool_set, 3491 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3492 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3493 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3494 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3495 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3496 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3497 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3498 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3499 .trap_init = mlxsw_sp_trap_init, 3500 .trap_fini = mlxsw_sp_trap_fini, 3501 .trap_action_set = mlxsw_sp_trap_action_set, 3502 .trap_group_init = mlxsw_sp_trap_group_init, 3503 .trap_group_set = mlxsw_sp_trap_group_set, 3504 .trap_policer_init = mlxsw_sp_trap_policer_init, 3505 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3506 .trap_policer_set = mlxsw_sp_trap_policer_set, 3507 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3508 .txhdr_construct = mlxsw_sp_txhdr_construct, 3509 .resources_register = mlxsw_sp2_resources_register, 3510 .params_register = mlxsw_sp2_params_register, 3511 .params_unregister = mlxsw_sp2_params_unregister, 3512 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3513 .txhdr_len = MLXSW_TXHDR_LEN, 3514 .profile = &mlxsw_sp2_config_profile, 3515 .res_query_enabled = true, 3516 .fw_fatal_enabled = true, 3517 .temp_warn_enabled = true, 3518 }; 3519 3520 static struct mlxsw_driver mlxsw_sp3_driver = { 3521 .kind = mlxsw_sp3_driver_name, 3522 .priv_size = sizeof(struct mlxsw_sp), 3523 .fw_req_rev = &mlxsw_sp3_fw_rev, 3524 .fw_filename = MLXSW_SP3_FW_FILENAME, 3525 .init = mlxsw_sp3_init, 3526 .fini = mlxsw_sp_fini, 3527 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3528 .port_split = mlxsw_sp_port_split, 3529 .port_unsplit = mlxsw_sp_port_unsplit, 3530 .sb_pool_get = mlxsw_sp_sb_pool_get, 3531 .sb_pool_set = mlxsw_sp_sb_pool_set, 3532 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3533 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3534 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3535 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3536 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3537 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3538 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3539 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3540 .trap_init = mlxsw_sp_trap_init, 3541 .trap_fini = 
mlxsw_sp_trap_fini, 3542 .trap_action_set = mlxsw_sp_trap_action_set, 3543 .trap_group_init = mlxsw_sp_trap_group_init, 3544 .trap_group_set = mlxsw_sp_trap_group_set, 3545 .trap_policer_init = mlxsw_sp_trap_policer_init, 3546 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3547 .trap_policer_set = mlxsw_sp_trap_policer_set, 3548 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3549 .txhdr_construct = mlxsw_sp_txhdr_construct, 3550 .resources_register = mlxsw_sp2_resources_register, 3551 .params_register = mlxsw_sp2_params_register, 3552 .params_unregister = mlxsw_sp2_params_unregister, 3553 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3554 .txhdr_len = MLXSW_TXHDR_LEN, 3555 .profile = &mlxsw_sp2_config_profile, 3556 .res_query_enabled = true, 3557 .fw_fatal_enabled = true, 3558 .temp_warn_enabled = true, 3559 }; 3560 3561 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3562 { 3563 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3564 } 3565 3566 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, 3567 struct netdev_nested_priv *priv) 3568 { 3569 int ret = 0; 3570 3571 if (mlxsw_sp_port_dev_check(lower_dev)) { 3572 priv->data = (void *)netdev_priv(lower_dev); 3573 ret = 1; 3574 } 3575 3576 return ret; 3577 } 3578 3579 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 3580 { 3581 struct netdev_nested_priv priv = { 3582 .data = NULL, 3583 }; 3584 3585 if (mlxsw_sp_port_dev_check(dev)) 3586 return netdev_priv(dev); 3587 3588 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv); 3589 3590 return (struct mlxsw_sp_port *)priv.data; 3591 } 3592 3593 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3594 { 3595 struct mlxsw_sp_port *mlxsw_sp_port; 3596 3597 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3598 return mlxsw_sp_port ? 
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}

int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

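/* LAG objects are created and destroyed in hardware through the SLDR
 * register; per-port collector membership is managed through SLCOR.
 */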
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if exists */
	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
					     lag_dev, extack);
	if (err)
		goto err_router_join;

	return 0;

err_router_join:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member of
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

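/* Reflect the LAG lower state of the port: collection is toggled through
 * SLCOR, distribution by adding or removing the port from the LAG's
 * distributor list through SLDR. On failure, the first step is rolled back.
 */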
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

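/* NETDEV_PRECHANGEUPPER is used to veto topologies the device cannot
 * offload before they are committed; NETDEV_CHANGEUPPER then reflects the
 * accepted change (bridge, LAG, OVS or macvlan upper) in hardware.
 */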
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

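/* Handle uppers of a port's VLAN device. Only bridge and macvlan uppers can
 * be offloaded; bridge join/leave operates on the {port, VID} pair that the
 * VLAN device represents.
 */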
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

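/* Dispatch events on a VLAN device according to its real device: a physical
 * port, a LAG or a bridge.
 */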
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

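/* VxLAN devices require special handling: the VNI is mapped when the device
 * is enslaved to an offloaded bridge or brought up under one (NETDEV_PRE_UP),
 * and unmapped on unlinking or NETDEV_DOWN.
 */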
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

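/* Top-level netdevice notifier: invalidates SPAN entries whose port is being
 * unregistered, re-resolves SPAN after topology changes and then dispatches
 * the event by device type.
 */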
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

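/* Core drivers are registered before the PCI drivers that bind to them; the
 * error path and module exit unwind the registrations in reverse order.
 */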
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);