// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
#define MLXSW_SP1_FWREV_SUBMINOR 3326
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
#define MLXSW_SP2_FWREV_SUBMINOR 3326

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
#define MLXSW_SP3_FWREV_SUBMINOR 3326

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP3_FWREV_MINOR,
	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress port of the egress device.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
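
/* Typical flow-counter usage (an illustrative sketch, not part of the
 * driver's call paths): a counter is allocated and implicitly cleared,
 * periodically queried, and finally released. Error handling elided.
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *
 *	mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index, &packets, &bytes);
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 */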

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
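
/* Worked example for the MTU adjustment above (a sketch; the exact header
 * length comes from MLXSW_TXHDR_LEN in txheader.h): requesting an MTU of
 * 1500 programs 1500 + MLXSW_TXHDR_LEN + ETH_HLEN (14), i.e. 1530 bytes
 * for a 16-byte Tx header. It is this padded value, not the user-visible
 * MTU, that is validated against max_mtu and written via PMTU.
 */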

static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u8 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}
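
/* Usage sketch for mlxsw_sp_port_pvid_set() (illustrative only):
 *
 *	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 10, ETH_P_8021Q);
 *		- untagged ingress packets are classified to VLAN 10
 *		  and accepted.
 *	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0, ETH_P_8021Q);
 *		- there is no PVID; untagged ingress packets are
 *		  discarded and only tagged packets are accepted.
 */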

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
	return err;
}

static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				       u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
}
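
/* Example PMLP mapping produced by mlxsw_sp_port_module_map() (a sketch;
 * the values are illustrative): for port_mapping = { .module = 3,
 * .width = 4, .lane = 0 }, lanes 0..3 of module 3 are assigned to the
 * port, with the same lane number used for both Rx and Tx.
 * mlxsw_sp_port_module_unmap() reverses this by writing a width of 0.
 */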

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.module);
	return err;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.module);
	return 0;
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* The Tx header is consumed by the HW on the way, so we should not
	 * count its bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
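
/* A note on the two stats paths above: mlxsw_sp_port_get_sw_stats64()
 * aggregates the per-CPU software counters and therefore only reflects
 * traffic that actually traversed the CPU (the slow path); it backs
 * IFLA_OFFLOAD_XSTATS_CPU_HIT. The PPCNT-based helpers read the hardware
 * port counters, which account for all forwarded traffic, including
 * packets never seen by the kernel.
 */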

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when the port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
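
/* Worked example for the batching above (a sketch; the exact value of
 * MLXSW_REG_SPVM_REC_MAX_COUNT comes from reg.h): with a record limit
 * of 255, programming VIDs 1..4094 issues SPVM writes for the ranges
 * 1-255, 256-510, and so on, each write covering up to 255 consecutive
 * VIDs, instead of one register transaction per VID.
 */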

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to the HW filter when the device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from the HW filter when the device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
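
/* The offload entry points above are exercised from user space via tc,
 * e.g. (illustrative commands; "swp1" is an example port name):
 *
 *	tc qdisc add dev swp1 clsact
 *	tc filter add dev swp1 ingress protocol ip flower \
 *		dst_ip 192.0.2.1 action drop
 *	tc qdisc replace dev swp1 root handle 1: prio
 *
 * The clsact/filter path enters through TC_SETUP_BLOCK, the qdisc
 * replacements through the respective TC_SETUP_QDISC_* cases.
 */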

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
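
/* The handlers above are reached through ethtool feature toggles, e.g.
 * (illustrative; "swp1" is an example port name):
 *
 *	ethtool -K swp1 hw-tc-offload off
 *	ethtool -K swp1 loopback on
 *
 * mlxsw_sp_handle_feature() only invokes a handler when the requested
 * state actually differs from dev->features, so repeated toggles are
 * cheap no-ops.
 */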

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
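
/* Scheduling hierarchy set up by mlxsw_sp_port_ets_init() below (a sketch
 * derived from the code; TCs 8..15 are the multicast shadows of TCs 0..7):
 *
 *	port
 *	 `- group 0
 *	     `- subgroup i (one per TC, i = 0..7)
 *	         `- TC i and TC i + 8 (MC, DWRR weight 100)
 *
 * Each unicast TC i and its multicast counterpart i + 8 feed the same
 * subgroup i, and all subgroups feed group 0.
 */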

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
						    &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}

static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u8 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;
	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
			      split_port_subnumber, slot_index);
	return 0;
}
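
/* mlxsw_sp_port_create() below brings a front-panel port up in stages:
 * module mapping, SWID, core port, netdev allocation, then per-port
 * offload state (buffers, ETS, DCB, FIDs, qdiscs, VLANs). The error
 * labels unwind these stages in exactly the reverse order, so each new
 * stage must add its cleanup at the matching point in the chain.
 */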

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false to make the local port treat
	 * only packets with an 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module);
	return err;
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module);
}

static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

static bool mlxsw_sp_local_port_valid(u8 local_port)
{
	return local_port != MLXSW_PORT_CPU_PORT;
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	if (!mlxsw_sp_local_port_valid(local_port))
		return false;
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}

static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;
1920 for (i = 1; i < max_ports; i++) { 1921 if (mlxsw_core_port_is_xm(mlxsw_sp->core, i)) 1922 continue; 1923 1924 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 1925 if (err) 1926 goto err_port_module_info_get; 1927 if (!port_mapping.width) 1928 continue; 1929 1930 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, 1931 sizeof(port_mapping), 1932 GFP_KERNEL); 1933 if (!mlxsw_sp->port_mapping[i]) { 1934 err = -ENOMEM; 1935 goto err_port_module_info_dup; 1936 } 1937 } 1938 return 0; 1939 1940 err_port_module_info_get: 1941 err_port_module_info_dup: 1942 for (i--; i >= 1; i--) 1943 kfree(mlxsw_sp->port_mapping[i]); 1944 kfree(mlxsw_sp->port_mapping); 1945 return err; 1946 } 1947 1948 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 1949 { 1950 int i; 1951 1952 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 1953 kfree(mlxsw_sp->port_mapping[i]); 1954 kfree(mlxsw_sp->port_mapping); 1955 } 1956 1957 static int 1958 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, 1959 struct mlxsw_sp_port_mapping *port_mapping, 1960 unsigned int count, const char *pmtdb_pl) 1961 { 1962 struct mlxsw_sp_port_mapping split_port_mapping; 1963 int err, i; 1964 1965 split_port_mapping = *port_mapping; 1966 split_port_mapping.width /= count; 1967 for (i = 0; i < count; i++) { 1968 u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 1969 1970 if (!mlxsw_sp_local_port_valid(s_local_port)) 1971 continue; 1972 1973 err = mlxsw_sp_port_create(mlxsw_sp, s_local_port, 1974 true, &split_port_mapping); 1975 if (err) 1976 goto err_port_create; 1977 split_port_mapping.lane += split_port_mapping.width; 1978 } 1979 1980 return 0; 1981 1982 err_port_create: 1983 for (i--; i >= 0; i--) { 1984 u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 1985 1986 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 1987 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 1988 } 1989 return err; 1990 } 1991 1992 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 1993 unsigned int count, 1994 const char *pmtdb_pl) 1995 { 1996 struct mlxsw_sp_port_mapping *port_mapping; 1997 int i; 1998 1999 /* Go over original unsplit ports in the gap and recreate them. 
*/ 2000 for (i = 0; i < count; i++) { 2001 u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2002 2003 port_mapping = mlxsw_sp->port_mapping[local_port]; 2004 if (!port_mapping || !mlxsw_sp_local_port_valid(local_port)) 2005 continue; 2006 mlxsw_sp_port_create(mlxsw_sp, local_port, 2007 false, port_mapping); 2008 } 2009 } 2010 2011 static struct mlxsw_sp_port * 2012 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) 2013 { 2014 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 2015 return mlxsw_sp->ports[local_port]; 2016 return NULL; 2017 } 2018 2019 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 2020 unsigned int count, 2021 struct netlink_ext_ack *extack) 2022 { 2023 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2024 struct mlxsw_sp_port_mapping port_mapping; 2025 struct mlxsw_sp_port *mlxsw_sp_port; 2026 enum mlxsw_reg_pmtdb_status status; 2027 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2028 int i; 2029 int err; 2030 2031 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2032 if (!mlxsw_sp_port) { 2033 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2034 local_port); 2035 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2036 return -EINVAL; 2037 } 2038 2039 if (mlxsw_sp_port->split) { 2040 NL_SET_ERR_MSG_MOD(extack, "Port is already split"); 2041 return -EINVAL; 2042 } 2043 2044 mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, 2045 mlxsw_sp_port->mapping.module_width / count, 2046 count); 2047 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2048 if (err) { 2049 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2050 return err; 2051 } 2052 2053 status = mlxsw_reg_pmtdb_status_get(pmtdb_pl); 2054 if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) { 2055 NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration"); 2056 return -EINVAL; 2057 } 2058 2059 port_mapping = mlxsw_sp_port->mapping; 2060 2061 for (i = 0; i < count; i++) { 2062 u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2063 2064 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2065 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2066 } 2067 2068 err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping, 2069 count, pmtdb_pl); 2070 if (err) { 2071 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2072 goto err_port_split_create; 2073 } 2074 2075 return 0; 2076 2077 err_port_split_create: 2078 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2079 return err; 2080 } 2081 2082 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, 2083 struct netlink_ext_ack *extack) 2084 { 2085 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2086 struct mlxsw_sp_port *mlxsw_sp_port; 2087 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2088 unsigned int count; 2089 int i; 2090 int err; 2091 2092 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2093 if (!mlxsw_sp_port) { 2094 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2095 local_port); 2096 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2097 return -EINVAL; 2098 } 2099 2100 if (!mlxsw_sp_port->split) { 2101 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2102 return -EINVAL; 2103 } 2104 2105 count = mlxsw_sp_port->mapping.module_width / 2106 mlxsw_sp_port->mapping.width; 2107 2108 mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, 2109 mlxsw_sp_port->mapping.module_width / count, 2110 count); 
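/* Worked example of the unsplit arithmetic above, assuming a 4-lane module
 * that was previously split into two 2-lane ports (illustrative numbers
 * only):
 *
 *	module_width = 4, width = 2
 *	count = module_width / width = 2 split ports to remove
 *	per-port width packed into PMTDB = module_width / count = 2 lanes
 *
 * The PMTDB query that follows resolves which local ports belong to this
 * split group.
 */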
2111 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
2112 if (err) {
2113 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
2114 return err;
2115 }
2116
2117 for (i = 0; i < count; i++) {
2118 u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2119
2120 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
2121 mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
2122 }
2123
2124 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
2125
2126 return 0;
2127 }
2128
2129 static void
2130 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2131 {
2132 int i;
2133
2134 for (i = 0; i < TC_MAX_QUEUE; i++)
2135 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2136 }
2137
2138 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2139 char *pude_pl, void *priv)
2140 {
2141 struct mlxsw_sp *mlxsw_sp = priv;
2142 struct mlxsw_sp_port *mlxsw_sp_port;
2143 enum mlxsw_reg_pude_oper_status status;
2144 unsigned int max_ports;
2145 u8 local_port;
2146
2147 max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2148 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2149
2150 if (WARN_ON_ONCE(local_port >= max_ports))
2151 return;
2152 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2153 if (!mlxsw_sp_port)
2154 return;
2155
2156 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2157 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2158 netdev_info(mlxsw_sp_port->dev, "link up\n");
2159 netif_carrier_on(mlxsw_sp_port->dev);
2160 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2161 } else {
2162 netdev_info(mlxsw_sp_port->dev, "link down\n");
2163 netif_carrier_off(mlxsw_sp_port->dev);
2164 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2165 }
2166 }
2167
2168 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2169 char *mtpptr_pl, bool ingress)
2170 {
2171 u8 local_port;
2172 u8 num_rec;
2173 int i;
2174
2175 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2176 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2177 for (i = 0; i < num_rec; i++) {
2178 u8 domain_number;
2179 u8 message_type;
2180 u16 sequence_id;
2181 u64 timestamp;
2182
2183 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2184 &domain_number, &sequence_id,
2185 &timestamp);
2186 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2187 message_type, domain_number,
2188 sequence_id, timestamp);
2189 }
2190 }
2191
2192 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2193 char *mtpptr_pl, void *priv)
2194 {
2195 struct mlxsw_sp *mlxsw_sp = priv;
2196
2197 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2198 }
2199
2200 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2201 char *mtpptr_pl, void *priv)
2202 {
2203 struct mlxsw_sp *mlxsw_sp = priv;
2204
2205 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2206 }
2207
2208 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2209 u8 local_port, void *priv)
2210 {
2211 struct mlxsw_sp *mlxsw_sp = priv;
2212 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2213 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2214
2215 if (unlikely(!mlxsw_sp_port)) {
2216 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2217 local_port);
2218 return;
2219 }
2220
2221 skb->dev = mlxsw_sp_port->dev;
2222
2223 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2224 u64_stats_update_begin(&pcpu_stats->syncp);
2225 pcpu_stats->rx_packets++;
2226 pcpu_stats->rx_bytes
+= skb->len; 2227 u64_stats_update_end(&pcpu_stats->syncp); 2228 2229 skb->protocol = eth_type_trans(skb, skb->dev); 2230 netif_receive_skb(skb); 2231 } 2232 2233 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 2234 void *priv) 2235 { 2236 skb->offload_fwd_mark = 1; 2237 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2238 } 2239 2240 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2241 u8 local_port, void *priv) 2242 { 2243 skb->offload_l3_fwd_mark = 1; 2244 skb->offload_fwd_mark = 1; 2245 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2246 } 2247 2248 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2249 u8 local_port) 2250 { 2251 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2252 } 2253 2254 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2255 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2256 _is_ctrl, SP_##_trap_group, DISCARD) 2257 2258 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2259 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2260 _is_ctrl, SP_##_trap_group, DISCARD) 2261 2262 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2263 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2264 _is_ctrl, SP_##_trap_group, DISCARD) 2265 2266 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2267 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2268 2269 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2270 /* Events */ 2271 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2272 /* L2 traps */ 2273 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2274 /* L3 traps */ 2275 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2276 false), 2277 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2278 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2279 false), 2280 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2281 ROUTER_EXP, false), 2282 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2283 ROUTER_EXP, false), 2284 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2285 ROUTER_EXP, false), 2286 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2287 ROUTER_EXP, false), 2288 /* Multicast Router Traps */ 2289 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2290 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2291 /* NVE traps */ 2292 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), 2293 }; 2294 2295 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2296 /* Events */ 2297 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2298 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2299 }; 2300 2301 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2302 { 2303 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2304 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2305 enum mlxsw_reg_qpcr_ir_units ir_units; 2306 int max_cpu_policers; 2307 bool is_bytes; 2308 u8 burst_size; 2309 u32 rate; 2310 int i, err; 2311 2312 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2313 return -EIO; 2314 2315 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2316 2317 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2318 for (i = 0; i < max_cpu_policers; i++) { 2319 is_bytes = false; 2320 switch (i) { 2321 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 
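/* The case labels here group every trap group that shares one policer
 * configuration: rate 1024 with burst size 7, in packet mode
 * (is_bytes == false), as packed into QPCR below. A sketch of the
 * equivalent standalone write for a single group (illustrative only):
 *
 *	mlxsw_reg_qpcr_pack(qpcr_pl,
 *			    MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
 *			    MLXSW_REG_QPCR_IR_UNITS_M, false, 1024, 7);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
 */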
2322 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2323 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2324 rate = 1024; 2325 burst_size = 7; 2326 break; 2327 default: 2328 continue; 2329 } 2330 2331 __set_bit(i, mlxsw_sp->trap->policers_usage); 2332 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2333 burst_size); 2334 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2335 if (err) 2336 return err; 2337 } 2338 2339 return 0; 2340 } 2341 2342 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2343 { 2344 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2345 enum mlxsw_reg_htgt_trap_group i; 2346 int max_cpu_policers; 2347 int max_trap_groups; 2348 u8 priority, tc; 2349 u16 policer_id; 2350 int err; 2351 2352 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2353 return -EIO; 2354 2355 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2356 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2357 2358 for (i = 0; i < max_trap_groups; i++) { 2359 policer_id = i; 2360 switch (i) { 2361 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2362 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2363 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2364 priority = 1; 2365 tc = 1; 2366 break; 2367 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2368 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2369 tc = MLXSW_REG_HTGT_DEFAULT_TC; 2370 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2371 break; 2372 default: 2373 continue; 2374 } 2375 2376 if (max_cpu_policers <= policer_id && 2377 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2378 return -EIO; 2379 2380 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2381 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2382 if (err) 2383 return err; 2384 } 2385 2386 return 0; 2387 } 2388 2389 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp, 2390 const struct mlxsw_listener listeners[], 2391 size_t listeners_count) 2392 { 2393 int i; 2394 int err; 2395 2396 for (i = 0; i < listeners_count; i++) { 2397 err = mlxsw_core_trap_register(mlxsw_sp->core, 2398 &listeners[i], 2399 mlxsw_sp); 2400 if (err) 2401 goto err_listener_register; 2402 2403 } 2404 return 0; 2405 2406 err_listener_register: 2407 for (i--; i >= 0; i--) { 2408 mlxsw_core_trap_unregister(mlxsw_sp->core, 2409 &listeners[i], 2410 mlxsw_sp); 2411 } 2412 return err; 2413 } 2414 2415 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp, 2416 const struct mlxsw_listener listeners[], 2417 size_t listeners_count) 2418 { 2419 int i; 2420 2421 for (i = 0; i < listeners_count; i++) { 2422 mlxsw_core_trap_unregister(mlxsw_sp->core, 2423 &listeners[i], 2424 mlxsw_sp); 2425 } 2426 } 2427 2428 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2429 { 2430 struct mlxsw_sp_trap *trap; 2431 u64 max_policers; 2432 int err; 2433 2434 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2435 return -EIO; 2436 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2437 trap = kzalloc(struct_size(trap, policers_usage, 2438 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2439 if (!trap) 2440 return -ENOMEM; 2441 trap->max_policers = max_policers; 2442 mlxsw_sp->trap = trap; 2443 2444 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2445 if (err) 2446 goto err_cpu_policers_set; 2447 2448 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2449 if (err) 2450 goto err_trap_groups_set; 2451 2452 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener, 2453 ARRAY_SIZE(mlxsw_sp_listener)); 2454 if (err) 2455 goto err_traps_register; 
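/* On top of the common listener set registered above, each ASIC
 * generation may supply extra listeners. The wiring happens in the
 * per-ASIC init; for Spectrum-1 it is (verbatim from mlxsw_sp1_init()
 * below):
 *
 *	mlxsw_sp->listeners = mlxsw_sp1_listener;
 *	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
 *
 * Spectrum-2/3 leave listeners_count at zero, so the call below is a
 * no-op for them.
 */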
2456 2457 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners, 2458 mlxsw_sp->listeners_count); 2459 if (err) 2460 goto err_extra_traps_init; 2461 2462 return 0; 2463 2464 err_extra_traps_init: 2465 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2466 ARRAY_SIZE(mlxsw_sp_listener)); 2467 err_traps_register: 2468 err_trap_groups_set: 2469 err_cpu_policers_set: 2470 kfree(trap); 2471 return err; 2472 } 2473 2474 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2475 { 2476 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners, 2477 mlxsw_sp->listeners_count); 2478 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2479 ARRAY_SIZE(mlxsw_sp_listener)); 2480 kfree(mlxsw_sp->trap); 2481 } 2482 2483 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2484 2485 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2486 { 2487 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2488 u32 seed; 2489 int err; 2490 2491 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2492 MLXSW_SP_LAG_SEED_INIT); 2493 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2494 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2495 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2496 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2497 MLXSW_REG_SLCR_LAG_HASH_SIP | 2498 MLXSW_REG_SLCR_LAG_HASH_DIP | 2499 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2500 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2501 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2502 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2503 if (err) 2504 return err; 2505 2506 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 2507 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2508 return -EIO; 2509 2510 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 2511 sizeof(struct mlxsw_sp_upper), 2512 GFP_KERNEL); 2513 if (!mlxsw_sp->lags) 2514 return -ENOMEM; 2515 2516 return 0; 2517 } 2518 2519 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2520 { 2521 kfree(mlxsw_sp->lags); 2522 } 2523 2524 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 2525 { 2526 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2527 int err; 2528 2529 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 2530 MLXSW_REG_HTGT_INVALID_POLICER, 2531 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2532 MLXSW_REG_HTGT_DEFAULT_TC); 2533 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2534 if (err) 2535 return err; 2536 2537 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE, 2538 MLXSW_REG_HTGT_INVALID_POLICER, 2539 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2540 MLXSW_REG_HTGT_DEFAULT_TC); 2541 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2542 if (err) 2543 return err; 2544 2545 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE, 2546 MLXSW_REG_HTGT_INVALID_POLICER, 2547 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2548 MLXSW_REG_HTGT_DEFAULT_TC); 2549 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2550 if (err) 2551 return err; 2552 2553 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE, 2554 MLXSW_REG_HTGT_INVALID_POLICER, 2555 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2556 MLXSW_REG_HTGT_DEFAULT_TC); 2557 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2558 } 2559 2560 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2561 .clock_init = mlxsw_sp1_ptp_clock_init, 2562 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2563 .init = mlxsw_sp1_ptp_init, 2564 .fini = mlxsw_sp1_ptp_fini, 2565 .receive = mlxsw_sp1_ptp_receive, 2566 .transmitted = mlxsw_sp1_ptp_transmitted, 2567 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2568 .hwtstamp_set = 
mlxsw_sp1_ptp_hwtstamp_set, 2569 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2570 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2571 .get_stats_count = mlxsw_sp1_get_stats_count, 2572 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2573 .get_stats = mlxsw_sp1_get_stats, 2574 }; 2575 2576 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2577 .clock_init = mlxsw_sp2_ptp_clock_init, 2578 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2579 .init = mlxsw_sp2_ptp_init, 2580 .fini = mlxsw_sp2_ptp_fini, 2581 .receive = mlxsw_sp2_ptp_receive, 2582 .transmitted = mlxsw_sp2_ptp_transmitted, 2583 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2584 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2585 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2586 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2587 .get_stats_count = mlxsw_sp2_get_stats_count, 2588 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2589 .get_stats = mlxsw_sp2_get_stats, 2590 }; 2591 2592 struct mlxsw_sp_sample_trigger_node { 2593 struct mlxsw_sp_sample_trigger trigger; 2594 struct mlxsw_sp_sample_params params; 2595 struct rhash_head ht_node; 2596 struct rcu_head rcu; 2597 refcount_t refcount; 2598 }; 2599 2600 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { 2601 .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), 2602 .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), 2603 .key_len = sizeof(struct mlxsw_sp_sample_trigger), 2604 .automatic_shrinking = true, 2605 }; 2606 2607 static void 2608 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, 2609 const struct mlxsw_sp_sample_trigger *trigger) 2610 { 2611 memset(key, 0, sizeof(*key)); 2612 key->type = trigger->type; 2613 key->local_port = trigger->local_port; 2614 } 2615 2616 /* RCU read lock must be held */ 2617 struct mlxsw_sp_sample_params * 2618 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, 2619 const struct mlxsw_sp_sample_trigger *trigger) 2620 { 2621 struct mlxsw_sp_sample_trigger_node *trigger_node; 2622 struct mlxsw_sp_sample_trigger key; 2623 2624 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2625 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, 2626 mlxsw_sp_sample_trigger_ht_params); 2627 if (!trigger_node) 2628 return NULL; 2629 2630 return &trigger_node->params; 2631 } 2632 2633 static int 2634 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, 2635 const struct mlxsw_sp_sample_trigger *trigger, 2636 const struct mlxsw_sp_sample_params *params) 2637 { 2638 struct mlxsw_sp_sample_trigger_node *trigger_node; 2639 int err; 2640 2641 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); 2642 if (!trigger_node) 2643 return -ENOMEM; 2644 2645 trigger_node->trigger = *trigger; 2646 trigger_node->params = *params; 2647 refcount_set(&trigger_node->refcount, 1); 2648 2649 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, 2650 &trigger_node->ht_node, 2651 mlxsw_sp_sample_trigger_ht_params); 2652 if (err) 2653 goto err_rhashtable_insert; 2654 2655 return 0; 2656 2657 err_rhashtable_insert: 2658 kfree(trigger_node); 2659 return err; 2660 } 2661 2662 static void 2663 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, 2664 struct mlxsw_sp_sample_trigger_node *trigger_node) 2665 { 2666 rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, 2667 &trigger_node->ht_node, 2668 mlxsw_sp_sample_trigger_ht_params); 2669 kfree_rcu(trigger_node, rcu); 2670 } 2671 2672 int 2673 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, 2674 const struct 
mlxsw_sp_sample_trigger *trigger, 2675 const struct mlxsw_sp_sample_params *params, 2676 struct netlink_ext_ack *extack) 2677 { 2678 struct mlxsw_sp_sample_trigger_node *trigger_node; 2679 struct mlxsw_sp_sample_trigger key; 2680 2681 ASSERT_RTNL(); 2682 2683 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2684 2685 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2686 &key, 2687 mlxsw_sp_sample_trigger_ht_params); 2688 if (!trigger_node) 2689 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, 2690 params); 2691 2692 if (trigger_node->trigger.local_port) { 2693 NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port"); 2694 return -EINVAL; 2695 } 2696 2697 if (trigger_node->params.psample_group != params->psample_group || 2698 trigger_node->params.truncate != params->truncate || 2699 trigger_node->params.rate != params->rate || 2700 trigger_node->params.trunc_size != params->trunc_size) { 2701 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); 2702 return -EINVAL; 2703 } 2704 2705 refcount_inc(&trigger_node->refcount); 2706 2707 return 0; 2708 } 2709 2710 void 2711 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, 2712 const struct mlxsw_sp_sample_trigger *trigger) 2713 { 2714 struct mlxsw_sp_sample_trigger_node *trigger_node; 2715 struct mlxsw_sp_sample_trigger key; 2716 2717 ASSERT_RTNL(); 2718 2719 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2720 2721 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2722 &key, 2723 mlxsw_sp_sample_trigger_ht_params); 2724 if (!trigger_node) 2725 return; 2726 2727 if (!refcount_dec_and_test(&trigger_node->refcount)) 2728 return; 2729 2730 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); 2731 } 2732 2733 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2734 unsigned long event, void *ptr); 2735 2736 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96 2737 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128 2738 #define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789 2739 2740 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) 2741 { 2742 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 2743 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; 2744 mutex_init(&mlxsw_sp->parsing.lock); 2745 } 2746 2747 static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) 2748 { 2749 mutex_destroy(&mlxsw_sp->parsing.lock); 2750 } 2751 2752 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2753 const struct mlxsw_bus_info *mlxsw_bus_info, 2754 struct netlink_ext_ack *extack) 2755 { 2756 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2757 int err; 2758 2759 mlxsw_sp->core = mlxsw_core; 2760 mlxsw_sp->bus_info = mlxsw_bus_info; 2761 2762 mlxsw_sp_parsing_init(mlxsw_sp); 2763 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 2764 2765 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2766 if (err) { 2767 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 2768 return err; 2769 } 2770 2771 err = mlxsw_sp_kvdl_init(mlxsw_sp); 2772 if (err) { 2773 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 2774 return err; 2775 } 2776 2777 err = mlxsw_sp_fids_init(mlxsw_sp); 2778 if (err) { 2779 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 2780 goto err_fids_init; 2781 } 2782 2783 err = mlxsw_sp_policers_init(mlxsw_sp); 2784 if (err) { 2785 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); 2786 goto err_policers_init; 2787 } 2788 2789 err = 
mlxsw_sp_traps_init(mlxsw_sp);
2790 if (err) {
2791 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
2792 goto err_traps_init;
2793 }
2794
2795 err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
2796 if (err) {
2797 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
2798 goto err_devlink_traps_init;
2799 }
2800
2801 err = mlxsw_sp_buffers_init(mlxsw_sp);
2802 if (err) {
2803 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2804 goto err_buffers_init;
2805 }
2806
2807 err = mlxsw_sp_lag_init(mlxsw_sp);
2808 if (err) {
2809 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2810 goto err_lag_init;
2811 }
2812
2813 /* Initialize SPAN before router and switchdev, so that those components
2814 * can call mlxsw_sp_span_respin().
2815 */
2816 err = mlxsw_sp_span_init(mlxsw_sp);
2817 if (err) {
2818 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
2819 goto err_span_init;
2820 }
2821
2822 err = mlxsw_sp_switchdev_init(mlxsw_sp);
2823 if (err) {
2824 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2825 goto err_switchdev_init;
2826 }
2827
2828 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
2829 if (err) {
2830 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
2831 goto err_counter_pool_init;
2832 }
2833
2834 err = mlxsw_sp_afa_init(mlxsw_sp);
2835 if (err) {
2836 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
2837 goto err_afa_init;
2838 }
2839
2840 err = mlxsw_sp_nve_init(mlxsw_sp);
2841 if (err) {
2842 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
2843 goto err_nve_init;
2844 }
2845
2846 err = mlxsw_sp_acl_init(mlxsw_sp);
2847 if (err) {
2848 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
2849 goto err_acl_init;
2850 }
2851
2852 err = mlxsw_sp_router_init(mlxsw_sp, extack);
2853 if (err) {
2854 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
2855 goto err_router_init;
2856 }
2857
2858 if (mlxsw_sp->bus_info->read_frc_capable) {
2859 /* NULL is a valid return value from clock_init */
2860 mlxsw_sp->clock =
2861 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
2862 mlxsw_sp->bus_info->dev);
2863 if (IS_ERR(mlxsw_sp->clock)) {
2864 err = PTR_ERR(mlxsw_sp->clock);
2865 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
2866 goto err_ptp_clock_init;
2867 }
2868 }
2869
2870 if (mlxsw_sp->clock) {
2871 /* NULL is a valid return value from ptp_ops->init */
2872 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
2873 if (IS_ERR(mlxsw_sp->ptp_state)) {
2874 err = PTR_ERR(mlxsw_sp->ptp_state);
2875 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
2876 goto err_ptp_init;
2877 }
2878 }
2879
2880 /* Initialize the netdevice notifier after the router and SPAN are
2881 * initialized, so that the event handler can use router structures and
2882 * call SPAN respin.
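 * Teardown is symmetric: mlxsw_sp_fini() unregisters this notifier
 * before the router and SPAN are finalized.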
2883 */ 2884 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 2885 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2886 &mlxsw_sp->netdevice_nb); 2887 if (err) { 2888 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 2889 goto err_netdev_notifier; 2890 } 2891 2892 err = mlxsw_sp_dpipe_init(mlxsw_sp); 2893 if (err) { 2894 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 2895 goto err_dpipe_init; 2896 } 2897 2898 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 2899 if (err) { 2900 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 2901 goto err_port_module_info_init; 2902 } 2903 2904 err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, 2905 &mlxsw_sp_sample_trigger_ht_params); 2906 if (err) { 2907 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); 2908 goto err_sample_trigger_init; 2909 } 2910 2911 err = mlxsw_sp_ports_create(mlxsw_sp); 2912 if (err) { 2913 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 2914 goto err_ports_create; 2915 } 2916 2917 return 0; 2918 2919 err_ports_create: 2920 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 2921 err_sample_trigger_init: 2922 mlxsw_sp_port_module_info_fini(mlxsw_sp); 2923 err_port_module_info_init: 2924 mlxsw_sp_dpipe_fini(mlxsw_sp); 2925 err_dpipe_init: 2926 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2927 &mlxsw_sp->netdevice_nb); 2928 err_netdev_notifier: 2929 if (mlxsw_sp->clock) 2930 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 2931 err_ptp_init: 2932 if (mlxsw_sp->clock) 2933 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 2934 err_ptp_clock_init: 2935 mlxsw_sp_router_fini(mlxsw_sp); 2936 err_router_init: 2937 mlxsw_sp_acl_fini(mlxsw_sp); 2938 err_acl_init: 2939 mlxsw_sp_nve_fini(mlxsw_sp); 2940 err_nve_init: 2941 mlxsw_sp_afa_fini(mlxsw_sp); 2942 err_afa_init: 2943 mlxsw_sp_counter_pool_fini(mlxsw_sp); 2944 err_counter_pool_init: 2945 mlxsw_sp_switchdev_fini(mlxsw_sp); 2946 err_switchdev_init: 2947 mlxsw_sp_span_fini(mlxsw_sp); 2948 err_span_init: 2949 mlxsw_sp_lag_fini(mlxsw_sp); 2950 err_lag_init: 2951 mlxsw_sp_buffers_fini(mlxsw_sp); 2952 err_buffers_init: 2953 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 2954 err_devlink_traps_init: 2955 mlxsw_sp_traps_fini(mlxsw_sp); 2956 err_traps_init: 2957 mlxsw_sp_policers_fini(mlxsw_sp); 2958 err_policers_init: 2959 mlxsw_sp_fids_fini(mlxsw_sp); 2960 err_fids_init: 2961 mlxsw_sp_kvdl_fini(mlxsw_sp); 2962 mlxsw_sp_parsing_fini(mlxsw_sp); 2963 return err; 2964 } 2965 2966 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 2967 const struct mlxsw_bus_info *mlxsw_bus_info, 2968 struct netlink_ext_ack *extack) 2969 { 2970 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2971 2972 mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; 2973 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 2974 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 2975 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 2976 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 2977 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 2978 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 2979 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 2980 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 2981 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 2982 mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; 2983 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 2984 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; 2985 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 2986 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; 
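/* All generation-specific behaviour is selected through these ops tables;
 * common code then dispatches without knowing the ASIC revision, e.g. as
 * mlxsw_sp_ptp_receive() above does:
 *
 *	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
 */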
2987 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; 2988 mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; 2989 mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; 2990 mlxsw_sp->listeners = mlxsw_sp1_listener; 2991 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 2992 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 2993 2994 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 2995 } 2996 2997 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 2998 const struct mlxsw_bus_info *mlxsw_bus_info, 2999 struct netlink_ext_ack *extack) 3000 { 3001 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3002 3003 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3004 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3005 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3006 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3007 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3008 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3009 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3010 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3011 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3012 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3013 mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; 3014 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3015 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3016 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 3017 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3018 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3019 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3020 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3021 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 3022 3023 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3024 } 3025 3026 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 3027 const struct mlxsw_bus_info *mlxsw_bus_info, 3028 struct netlink_ext_ack *extack) 3029 { 3030 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3031 3032 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3033 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3034 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3035 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3036 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3037 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3038 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3039 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3040 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3041 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3042 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3043 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3044 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3045 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3046 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3047 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3048 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3049 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3050 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3051 3052 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3053 } 3054 3055 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3056 { 3057 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3058 3059 mlxsw_sp_ports_remove(mlxsw_sp); 3060 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3061 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3062 mlxsw_sp_dpipe_fini(mlxsw_sp); 3063 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3064 &mlxsw_sp->netdevice_nb); 3065 if (mlxsw_sp->clock) { 3066 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3067 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 
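/* ptp_state is finalized before the clock it depends on; the rest of
 * the teardown below mirrors mlxsw_sp_init() in exact reverse order.
 */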
3068 } 3069 mlxsw_sp_router_fini(mlxsw_sp); 3070 mlxsw_sp_acl_fini(mlxsw_sp); 3071 mlxsw_sp_nve_fini(mlxsw_sp); 3072 mlxsw_sp_afa_fini(mlxsw_sp); 3073 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3074 mlxsw_sp_switchdev_fini(mlxsw_sp); 3075 mlxsw_sp_span_fini(mlxsw_sp); 3076 mlxsw_sp_lag_fini(mlxsw_sp); 3077 mlxsw_sp_buffers_fini(mlxsw_sp); 3078 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3079 mlxsw_sp_traps_fini(mlxsw_sp); 3080 mlxsw_sp_policers_fini(mlxsw_sp); 3081 mlxsw_sp_fids_fini(mlxsw_sp); 3082 mlxsw_sp_kvdl_fini(mlxsw_sp); 3083 mlxsw_sp_parsing_fini(mlxsw_sp); 3084 } 3085 3086 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 3087 * 802.1Q FIDs 3088 */ 3089 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 3090 VLAN_VID_MASK - 1) 3091 3092 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3093 .used_max_mid = 1, 3094 .max_mid = MLXSW_SP_MID_MAX, 3095 .used_flood_tables = 1, 3096 .used_flood_mode = 1, 3097 .flood_mode = 3, 3098 .max_fid_flood_tables = 3, 3099 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3100 .used_max_ib_mc = 1, 3101 .max_ib_mc = 0, 3102 .used_max_pkey = 1, 3103 .max_pkey = 0, 3104 .used_kvd_sizes = 1, 3105 .kvd_hash_single_parts = 59, 3106 .kvd_hash_double_parts = 41, 3107 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3108 .swid_config = { 3109 { 3110 .used_type = 1, 3111 .type = MLXSW_PORT_SWID_TYPE_ETH, 3112 } 3113 }, 3114 }; 3115 3116 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3117 .used_max_mid = 1, 3118 .max_mid = MLXSW_SP_MID_MAX, 3119 .used_flood_tables = 1, 3120 .used_flood_mode = 1, 3121 .flood_mode = 3, 3122 .max_fid_flood_tables = 3, 3123 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3124 .used_max_ib_mc = 1, 3125 .max_ib_mc = 0, 3126 .used_max_pkey = 1, 3127 .max_pkey = 0, 3128 .used_kvh_xlt_cache_mode = 1, 3129 .kvh_xlt_cache_mode = 1, 3130 .swid_config = { 3131 { 3132 .used_type = 1, 3133 .type = MLXSW_PORT_SWID_TYPE_ETH, 3134 } 3135 }, 3136 }; 3137 3138 static void 3139 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3140 struct devlink_resource_size_params *kvd_size_params, 3141 struct devlink_resource_size_params *linear_size_params, 3142 struct devlink_resource_size_params *hash_double_size_params, 3143 struct devlink_resource_size_params *hash_single_size_params) 3144 { 3145 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3146 KVD_SINGLE_MIN_SIZE); 3147 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3148 KVD_DOUBLE_MIN_SIZE); 3149 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3150 u32 linear_size_min = 0; 3151 3152 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3153 MLXSW_SP_KVD_GRANULARITY, 3154 DEVLINK_RESOURCE_UNIT_ENTRY); 3155 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3156 kvd_size - single_size_min - 3157 double_size_min, 3158 MLXSW_SP_KVD_GRANULARITY, 3159 DEVLINK_RESOURCE_UNIT_ENTRY); 3160 devlink_resource_size_params_init(hash_double_size_params, 3161 double_size_min, 3162 kvd_size - single_size_min - 3163 linear_size_min, 3164 MLXSW_SP_KVD_GRANULARITY, 3165 DEVLINK_RESOURCE_UNIT_ENTRY); 3166 devlink_resource_size_params_init(hash_single_size_params, 3167 single_size_min, 3168 kvd_size - double_size_min - 3169 linear_size_min, 3170 MLXSW_SP_KVD_GRANULARITY, 3171 DEVLINK_RESOURCE_UNIT_ENTRY); 3172 } 3173 3174 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3175 { 3176 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3177 
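/* The KVD layout registered below is a top-level "kvd" resource with
 * linear, hash-single and hash-double children. The double-hash share is
 * derived from the profile ratio (41:59); a worked example with assumed
 * sizes (illustrative numbers only, real values come from the device):
 *
 *	kvd_size = 524288, linear_size = 98304
 *	double_size = (524288 - 98304) * 41 / (41 + 59) = 174653
 *	double_size = rounddown(174653, MLXSW_SP_KVD_GRANULARITY)
 *	single_size = kvd_size - double_size - linear_size
 */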
struct devlink_resource_size_params hash_single_size_params; 3178 struct devlink_resource_size_params hash_double_size_params; 3179 struct devlink_resource_size_params linear_size_params; 3180 struct devlink_resource_size_params kvd_size_params; 3181 u32 kvd_size, single_size, double_size, linear_size; 3182 const struct mlxsw_config_profile *profile; 3183 int err; 3184 3185 profile = &mlxsw_sp1_config_profile; 3186 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3187 return -EIO; 3188 3189 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3190 &linear_size_params, 3191 &hash_double_size_params, 3192 &hash_single_size_params); 3193 3194 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3195 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3196 kvd_size, MLXSW_SP_RESOURCE_KVD, 3197 DEVLINK_RESOURCE_ID_PARENT_TOP, 3198 &kvd_size_params); 3199 if (err) 3200 return err; 3201 3202 linear_size = profile->kvd_linear_size; 3203 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 3204 linear_size, 3205 MLXSW_SP_RESOURCE_KVD_LINEAR, 3206 MLXSW_SP_RESOURCE_KVD, 3207 &linear_size_params); 3208 if (err) 3209 return err; 3210 3211 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 3212 if (err) 3213 return err; 3214 3215 double_size = kvd_size - linear_size; 3216 double_size *= profile->kvd_hash_double_parts; 3217 double_size /= profile->kvd_hash_double_parts + 3218 profile->kvd_hash_single_parts; 3219 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 3220 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 3221 double_size, 3222 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3223 MLXSW_SP_RESOURCE_KVD, 3224 &hash_double_size_params); 3225 if (err) 3226 return err; 3227 3228 single_size = kvd_size - double_size - linear_size; 3229 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 3230 single_size, 3231 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3232 MLXSW_SP_RESOURCE_KVD, 3233 &hash_single_size_params); 3234 if (err) 3235 return err; 3236 3237 return 0; 3238 } 3239 3240 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3241 { 3242 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3243 struct devlink_resource_size_params kvd_size_params; 3244 u32 kvd_size; 3245 3246 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3247 return -EIO; 3248 3249 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3250 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 3251 MLXSW_SP_KVD_GRANULARITY, 3252 DEVLINK_RESOURCE_UNIT_ENTRY); 3253 3254 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3255 kvd_size, MLXSW_SP_RESOURCE_KVD, 3256 DEVLINK_RESOURCE_ID_PARENT_TOP, 3257 &kvd_size_params); 3258 } 3259 3260 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 3261 { 3262 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3263 struct devlink_resource_size_params span_size_params; 3264 u32 max_span; 3265 3266 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 3267 return -EIO; 3268 3269 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 3270 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 3271 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3272 3273 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 3274 max_span, MLXSW_SP_RESOURCE_SPAN, 3275 DEVLINK_RESOURCE_ID_PARENT_TOP, 3276 &span_size_params); 3277 } 3278 3279 static int mlxsw_sp1_resources_register(struct mlxsw_core 
*mlxsw_core)
3280 {
3281 int err;
3282
3283 err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3284 if (err)
3285 return err;
3286
3287 err = mlxsw_sp_resources_span_register(mlxsw_core);
3288 if (err)
3289 goto err_resources_span_register;
3290
3291 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3292 if (err)
3293 goto err_resources_counter_register;
3294
3295 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3296 if (err)
3297 goto err_resources_counter_register;
3298
3299 return 0;
3300
3301 err_resources_counter_register:
3302 err_resources_span_register:
3303 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3304 return err;
3305 }
3306
3307 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3308 {
3309 int err;
3310
3311 err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3312 if (err)
3313 return err;
3314
3315 err = mlxsw_sp_resources_span_register(mlxsw_core);
3316 if (err)
3317 goto err_resources_span_register;
3318
3319 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3320 if (err)
3321 goto err_resources_counter_register;
3322
3323 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3324 if (err)
3325 goto err_resources_counter_register;
3326
3327 return 0;
3328
3329 err_resources_counter_register:
3330 err_resources_span_register:
3331 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3332 return err;
3333 }
3334
3335 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3336 const struct mlxsw_config_profile *profile,
3337 u64 *p_single_size, u64 *p_double_size,
3338 u64 *p_linear_size)
3339 {
3340 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3341 u32 double_size;
3342 int err;
3343
3344 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3345 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
3346 return -EIO;
3347
3348 /* The hash part is what is left of the KVD without the
3349 * linear part. It is split into the single and double
3350 * sizes according to the parts ratio from the profile.
3351 * Both sizes must be multiples of the
3352 * granularity from the profile. In case the user
3353 * provided the sizes, they are obtained via devlink.
3354 */
3355 err = devlink_resource_size_get(devlink,
3356 MLXSW_SP_RESOURCE_KVD_LINEAR,
3357 p_linear_size);
3358 if (err)
3359 *p_linear_size = profile->kvd_linear_size;
3360
3361 err = devlink_resource_size_get(devlink,
3362 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3363 p_double_size);
3364 if (err) {
3365 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3366 *p_linear_size;
3367 double_size *= profile->kvd_hash_double_parts;
3368 double_size /= profile->kvd_hash_double_parts +
3369 profile->kvd_hash_single_parts;
3370 *p_double_size = rounddown(double_size,
3371 MLXSW_SP_KVD_GRANULARITY);
3372 }
3373
3374 err = devlink_resource_size_get(devlink,
3375 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3376 p_single_size);
3377 if (err)
3378 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3379 *p_double_size - *p_linear_size;
3380
3381 /* Check that the results are legal.
*/ 3382 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3383 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3384 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3385 return -EIO; 3386 3387 return 0; 3388 } 3389 3390 static int 3391 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 3392 struct devlink_param_gset_ctx *ctx) 3393 { 3394 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3395 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3396 3397 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 3398 return 0; 3399 } 3400 3401 static int 3402 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 3403 struct devlink_param_gset_ctx *ctx) 3404 { 3405 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3406 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3407 3408 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 3409 } 3410 3411 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 3412 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3413 "acl_region_rehash_interval", 3414 DEVLINK_PARAM_TYPE_U32, 3415 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3416 mlxsw_sp_params_acl_region_rehash_intrvl_get, 3417 mlxsw_sp_params_acl_region_rehash_intrvl_set, 3418 NULL), 3419 }; 3420 3421 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 3422 { 3423 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3424 union devlink_param_value value; 3425 int err; 3426 3427 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 3428 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3429 if (err) 3430 return err; 3431 3432 value.vu32 = 0; 3433 devlink_param_driverinit_value_set(devlink, 3434 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3435 value); 3436 return 0; 3437 } 3438 3439 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 3440 { 3441 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3442 mlxsw_sp2_devlink_params, 3443 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3444 } 3445 3446 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3447 struct sk_buff *skb, u8 local_port) 3448 { 3449 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3450 3451 skb_pull(skb, MLXSW_TXHDR_LEN); 3452 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3453 } 3454 3455 static struct mlxsw_driver mlxsw_sp1_driver = { 3456 .kind = mlxsw_sp1_driver_name, 3457 .priv_size = sizeof(struct mlxsw_sp), 3458 .fw_req_rev = &mlxsw_sp1_fw_rev, 3459 .fw_filename = MLXSW_SP1_FW_FILENAME, 3460 .init = mlxsw_sp1_init, 3461 .fini = mlxsw_sp_fini, 3462 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3463 .port_split = mlxsw_sp_port_split, 3464 .port_unsplit = mlxsw_sp_port_unsplit, 3465 .sb_pool_get = mlxsw_sp_sb_pool_get, 3466 .sb_pool_set = mlxsw_sp_sb_pool_set, 3467 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3468 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3469 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3470 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3471 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3472 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3473 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3474 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3475 .trap_init = mlxsw_sp_trap_init, 3476 .trap_fini = mlxsw_sp_trap_fini, 3477 .trap_action_set = mlxsw_sp_trap_action_set, 3478 .trap_group_init = 
mlxsw_sp_trap_group_init, 3479 .trap_group_set = mlxsw_sp_trap_group_set, 3480 .trap_policer_init = mlxsw_sp_trap_policer_init, 3481 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3482 .trap_policer_set = mlxsw_sp_trap_policer_set, 3483 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3484 .txhdr_construct = mlxsw_sp_txhdr_construct, 3485 .resources_register = mlxsw_sp1_resources_register, 3486 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3487 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3488 .txhdr_len = MLXSW_TXHDR_LEN, 3489 .profile = &mlxsw_sp1_config_profile, 3490 .res_query_enabled = true, 3491 .fw_fatal_enabled = true, 3492 .temp_warn_enabled = true, 3493 }; 3494 3495 static struct mlxsw_driver mlxsw_sp2_driver = { 3496 .kind = mlxsw_sp2_driver_name, 3497 .priv_size = sizeof(struct mlxsw_sp), 3498 .fw_req_rev = &mlxsw_sp2_fw_rev, 3499 .fw_filename = MLXSW_SP2_FW_FILENAME, 3500 .init = mlxsw_sp2_init, 3501 .fini = mlxsw_sp_fini, 3502 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3503 .port_split = mlxsw_sp_port_split, 3504 .port_unsplit = mlxsw_sp_port_unsplit, 3505 .sb_pool_get = mlxsw_sp_sb_pool_get, 3506 .sb_pool_set = mlxsw_sp_sb_pool_set, 3507 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3508 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3509 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3510 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3511 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3512 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3513 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3514 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3515 .trap_init = mlxsw_sp_trap_init, 3516 .trap_fini = mlxsw_sp_trap_fini, 3517 .trap_action_set = mlxsw_sp_trap_action_set, 3518 .trap_group_init = mlxsw_sp_trap_group_init, 3519 .trap_group_set = mlxsw_sp_trap_group_set, 3520 .trap_policer_init = mlxsw_sp_trap_policer_init, 3521 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3522 .trap_policer_set = mlxsw_sp_trap_policer_set, 3523 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3524 .txhdr_construct = mlxsw_sp_txhdr_construct, 3525 .resources_register = mlxsw_sp2_resources_register, 3526 .params_register = mlxsw_sp2_params_register, 3527 .params_unregister = mlxsw_sp2_params_unregister, 3528 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3529 .txhdr_len = MLXSW_TXHDR_LEN, 3530 .profile = &mlxsw_sp2_config_profile, 3531 .res_query_enabled = true, 3532 .fw_fatal_enabled = true, 3533 .temp_warn_enabled = true, 3534 }; 3535 3536 static struct mlxsw_driver mlxsw_sp3_driver = { 3537 .kind = mlxsw_sp3_driver_name, 3538 .priv_size = sizeof(struct mlxsw_sp), 3539 .fw_req_rev = &mlxsw_sp3_fw_rev, 3540 .fw_filename = MLXSW_SP3_FW_FILENAME, 3541 .init = mlxsw_sp3_init, 3542 .fini = mlxsw_sp_fini, 3543 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, 3544 .port_split = mlxsw_sp_port_split, 3545 .port_unsplit = mlxsw_sp_port_unsplit, 3546 .sb_pool_get = mlxsw_sp_sb_pool_get, 3547 .sb_pool_set = mlxsw_sp_sb_pool_set, 3548 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3549 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3550 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3551 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3552 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3553 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3554 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3555 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3556 .trap_init = mlxsw_sp_trap_init, 3557 .trap_fini = 
mlxsw_sp_trap_fini, 3558 .trap_action_set = mlxsw_sp_trap_action_set, 3559 .trap_group_init = mlxsw_sp_trap_group_init, 3560 .trap_group_set = mlxsw_sp_trap_group_set, 3561 .trap_policer_init = mlxsw_sp_trap_policer_init, 3562 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3563 .trap_policer_set = mlxsw_sp_trap_policer_set, 3564 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3565 .txhdr_construct = mlxsw_sp_txhdr_construct, 3566 .resources_register = mlxsw_sp2_resources_register, 3567 .params_register = mlxsw_sp2_params_register, 3568 .params_unregister = mlxsw_sp2_params_unregister, 3569 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3570 .txhdr_len = MLXSW_TXHDR_LEN, 3571 .profile = &mlxsw_sp2_config_profile, 3572 .res_query_enabled = true, 3573 .fw_fatal_enabled = true, 3574 .temp_warn_enabled = true, 3575 }; 3576 3577 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3578 { 3579 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3580 } 3581 3582 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, 3583 struct netdev_nested_priv *priv) 3584 { 3585 int ret = 0; 3586 3587 if (mlxsw_sp_port_dev_check(lower_dev)) { 3588 priv->data = (void *)netdev_priv(lower_dev); 3589 ret = 1; 3590 } 3591 3592 return ret; 3593 } 3594 3595 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 3596 { 3597 struct netdev_nested_priv priv = { 3598 .data = NULL, 3599 }; 3600 3601 if (mlxsw_sp_port_dev_check(dev)) 3602 return netdev_priv(dev); 3603 3604 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv); 3605 3606 return (struct mlxsw_sp_port *)priv.data; 3607 } 3608 3609 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 3610 { 3611 struct mlxsw_sp_port *mlxsw_sp_port; 3612 3613 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 3614 return mlxsw_sp_port ? 
mlxsw_sp_port->mlxsw_sp : NULL; 3615 } 3616 3617 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 3618 { 3619 struct netdev_nested_priv priv = { 3620 .data = NULL, 3621 }; 3622 3623 if (mlxsw_sp_port_dev_check(dev)) 3624 return netdev_priv(dev); 3625 3626 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 3627 &priv); 3628 3629 return (struct mlxsw_sp_port *)priv.data; 3630 } 3631 3632 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 3633 { 3634 struct mlxsw_sp_port *mlxsw_sp_port; 3635 3636 rcu_read_lock(); 3637 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 3638 if (mlxsw_sp_port) 3639 dev_hold(mlxsw_sp_port->dev); 3640 rcu_read_unlock(); 3641 return mlxsw_sp_port; 3642 } 3643 3644 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 3645 { 3646 dev_put(mlxsw_sp_port->dev); 3647 } 3648 3649 int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) 3650 { 3651 char mprs_pl[MLXSW_REG_MPRS_LEN]; 3652 int err = 0; 3653 3654 mutex_lock(&mlxsw_sp->parsing.lock); 3655 3656 if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref)) 3657 goto out_unlock; 3658 3659 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH, 3660 mlxsw_sp->parsing.vxlan_udp_dport); 3661 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 3662 if (err) 3663 goto out_unlock; 3664 3665 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH; 3666 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1); 3667 3668 out_unlock: 3669 mutex_unlock(&mlxsw_sp->parsing.lock); 3670 return err; 3671 } 3672 3673 void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp) 3674 { 3675 char mprs_pl[MLXSW_REG_MPRS_LEN]; 3676 3677 mutex_lock(&mlxsw_sp->parsing.lock); 3678 3679 if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref)) 3680 goto out_unlock; 3681 3682 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH, 3683 mlxsw_sp->parsing.vxlan_udp_dport); 3684 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 3685 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 3686 3687 out_unlock: 3688 mutex_unlock(&mlxsw_sp->parsing.lock); 3689 } 3690 3691 int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, 3692 __be16 udp_dport) 3693 { 3694 char mprs_pl[MLXSW_REG_MPRS_LEN]; 3695 int err; 3696 3697 mutex_lock(&mlxsw_sp->parsing.lock); 3698 3699 mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth, 3700 be16_to_cpu(udp_dport)); 3701 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 3702 if (err) 3703 goto out_unlock; 3704 3705 mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport); 3706 3707 out_unlock: 3708 mutex_unlock(&mlxsw_sp->parsing.lock); 3709 return err; 3710 } 3711 3712 static void 3713 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 3714 struct net_device *lag_dev) 3715 { 3716 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 3717 struct net_device *upper_dev; 3718 struct list_head *iter; 3719 3720 if (netif_is_bridge_port(lag_dev)) 3721 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 3722 3723 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 3724 if (!netif_is_bridge_port(upper_dev)) 3725 continue; 3726 br_dev = netdev_master_upper_dev_get(upper_dev); 3727 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 3728 } 3729 } 3730 3731 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 3732 { 3733 char sldr_pl[MLXSW_REG_SLDR_LEN]; 3734 3735 
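/* SLDR (packed below) creates and destroys the LAG object itself, while
 * SLCOR, used by the helpers that follow, manages the member ("collector")
 * ports. A plausible bring-up sequence for a new bond, sketched with the
 * helpers defined here (illustrative only):
 *
 *	mlxsw_sp_lag_create(mlxsw_sp, lag_id);
 *	mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
 *	mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
 */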
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}

int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
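/* Sketch of how the reference counting above pairs up (the callers are
 * hypothetical): only the first increment writes MPRS to bump the
 * parsing depth, later increments merely take a reference, and the
 * default depth is restored when the last user drops its reference:
 *
 *	mlxsw_sp_parsing_depth_inc(mlxsw_sp);	(* writes MPRS, ref = 1 *)
 *	mlxsw_sp_parsing_depth_inc(mlxsw_sp);	(* ref = 2, no write *)
 *	mlxsw_sp_parsing_depth_dec(mlxsw_sp);	(* ref = 1, no write *)
 *	mlxsw_sp_parsing_depth_dec(mlxsw_sp);	(* ref = 0, restores default *)
 */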
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if one exists */
	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
					     lag_dev, extack);
	if (err)
		goto err_router_join;

	return 0;

err_router_join:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are members of
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
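/* A note on the two register families used above: SLDR operates on the
 * LAG descriptor itself (create/destroy and which local ports frames
 * may be distributed to), while SLCOR controls a member port's
 * collector side. This is why joining a LAG only maps the port via
 * SLCOR; the port is added to the SLDR distribution list separately,
 * once the bond reports Tx as enabled (see
 * mlxsw_sp_port_lag_col_dist_enable() below).
 */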
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}
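/* Summary of the VxLAN-on-bridge rules enforced above: multicast
 * snooping must be disabled on the bridge, a VLAN-unaware bridge may
 * hold at most one VxLAN device, and in a VLAN-aware bridge no two
 * VxLAN devices may share the same PVID/egress-untagged VLAN, because
 * that VLAN is what a VNI is mapped to and a VLAN can only be mapped
 * to a single VNI.
 */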
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
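/* As in other switchdev drivers, the handler above is split in two
 * phases: NETDEV_PRECHANGEUPPER only validates the requested topology
 * change and may veto it with an extack message, while
 * NETDEV_CHANGEUPPER performs the actual join/leave. A rough sketch of
 * the flow for enslaving a port to a LAG:
 *
 *	NETDEV_PRECHANGEUPPER: mlxsw_sp_master_lag_check() - veto point
 *	NETDEV_CHANGEUPPER, linking: mlxsw_sp_port_lag_join()
 *	NETDEV_CHANGEUPPER, !linking: mlxsw_sp_port_lag_leave()
 */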
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
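/* Events on the LAG device itself are simply replayed against every
 * member port owned by this driver; lowers of other drivers in the
 * same bond are skipped by the mlxsw_sp_port_dev_check() test above.
 */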
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}
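/* VRF enslavement is intentionally not vetoed by the per-device
 * handlers above; mlxsw_sp_is_vrf_event() lets the main notifier divert
 * those CHANGEUPPER events to mlxsw_sp_netdevice_vrf_event() instead
 * (see mlxsw_sp_netdevice_event() below).
 */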
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
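/* Return value convention for the notifier above: notifier_from_errno()
 * folds a negative errno into NOTIFY_BAD, so vetoes raised by the
 * NETDEV_PRECHANGEUPPER handlers propagate back to whoever initiated
 * the topology change, together with the extack message.
 */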
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
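/* Registration order in mlxsw_sp_module_init() is deliberate: the core
 * drivers must exist before the PCI drivers that probe against them.
 * The error labels unwind in the exact reverse order of registration,
 * and mlxsw_sp_module_exit() mirrors the same reverse order.
 */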
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);