// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

/* Firmware revision the driver expects on Spectrum-1 devices. */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
#define MLXSW_SP1_FWREV_SUBMINOR 2406
/* Oldest minor FW revision from which a FW-initiated reset works.
 * NOTE(review): presumably consulted when deciding whether flashed FW can
 * be activated without a reboot — confirm against the FW flashing path.
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
        .major = MLXSW_SP1_FWREV_MAJOR,
        .minor = MLXSW_SP1_FWREV_MINOR,
        .subminor = MLXSW_SP1_FWREV_SUBMINOR,
        .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Expands to e.g. "mellanox/mlxsw_spectrum-13.2008.2406.mfa2". */
#define MLXSW_SP1_FW_FILENAME \
        "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP1_FWREV_MINOR) \
        "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

/* Firmware revision the driver expects on Spectrum-2 devices. */
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
#define MLXSW_SP2_FWREV_SUBMINOR 2406

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
        .major = MLXSW_SP2_FWREV_MAJOR,
        .minor = MLXSW_SP2_FWREV_MINOR,
        .subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
        "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP2_FWREV_MINOR) \
        "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

/* Firmware revision the driver expects on Spectrum-3 devices. */
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
#define MLXSW_SP3_FWREV_SUBMINOR 2406

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
        .major = MLXSW_SP3_FWREV_MAJOR,
        .minor = MLXSW_SP3_FWREV_MINOR,
        .subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
        "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
        "." __stringify(MLXSW_SP3_FWREV_MINOR) \
        "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

/* Per-ASIC masks of the MAC bits shared by all ports of a device.
 * NOTE(review): presumably consumed by code that derives/validates
 * per-interface MACs from the base MAC — confirm against the router code.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

/* Query the packet/byte counts of a flow counter via the MGPC register
 * (NOP opcode, so the counter is read without being cleared). Either
 * output pointer may be NULL if the caller does not need that value.
 * Returns 0 on success or a negative errno from the register query.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
                              unsigned int counter_index, u64 *packets,
                              u64 *bytes)
{
        char mgpc_pl[MLXSW_REG_MGPC_LEN];
        int err;

        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
                            MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
        if (err)
                return err;
        if (packets)
                *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
        if (bytes)
                *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
        return 0;
}

/* Zero a flow counter in hardware using the MGPC CLEAR opcode. */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
                                       unsigned int counter_index)
{
        char mgpc_pl[MLXSW_REG_MGPC_LEN];

        mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
                            MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

/* Allocate a flow counter from the flow sub-pool and clear it so the
 * caller starts from zero. On clear failure the counter is released
 * again, so no counter leaks on the error path.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
                                unsigned int *p_counter_index)
{
        int err;

        err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
                                     p_counter_index);
        if (err)
                return err;
        err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
        if (err)
                goto err_counter_clear;
        return 0;

err_counter_clear:
        mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
                              *p_counter_index);
        return err;
}

/* Return a flow counter to the flow sub-pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
                                unsigned int counter_index)
{
        mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
                              counter_index);
}

/* Prepend the mlxsw TX header (defined by the MLXSW_ITEM32 fields above)
 * to an outgoing control packet. The caller must have reserved
 * MLXSW_TXHDR_LEN bytes of headroom (see skb_cow_head() in the xmit path).
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
                                     const struct mlxsw_tx_info *tx_info)
{
        char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

        memset(txhdr, 0, MLXSW_TXHDR_LEN);

        mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
        mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
        mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
        mlxsw_tx_hdr_swid_set(txhdr, 0);
        mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
        mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
        mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

/* Map a bridge STP state to the corresponding SPMS register state.
 * LISTENING/DISABLED/BLOCKING all collapse to DISCARDING in hardware.
 * BUG()s on an unknown state since callers only pass BR_STATE_* values.
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
        switch (state) {
        case BR_STATE_FORWARDING:
                return MLXSW_REG_SPMS_STATE_FORWARDING;
        case BR_STATE_LEARNING:
                return MLXSW_REG_SPMS_STATE_LEARNING;
        case BR_STATE_LISTENING:
        case BR_STATE_DISABLED:
        case BR_STATE_BLOCKING:
                return MLXSW_REG_SPMS_STATE_DISCARDING;
        default:
                BUG();
        }
}

/* Set the STP state of a single VID on a port via the SPMS register.
 * The payload is heap-allocated because MLXSW_REG_SPMS_LEN is too large
 * for the stack.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                              u8 state)
{
        enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spms_pl;
        int err;

        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
        if (!spms_pl)
                return -ENOMEM;
        mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
        mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
        kfree(spms_pl);
        return err;
}

/* Read the device base MAC from the SPAD register into mlxsw_sp->base_mac. */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
        char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
        int err;

        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
        if (err)
                return err;
        mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
        return 0;
}

/* Set the administrative (up/down) state of a port via the PAOS register. */
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                   bool is_up)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char paos_pl[MLXSW_REG_PAOS_LEN];

        mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
                            is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
                            MLXSW_PORT_ADMIN_STATUS_DOWN);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

/* Program the port's hardware MAC address via the PPAD register. */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      unsigned char *addr)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char ppad_pl[MLXSW_REG_PPAD_LEN];

        mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
        mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

/* Derive the port's MAC from the device base MAC by adding the local
 * port number to the last byte, then program it into hardware. The
 * low base-MAC bits are masked out (see mlxsw_sp*_mac_mask) so the
 * addition cannot carry out of the last byte.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

        ether_addr_copy(addr, mlxsw_sp->base_mac);
        addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
        return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

/* Query the maximum MTU supported by the port (PMTU register, packed
 * with MTU 0 which makes it a read-only query).
 */
static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmtu_pl[MLXSW_REG_PMTU_LEN];
        int err;

        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
        if (err)
                return err;

        *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
        return 0;
}

/* Set the port MTU. The hardware MTU accounts for the mlxsw TX header
 * and the Ethernet header, so both are added to the L3 MTU before the
 * bounds check against the cached hardware maximum.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmtu_pl[MLXSW_REG_PMTU_LEN];

        mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
        if (mtu > mlxsw_sp_port->max_mtu)
                return -EINVAL;

        mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

/* Assign the port to a switch partition (SWID) via the PSPA register. */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pspa_pl[MLXSW_REG_PSPA_LEN];

        mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

/* Enable/disable Virtual Port (VLAN-aware) mode on a port (SVPE register). */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char svpe_pl[MLXSW_REG_SVPE_LEN];

        mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

/* Enable/disable FDB learning for a single VID on a port (SPVMLR
 * register). Heap-allocated payload: MLXSW_REG_SPVMLR_LEN is too large
 * for the stack.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                                   bool learn_enable)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spvmlr_pl;
        int err;

        spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
        if (!spvmlr_pl)
                return -ENOMEM;
        mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
                              learn_enable);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
        kfree(spvmlr_pl);
        return err;
}

/* Translate an Ethertype (802.1Q / 802.1AD) to the SVER type encoding
 * used by the SPEVET/SPVID registers. Returns -EINVAL for any other
 * Ethertype.
 */
int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
        switch (ethtype) {
        case ETH_P_8021Q:
                *p_sver_type = 0;
                break;
        case ETH_P_8021AD:
                *p_sver_type = 1;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/* Set the Ethertype used for packets transmitted tagged by the port
 * (SPEVET register).
 */
int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 ethtype)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spevet_pl[MLXSW_REG_SPEVET_LEN];
        u8 sver_type;
        int err;

        err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
        if (err)
                return err;

        mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

/* Program the port's PVID for a given tagging Ethertype (SPVID register). */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                    u16 vid, u16 ethtype)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char
spvid_pl[MLXSW_REG_SPVID_LEN];
        u8 sver_type;
        int err;

        err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
        if (err)
                return err;

        mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
                             sver_type);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

/* Allow or disallow untagged frames on ingress (SPAFT register). */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                            bool allow)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char spaft_pl[MLXSW_REG_SPAFT_LEN];

        mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

/* Set the port's PVID. A PVID of 0 means "no PVID": untagged frames
 * are then dropped at ingress instead of being classified to a VLAN.
 * On failure to re-allow untagged traffic, the previous PVID is
 * restored (best effort) before returning the error.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                           u16 ethtype)
{
        int err;

        if (!vid) {
                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
                if (err)
                        return err;
        } else {
                err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
                if (err)
                        return err;
                err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
                if (err)
                        goto err_port_allow_untagged_set;
        }

        mlxsw_sp_port->pvid = vid;
        return 0;

err_port_allow_untagged_set:
        __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
        return err;
}

/* Map the port's local port number to a system port (SSPR register). */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char sspr_pl[MLXSW_REG_SSPR_LEN];

        mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

/* Read a port's module/lane mapping from the PMLP register and validate
 * the configurations the driver supports: width a power of 2, all lanes
 * on the same module, TX lanes sequential from 0 and (when RX/TX are
 * reported separately) equal to the RX lanes. Returns -EINVAL with a
 * diagnostic for any unsupported layout.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                              struct mlxsw_sp_port_mapping *port_mapping)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        bool separate_rxtx;
        u8 module;
        u8 width;
        int err;
        int i;

        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
        if (err)
                return err;
        module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
        width = mlxsw_reg_pmlp_width_get(pmlp_pl);
        separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

        if (width && !is_power_of_2(width)) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
                        local_port);
                return -EINVAL;
        }

        for (i = 0; i < width; i++) {
                if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
                                local_port);
                        return -EINVAL;
                }
                if (separate_rxtx &&
                    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
                    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
                                local_port);
                        return -EINVAL;
                }
                if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
                        dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
                                local_port);
                        return -EINVAL;
                }
        }

        port_mapping->module = module;
        port_mapping->width = width;
        port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
        return 0;
}

/* Write the port's cached module/lane mapping back to the PMLP register.
 * Each lane is mapped to the same module, with consecutive lane numbers
 * starting at port_mapping->lane (the TX setter covers RX as well).
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int i;

        mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
        for (i = 0; i < port_mapping->width; i++) {
                mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
                mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
        }

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* Unmap the port from its module by writing a zero-width PMLP mapping. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pmlp_pl[MLXSW_REG_PMLP_LEN];

        mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
        mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

/* ndo_open: administratively enable the port, then start the TX queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err;

        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
        if (err)
                return err;
        netif_start_queue(dev);
        return 0;
}

/* ndo_stop: stop the TX queue, then administratively disable the port. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        netif_stop_queue(dev);
        return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

/* ndo_start_xmit: prepend the mlxsw TX header and hand the packet to the
 * core for transmission, maintaining per-CPU TX statistics. Packets that
 * cannot get headroom or fail transmission are dropped (NETDEV_TX_OK);
 * a busy core returns NETDEV_TX_BUSY so the stack requeues the skb.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
        const struct mlxsw_tx_info tx_info = {
                .local_port = mlxsw_sp_port->local_port,
                .is_emad = false,
        };
        u64 len;
        int err;

        if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
                return NETDEV_TX_BUSY;

        if (eth_skb_pad(skb)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                return NETDEV_TX_OK;
        }

        mlxsw_sp_txhdr_construct(skb, &tx_info);
        /* TX header is consumed by HW on the way so we shouldn't count its
         * bytes as being sent.
         */
        len = skb->len - MLXSW_TXHDR_LEN;

        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
        err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

        if (!err) {
                pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: intentionally empty — RX filtering is handled by the
 * switch hardware, not by the netdev layer.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

/* ndo_set_mac_address: validate the new address, program it into the
 * port hardware, and only then update the netdev copy.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct sockaddr *addr = p;
        int err;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
        if (err)
                return err;
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        return 0;
}

/* ndo_change_mtu: resize the port headroom buffers for the new MTU
 * first, then program the MTU itself. If the MTU write fails, the
 * original headroom configuration is restored.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_hdroom orig_hdroom;
        struct mlxsw_sp_hdroom hdroom;
        int err;

        orig_hdroom = *mlxsw_sp_port->hdroom;

        hdroom = orig_hdroom;
        hdroom.mtu = mtu;
        mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

        err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
        if (err) {
                netdev_err(dev, "Failed to configure port's headroom\n");
                return err;
        }

        err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
        if (err)
                goto err_port_mtu_set;
        dev->mtu = mtu;
        return 0;

err_port_mtu_set:
        mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
        return err;
}

/* Sum the software (CPU-path) per-CPU counters into @stats, using the
 * u64_stats seqcount so 64-bit counters are read consistently on 32-bit
 * machines. Always returns 0.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port_pcpu_stats *p;
        u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
        u32 tx_dropped = 0;
        unsigned int start;
        int i;

        for_each_possible_cpu(i) {
                p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        rx_packets = p->rx_packets;
                        rx_bytes = p->rx_bytes;
                        tx_packets = p->tx_packets;
                        tx_bytes = p->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));

                stats->rx_packets += rx_packets;
                stats->rx_bytes += rx_bytes;
                stats->tx_packets += tx_packets;
                stats->tx_bytes += tx_bytes;
                /* tx_dropped is u32, updated without syncp protection. */
                tx_dropped += p->tx_dropped;
        }
        stats->tx_dropped = tx_dropped;
        return 0;
}

/* ndo_has_offload_stats: only CPU-hit statistics are supported. */
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return true;
        }

        return false;
}

/* ndo_get_offload_stats: report CPU-hit (software path) statistics. */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
                                           void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlxsw_sp_port_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

/* Query one PPCNT counter group/priority for the port into @ppcnt_pl. */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
                                int prio, char *ppcnt_pl)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
        return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

/* Fill @stats from the IEEE 802.3 PPCNT counter group. rx_errors is a
 * derived sum of CRC, alignment and length errors.
 */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
                                      struct rtnl_link_stats64 *stats)
{
        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
        int err;

        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
                                          0, ppcnt_pl);
        if (err)
                goto out;

        stats->tx_packets =
                mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
        stats->rx_packets =
                mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
        stats->tx_bytes =
                mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
        stats->rx_bytes =
                mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
        stats->multicast =
                mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

        stats->rx_crc_errors =
                mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
        stats->rx_frame_errors =
                mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

        stats->rx_length_errors = (
                mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
                mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
                mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

        stats->rx_errors = (stats->rx_crc_errors +
                stats->rx_frame_errors + stats->rx_length_errors);

out:
        return err;
}

/* Gather extended per-port statistics (ECN marks, per-TC WRED/tail
 * drops and backlog, per-priority TX counters). Individual query
 * failures are tolerated: the affected fields simply keep their
 * previous values.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
                            struct mlxsw_sp_port_xstats *xstats)
{
        char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
        int err, i;

        err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
                                          ppcnt_pl);
        if (!err)
                xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

        for (i = 0; i < TC_MAX_QUEUE; i++) {
                err = mlxsw_sp_port_get_stats_raw(dev,
                                                  MLXSW_REG_PPCNT_TC_CONG_TC,
                                                  i, ppcnt_pl);
                if (!err)
                        xstats->wred_drop[i] =
                                mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
                                                  i, ppcnt_pl);
                if (err)
                        continue;

                xstats->backlog[i] =
                        mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
                xstats->tail_drop[i] =
                        mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
        }

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
                                                  i, ppcnt_pl);
                if (err)
                        continue;

                xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
                xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
        }
}

/* Delayed-work handler: periodically refresh the cached HW statistics
 * while the carrier is up, then re-arm itself.
 */
static void update_stats_cache(struct work_struct *work)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                container_of(work, struct mlxsw_sp_port,
                             periodic_hw_stats.update_dw.work);

        if (!netif_carrier_ok(mlxsw_sp_port->dev))
                /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
                 * necessary when port goes down.
                 */
                goto out;

        mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
                                   &mlxsw_sp_port->periodic_hw_stats.stats);
        mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
                                    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
        mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
                               MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
                          struct rtnl_link_stats64 *stats)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

/* Program VLAN membership/untagged state for a contiguous VID range on
 * a port (SPVM register). Heap-allocated payload: MLXSW_REG_SPVM_LEN is
 * too large for the stack.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                    u16 vid_begin, u16 vid_end,
                                    bool is_member, bool untagged)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char *spvm_pl;
        int err;

        spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
        if (!spvm_pl)
                return -ENOMEM;

        mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
                            vid_end, is_member, untagged);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
        kfree(spvm_pl);
        return err;
}

/* Set VLAN membership for [vid_begin, vid_end], chunked into the
 * maximum number of records a single SPVM write can carry.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
                           u16 vid_end, bool is_member, bool untagged)
{
        u16 vid, vid_e;
        int err;

        for (vid = vid_begin; vid <= vid_end;
             vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
                vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
                            vid_end);

                err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
                                               is_member, untagged);
                if (err)
                        return err;
        }

        return 0;
}

/* Destroy all port VLANs; the default VID is kept unless @flush_default. */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
                                     bool flush_default)
{
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

        list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
                                 &mlxsw_sp_port->vlans_list, list) {
                if (!flush_default &&
                    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
                        continue;
                mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
        }
}

/* Detach a port VLAN from whatever it is joined to — a bridge port or,
 * failing that, a router FID — before it is destroyed.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
        if (mlxsw_sp_port_vlan->bridge_port)
                mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
        else if (mlxsw_sp_port_vlan->fid)
                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

/* Create a port VLAN object for @vid: program VLAN membership in
 * hardware (untagged only for the default VID), allocate the tracking
 * structure and link it into the port's VLAN list. Returns the new
 * object, ERR_PTR(-EEXIST) if the VID already exists, or another
 * ERR_PTR on failure (hardware membership is rolled back).
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
        bool untagged = vid == MLXSW_SP_DEFAULT_VID;
        int err;

        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
        if (mlxsw_sp_port_vlan)
                return ERR_PTR(-EEXIST);

        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
        if (err)
                return ERR_PTR(err);

        mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
        if (!mlxsw_sp_port_vlan) {
                err = -ENOMEM;
                goto err_port_vlan_alloc;
        }

        mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
        mlxsw_sp_port_vlan->vid = vid;
        list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

        return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
        return ERR_PTR(err);
}

/* Tear down a port VLAN object: leave bridge/router, unlink, free, and
 * remove the VLAN membership from hardware.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
        u16 vid = mlxsw_sp_port_vlan->vid;

        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
        list_del(&mlxsw_sp_port_vlan->list);
        kfree(mlxsw_sp_port_vlan);
        mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

/* ndo_vlan_rx_add_vid handler. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
                                 __be16 __always_unused proto, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        /* VLAN 0 is added to HW filter when device goes up, but it is
         * reserved in our case, so simply return.
         */
        if (!vid)
                return 0;

        return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

/* ndo_vlan_rx_kill_vid handler. */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
                                  __be16 __always_unused proto, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

        /* VLAN 0 is removed from HW filter when device goes down, but
         * it is reserved in our case, so simply return.
         */
        if (!vid)
                return 0;

        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
        if (!mlxsw_sp_port_vlan)
                return 0;
        mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

        return 0;
}

/* Dispatch a TC block bind/unbind to the matching clsact or qevent
 * handler based on the binder type.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct flow_block_offload *f)
{
        switch (f->binder_type) {
        case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
                return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
        case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
                return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
        case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
                return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
        default:
                return -EOPNOTSUPP;
        }
}

/* ndo_setup_tc: dispatch offload setup by TC setup type. */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
                             void *type_data)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_BLOCK:
                return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_RED:
                return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_PRIO:
                return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_ETS:
                return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_TBF:
                return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
        case TC_SETUP_QDISC_FIFO:
                return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

/* Toggle the NETIF_F_HW_TC feature. Disabling is refused while offloaded
 * tc filters are still installed on either flow block.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

        if (!enable) {
                if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
                    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
                        netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
                        return -EINVAL;
                }
                mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
                mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
        } else {
                mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
                mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
        }
        return 0;
}

/* Toggle physical loopback (PPLR register). The port is taken
 * administratively down around the register write when it is running.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        char pplr_pl[MLXSW_REG_PPLR_LEN];
        int err;

        if (netif_running(dev))
                mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

        mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
        err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
                              pplr_pl);

        if (netif_running(dev))
                mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

        return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

/* Run @feature_handler if @feature changed in @wanted_features and, on
 * success, mirror the new state into dev->features.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
                                   netdev_features_t wanted_features,
                                   netdev_features_t feature,
                                   mlxsw_sp_feature_handler feature_handler)
{
        netdev_features_t changes = wanted_features ^ dev->features;
        bool enable = !!(wanted_features & feature);
        int err;

        if (!(changes & feature))
                return 0;

        err = feature_handler(dev, enable);
        if (err) {
                netdev_err(dev, "%s feature %pNF failed, err %d\n",
                           enable ? "Enable" : "Disable", &feature, err);
                return err;
        }

        if (enable)
                dev->features |= feature;
        else
                dev->features &= ~feature;

        return 0;
}

/* ndo_set_features: apply each supported feature toggle; on any failure
 * dev->features is restored and -EINVAL returned.
 */
static int mlxsw_sp_set_features(struct net_device *dev,
                                 netdev_features_t features)
{
        netdev_features_t oper_features = dev->features;
        int err = 0;

        err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
                                       mlxsw_sp_feature_hw_tc);
        err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
                                       mlxsw_sp_feature_loopback);

        if (err) {
                dev->features = oper_features;
                return -EINVAL;
        }

        return 0;
}

/* ndo_get_devlink_port: return the devlink port backing this netdev. */
static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
                                                mlxsw_sp_port->local_port);
}

/* SIOCSHWTSTAMP: copy the config from user space, apply it through the
 * per-ASIC PTP ops, and copy the (possibly adjusted) config back.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct ifreq *ifr)
{
        struct hwtstamp_config config;
        int err;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
                                                             &config);
        if (err)
                return err;

        if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
                return -EFAULT;

        return 0;
}

/* SIOCGHWTSTAMP: read the current hardware timestamping config via the
 * per-ASIC PTP ops and copy it to user space.
 */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct ifreq *ifr)
{
        struct hwtstamp_config config;
        int err;

        err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
                                                             &config);
        if (err)
                return err;

        if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
                return -EFAULT;

        return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct
mlxsw_sp_port *mlxsw_sp_port) 1174 { 1175 struct hwtstamp_config config = {0}; 1176 1177 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); 1178 } 1179 1180 static int 1181 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1182 { 1183 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1184 1185 switch (cmd) { 1186 case SIOCSHWTSTAMP: 1187 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); 1188 case SIOCGHWTSTAMP: 1189 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); 1190 default: 1191 return -EOPNOTSUPP; 1192 } 1193 } 1194 1195 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 1196 .ndo_open = mlxsw_sp_port_open, 1197 .ndo_stop = mlxsw_sp_port_stop, 1198 .ndo_start_xmit = mlxsw_sp_port_xmit, 1199 .ndo_setup_tc = mlxsw_sp_setup_tc, 1200 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 1201 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 1202 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 1203 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 1204 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats, 1205 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, 1206 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 1207 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 1208 .ndo_set_features = mlxsw_sp_set_features, 1209 .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, 1210 .ndo_do_ioctl = mlxsw_sp_port_ioctl, 1211 }; 1212 1213 static int 1214 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) 1215 { 1216 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1217 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; 1218 const struct mlxsw_sp_port_type_speed_ops *ops; 1219 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1220 u32 eth_proto_cap_masked; 1221 int err; 1222 1223 ops = mlxsw_sp->port_type_speed_ops; 1224 1225 /* Set advertised speeds to speeds supported by both the driver 1226 * and the device. 
1227 */ 1228 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 1229 0, false); 1230 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1231 if (err) 1232 return err; 1233 1234 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, 1235 ð_proto_admin, ð_proto_oper); 1236 eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap); 1237 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 1238 eth_proto_cap_masked, 1239 mlxsw_sp_port->link.autoneg); 1240 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1241 } 1242 1243 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed) 1244 { 1245 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; 1246 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1247 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1248 u32 eth_proto_oper; 1249 int err; 1250 1251 port_type_speed_ops = mlxsw_sp->port_type_speed_ops; 1252 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, 1253 mlxsw_sp_port->local_port, 0, 1254 false); 1255 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1256 if (err) 1257 return err; 1258 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL, 1259 ð_proto_oper); 1260 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper); 1261 return 0; 1262 } 1263 1264 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 1265 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 1266 bool dwrr, u8 dwrr_weight) 1267 { 1268 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1269 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1270 1271 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1272 next_index); 1273 mlxsw_reg_qeec_de_set(qeec_pl, true); 1274 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 1275 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 1276 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1277 } 1278 1279 int mlxsw_sp_port_ets_maxrate_set(struct 
mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	/* Program the max shaper (rate + burst size) on one QEEC element. */
	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Program the min shaper (guaranteed bandwidth) on one QEEC element. */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

/* Map switch priority @switch_prio to traffic class @tclass via QTCT. */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

/* Build the default egress scheduling configuration for a port:
 * group -> subgroups -> TCs (unicast i and multicast i + 8), disable all
 * max shapers, set the multicast min shapers and map all priorities to
 * TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarcy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs 8..15 are the multicast companions of TCs 0..7. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

/* Enable/disable per-port multicast-aware TC mapping mode via QTCTM. */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

/* Snapshot the module's current overheat counter so later reads can report
 * the delta since port creation.
 */
static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
						    &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

/* Configure which EtherTypes (802.1ad and/or 802.1q) the port treats as
 * VLAN tagged, via the SPVC register.
 */
int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}

/* Create and register one front-panel port: allocate the netdev, program
 * the hardware (SWID, MAC, MTU, buffers, ETS, DCB, FIDs, VLANs, PVID) and
 * register the netdev. On failure, unwinds in strict reverse order via the
 * goto ladder at the bottom; err_* labels undo everything done *before*
 * the failing step.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	struct net_device *dev;
	bool splittable;
	int err;

	/* A port is splittable when it uses more than one lane and is not
	 * itself the product of a split.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / lanes,
				   splittable, lanes,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
	 * only packets with 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

/* Tear down one port: mirror of mlxsw_sp_port_create(), in reverse order. */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

/* Allocate and register the (netdev-less) CPU port with the core. */
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

/* Unregister and free the CPU port. */
static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

/* Return true when a port object exists for @local_port. */
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

/* Remove every created front-panel port, then the CPU port, and free the
 * ports array. Local port 0 is the CPU port, hence iteration from 1.
 */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

/* Allocate the ports array and create the CPU port plus every port that
 * has a discovered module mapping. Unwinds fully on failure.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}

/* Query and cache the module mapping of every local port. Ports without a
 * width (no module) and XM ports are left NULL in the array.
 */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
			continue;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
		if (err)
			goto err_port_module_info_get;
		if (!port_mapping.width)
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}

/* Free all cached port module mappings and the mapping array itself. */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
}

/* Return the first local port of the cluster (of @max_width ports, local
 * ports being 1-based) that contains @local_port.
 */
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
{
	u8 offset = (local_port - 1) % max_width;

	return local_port - offset;
}

/* Create @count split ports starting at @base_port, @offset local ports
 * apart, each taking width/count lanes of the original mapping. Removes
 * already-created ports on failure.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}

/* Recreate the original unsplit ports in the local port range previously
 * occupied by split ports. Creation errors are deliberately ignored
 * (best-effort restore).
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}

/* Return the local port stride between sibling split ports for a split of
 * @count out of @max_width lanes, or -EINVAL for unsupported widths.
 */
static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
				       unsigned int count,
				       unsigned int max_width)
{
	enum mlxsw_res_id local_ports_in_x_res_id;
	int split_width = max_width / count;

	if (split_width == 1)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
	else if (split_width == 2)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
	else if (split_width == 4)
		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
	else
		return -EINVAL;

	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
		return -EINVAL;
	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
}

/* NULL-safe lookup of the port object for @local_port. */
static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
		return mlxsw_sp->ports[local_port];
	return NULL;
}

/* devlink port split handler: validate the request and replace the single
 * full-width port with @count split ports.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if
 (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Split port with non-max cannot be split. */
	if (mlxsw_sp_port->mapping.width != max_width) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only in case max split is being done, the local port and
	 * base port may differ.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	/* Refuse the split if any local port in the target range (other than
	 * the ones being consumed) already exists.
	 */
	for (i = 0; i < count * offset; i++) {
		/* Expect base port to exist and also the one in the middle in
		 * case of maximal split count.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Copy the mapping before removing the port that owns it. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best effort: restore the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}

/* devlink port unsplit handler: remove the split ports and recreate the
 * original unsplit ports from the cached module mapping.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
return max_width; 2091 } 2092 2093 count = max_width / mlxsw_sp_port->mapping.width; 2094 2095 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); 2096 if (WARN_ON(offset < 0)) { 2097 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); 2098 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); 2099 return -EINVAL; 2100 } 2101 2102 base_port = mlxsw_sp_port->split_base_local_port; 2103 2104 for (i = 0; i < count; i++) 2105 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) 2106 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); 2107 2108 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); 2109 2110 return 0; 2111 } 2112 2113 static void 2114 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 2115 { 2116 int i; 2117 2118 for (i = 0; i < TC_MAX_QUEUE; i++) 2119 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 2120 } 2121 2122 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2123 char *pude_pl, void *priv) 2124 { 2125 struct mlxsw_sp *mlxsw_sp = priv; 2126 struct mlxsw_sp_port *mlxsw_sp_port; 2127 enum mlxsw_reg_pude_oper_status status; 2128 u8 local_port; 2129 2130 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2131 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2132 if (!mlxsw_sp_port) 2133 return; 2134 2135 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2136 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2137 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2138 netif_carrier_on(mlxsw_sp_port->dev); 2139 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 2140 } else { 2141 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2142 netif_carrier_off(mlxsw_sp_port->dev); 2143 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 2144 } 2145 } 2146 2147 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 2148 char *mtpptr_pl, bool ingress) 2149 { 2150 u8 local_port; 2151 u8 num_rec; 2152 int i; 2153 2154 local_port = 
mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 2155 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 2156 for (i = 0; i < num_rec; i++) { 2157 u8 domain_number; 2158 u8 message_type; 2159 u16 sequence_id; 2160 u64 timestamp; 2161 2162 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 2163 &domain_number, &sequence_id, 2164 ×tamp); 2165 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 2166 message_type, domain_number, 2167 sequence_id, timestamp); 2168 } 2169 } 2170 2171 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 2172 char *mtpptr_pl, void *priv) 2173 { 2174 struct mlxsw_sp *mlxsw_sp = priv; 2175 2176 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 2177 } 2178 2179 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 2180 char *mtpptr_pl, void *priv) 2181 { 2182 struct mlxsw_sp *mlxsw_sp = priv; 2183 2184 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 2185 } 2186 2187 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2188 u8 local_port, void *priv) 2189 { 2190 struct mlxsw_sp *mlxsw_sp = priv; 2191 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2192 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2193 2194 if (unlikely(!mlxsw_sp_port)) { 2195 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2196 local_port); 2197 return; 2198 } 2199 2200 skb->dev = mlxsw_sp_port->dev; 2201 2202 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2203 u64_stats_update_begin(&pcpu_stats->syncp); 2204 pcpu_stats->rx_packets++; 2205 pcpu_stats->rx_bytes += skb->len; 2206 u64_stats_update_end(&pcpu_stats->syncp); 2207 2208 skb->protocol = eth_type_trans(skb, skb->dev); 2209 netif_receive_skb(skb); 2210 } 2211 2212 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, 2213 void *priv) 2214 { 2215 skb->offload_fwd_mark = 1; 2216 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 
2217 } 2218 2219 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2220 u8 local_port, void *priv) 2221 { 2222 skb->offload_l3_fwd_mark = 1; 2223 skb->offload_fwd_mark = 1; 2224 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2225 } 2226 2227 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2228 u8 local_port) 2229 { 2230 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2231 } 2232 2233 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2234 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2235 _is_ctrl, SP_##_trap_group, DISCARD) 2236 2237 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2238 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2239 _is_ctrl, SP_##_trap_group, DISCARD) 2240 2241 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2242 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2243 _is_ctrl, SP_##_trap_group, DISCARD) 2244 2245 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2246 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2247 2248 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2249 /* Events */ 2250 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2251 /* L2 traps */ 2252 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2253 /* L3 traps */ 2254 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2255 false), 2256 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2257 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2258 false), 2259 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2260 ROUTER_EXP, false), 2261 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2262 ROUTER_EXP, false), 2263 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2264 ROUTER_EXP, false), 2265 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2266 ROUTER_EXP, false), 2267 /* 
 Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};

/* Extra listeners registered only on Spectrum-1 (PTP timestamp FIFOs). */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

/* Program the CPU policers (QPCR) that rate-limit packets trapped to the CPU
 * for the trap groups configured in this file, and mark each programmed
 * policer in the usage bitmap.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			/* Policers for other groups are configured
			 * elsewhere; skip them here.
			 */
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Bind each trap group (HTGT) handled here to its policer, priority and
 * traffic class.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* Policer ID defaults to the trap group index. */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

/* Register an array of trap listeners, unregistering the already-registered
 * ones on failure.
 */
static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_listener listeners[],
				   size_t listeners_count)
{
	int i;
	int err;

	for (i = 0; i < listeners_count; i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &listeners[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
	return err;
}

/* Unregister an array of trap listeners. */
static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_listener listeners[],
				      size_t listeners_count)
{
	int i;

	for (i = 0; i < listeners_count; i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &listeners[i],
					   mlxsw_sp);
	}
}

/* Allocate the trap state, program CPU policers and trap groups and register
 * both the common and the per-ASIC listener arrays.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64
max_policers; 2411 int err; 2412 2413 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2414 return -EIO; 2415 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2416 trap = kzalloc(struct_size(trap, policers_usage, 2417 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2418 if (!trap) 2419 return -ENOMEM; 2420 trap->max_policers = max_policers; 2421 mlxsw_sp->trap = trap; 2422 2423 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2424 if (err) 2425 goto err_cpu_policers_set; 2426 2427 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2428 if (err) 2429 goto err_trap_groups_set; 2430 2431 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener, 2432 ARRAY_SIZE(mlxsw_sp_listener)); 2433 if (err) 2434 goto err_traps_register; 2435 2436 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners, 2437 mlxsw_sp->listeners_count); 2438 if (err) 2439 goto err_extra_traps_init; 2440 2441 return 0; 2442 2443 err_extra_traps_init: 2444 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2445 ARRAY_SIZE(mlxsw_sp_listener)); 2446 err_traps_register: 2447 err_trap_groups_set: 2448 err_cpu_policers_set: 2449 kfree(trap); 2450 return err; 2451 } 2452 2453 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2454 { 2455 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners, 2456 mlxsw_sp->listeners_count); 2457 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener, 2458 ARRAY_SIZE(mlxsw_sp_listener)); 2459 kfree(mlxsw_sp->trap); 2460 } 2461 2462 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2463 2464 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2465 { 2466 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2467 u32 seed; 2468 int err; 2469 2470 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2471 MLXSW_SP_LAG_SEED_INIT); 2472 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2473 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2474 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2475 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2476 MLXSW_REG_SLCR_LAG_HASH_SIP | 2477 
MLXSW_REG_SLCR_LAG_HASH_DIP | 2478 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2479 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2480 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2481 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2482 if (err) 2483 return err; 2484 2485 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 2486 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2487 return -EIO; 2488 2489 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 2490 sizeof(struct mlxsw_sp_upper), 2491 GFP_KERNEL); 2492 if (!mlxsw_sp->lags) 2493 return -ENOMEM; 2494 2495 return 0; 2496 } 2497 2498 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2499 { 2500 kfree(mlxsw_sp->lags); 2501 } 2502 2503 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) 2504 { 2505 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2506 int err; 2507 2508 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, 2509 MLXSW_REG_HTGT_INVALID_POLICER, 2510 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2511 MLXSW_REG_HTGT_DEFAULT_TC); 2512 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2513 if (err) 2514 return err; 2515 2516 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE, 2517 MLXSW_REG_HTGT_INVALID_POLICER, 2518 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2519 MLXSW_REG_HTGT_DEFAULT_TC); 2520 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2521 if (err) 2522 return err; 2523 2524 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE, 2525 MLXSW_REG_HTGT_INVALID_POLICER, 2526 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2527 MLXSW_REG_HTGT_DEFAULT_TC); 2528 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2529 if (err) 2530 return err; 2531 2532 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE, 2533 MLXSW_REG_HTGT_INVALID_POLICER, 2534 MLXSW_REG_HTGT_DEFAULT_PRIORITY, 2535 MLXSW_REG_HTGT_DEFAULT_TC); 2536 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2537 } 2538 2539 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops 
 = {
	.clock_init = mlxsw_sp1_ptp_clock_init,
	.clock_fini = mlxsw_sp1_ptp_clock_fini,
	.init = mlxsw_sp1_ptp_init,
	.fini = mlxsw_sp1_ptp_fini,
	.receive = mlxsw_sp1_ptp_receive,
	.transmitted = mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp1_ptp_shaper_work,
	.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats = mlxsw_sp1_get_stats,
};

/* PTP operations used by Spectrum-2 and later ASICs. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini = mlxsw_sp2_ptp_fini,
	.receive = mlxsw_sp2_ptp_receive,
	.transmitted = mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp2_ptp_shaper_work,
	.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats = mlxsw_sp2_get_stats,
};

/* Hashtable node binding a sampling trigger (the key) to its parameters. */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;
	/* Number of users of this trigger entry. */
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};

/* Build a lookup key from a trigger: only the type and local port take part
 * in the lookup; the rest of the key is zeroed.
 */
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}

/* RCU read lock must be held */
struct mlxsw_sp_sample_params *
mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	mlxsw_sp_sample_trigger_key_init(&key, trigger);
	trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
					 mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return NULL;

	return &trigger_node->params;
}

/* Allocate a trigger node with a reference count of one and insert it into
 * the sampling trigger hashtable.
 */
static int
mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_sample_trigger *trigger,
				  const struct mlxsw_sp_sample_params *params)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	int err;

	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
	if (!trigger_node)
		return -ENOMEM;

	trigger_node->trigger = *trigger;
	trigger_node->params = *params;
	refcount_set(&trigger_node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
				     &trigger_node->ht_node,
				     mlxsw_sp_sample_trigger_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(trigger_node);
	return err;
}

/* Remove a trigger node from the hashtable and free it after an RCU grace
 * period (readers may still hold a reference to its params).
 */
static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}

/* Install sampling parameters for a trigger, or take another reference on an
 * existing matching node. Called under RTNL (see ASSERT_RTNL in the body).
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack
*extack) 2656 { 2657 struct mlxsw_sp_sample_trigger_node *trigger_node; 2658 struct mlxsw_sp_sample_trigger key; 2659 2660 ASSERT_RTNL(); 2661 2662 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2663 2664 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2665 &key, 2666 mlxsw_sp_sample_trigger_ht_params); 2667 if (!trigger_node) 2668 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, 2669 params); 2670 2671 if (trigger_node->trigger.local_port) { 2672 NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port"); 2673 return -EINVAL; 2674 } 2675 2676 if (trigger_node->params.psample_group != params->psample_group || 2677 trigger_node->params.truncate != params->truncate || 2678 trigger_node->params.rate != params->rate || 2679 trigger_node->params.trunc_size != params->trunc_size) { 2680 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); 2681 return -EINVAL; 2682 } 2683 2684 refcount_inc(&trigger_node->refcount); 2685 2686 return 0; 2687 } 2688 2689 void 2690 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, 2691 const struct mlxsw_sp_sample_trigger *trigger) 2692 { 2693 struct mlxsw_sp_sample_trigger_node *trigger_node; 2694 struct mlxsw_sp_sample_trigger key; 2695 2696 ASSERT_RTNL(); 2697 2698 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2699 2700 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2701 &key, 2702 mlxsw_sp_sample_trigger_ht_params); 2703 if (!trigger_node) 2704 return; 2705 2706 if (!refcount_dec_and_test(&trigger_node->refcount)) 2707 return; 2708 2709 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); 2710 } 2711 2712 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2713 unsigned long event, void *ptr); 2714 2715 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2716 const struct mlxsw_bus_info *mlxsw_bus_info, 2717 struct netlink_ext_ack *extack) 2718 { 2719 struct mlxsw_sp *mlxsw_sp = 
	    mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	/* Ports are created last, once all subsystems they rely on are up. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Error unwind: strict reverse order of the init sequence above. */
err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}

/* Spectrum-1 init: install SP1-specific operations (including the extra PTP
 * FIFO listeners), then run the common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core,
			     mlxsw_bus_info, extack);
}

/* Spectrum-2 init: install SP2-specific operations, then run the common
 * init.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Spectrum-3 init: reuses the SP2 operations except for the shared-buffer
 * ops, SPAN ops and lowest shaper burst size.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

/* Common teardown: mirrors mlxsw_sp_init() in exact reverse order. */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}

/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

static const struct mlxsw_config_profile
mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	/* Spectrum-1 splits the hash-based KVD between single and double
	 * entries by a fixed parts ratio; see mlxsw_sp_kvd_sizes_get().
	 */
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	/* Spectrum-2 and later use the KVH XLT cache instead of the
	 * partitioned KVD of Spectrum-1.
	 */
	.used_kvh_xlt_cache_mode	= 1,
	.kvh_xlt_cache_mode		= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

/* Initialize the devlink size parameters of the KVD and its three
 * sub-resources (linear, hash-single, hash-double). Each sub-resource may
 * grow up to the KVD size minus the minimum sizes of its two siblings.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

/* Register the Spectrum-1 KVD resource tree with devlink: the KVD root and
 * its linear, hash-double and hash-single children.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err =
	      mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split what remains after the linear part between double and single
	 * hash entries according to the profile's parts ratio, rounded down
	 * to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

/* Register the KVD with devlink for Spectrum-2 and later. These ASICs do
 * not partition the KVD, so only the root resource is exposed.
 */
static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

/* Expose the number of SPAN (mirroring) agents as a fixed-size devlink
 * resource.
 */
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

/* Register all devlink resources for Spectrum-1. On any failure all
 * resources registered so far are unregistered in one go.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Register all devlink resources for Spectrum-2 and later. Mirrors
 * mlxsw_sp1_resources_register() but with the unpartitioned KVD.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	return 0;

err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}

/* Compute the sizes of the three KVD partitions, preferring user-provided
 * sizes from devlink and falling back to the profile's defaults.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct
	       devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the
	 * linear part. It is split into the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be multiples of the granularity
	 * from the profile. In case the user provided the sizes,
	 * they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

/* devlink runtime-parameter getter for the ACL region rehash interval. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

/* devlink runtime-parameter setter for the ACL region rehash interval. */
static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

/* devlink parameters exposed by Spectrum-2 and later ASICs. */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

/* Register the Spectrum-2 devlink parameters and set the rehash interval's
 * driverinit value to 0.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		return err;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;
}

/* Unregister the Spectrum-2 devlink parameters. */
static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
}

/* Core callback invoked after a PTP packet was transmitted; strips the Tx
 * header and hands the skb to the per-ASIC PTP implementation.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

/* mlxsw core driver description for Spectrum-1. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp1_fw_rev,
	.fw_filename			= MLXSW_SP1_FW_FILENAME,
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
	.fw_fatal_enabled		= true,
	.temp_warn_enabled		= true,
};

/* mlxsw core driver description for Spectrum-2. Unlike Spectrum-1 it has
 * devlink params and no KVD partitioning (no kvd_sizes_get).
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp2_fw_rev,
	.fw_filename			= MLXSW_SP2_FW_FILENAME,
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
	.fw_fatal_enabled		= true,
	.temp_warn_enabled		= true,
};

/* mlxsw core driver description for Spectrum-3; reuses the Spectrum-2
 * config profile, resources and params callbacks.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp3_fw_rev,
	.fw_filename			= MLXSW_SP3_FW_FILENAME,
	.init				= mlxsw_sp3_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
	.fw_fatal_enabled		= true,
	.temp_warn_enabled		= true,
};

bool
/* Return true if the netdev is an mlxsw Spectrum port, identified by its
 * netdev_ops pointer.
 */
     mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

/* netdev_walk_all_lower_dev() callback: stop the walk (return 1) at the
 * first lower device that is an mlxsw port and stash its private data.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
				   struct netdev_nested_priv *priv)
{
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

/* Find the mlxsw port under 'dev' (or 'dev' itself). Returns NULL if no
 * mlxsw port exists in its lower hierarchy. Caller must hold RTNL.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

/* Resolve the mlxsw_sp instance backing 'dev', or NULL if 'dev' has no
 * mlxsw port beneath it.
 */
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

/* RCU variant of mlxsw_sp_port_dev_lower_find(); caller must be in an RCU
 * read-side critical section.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

/* Like mlxsw_sp_port_dev_lower_find(), but takes a reference on the found
 * port's netdev. Release with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

/* Drop the reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

/* Make the LAG device and its directly linked uppers leave any bridge they
 * are enslaved to, on behalf of the given member port.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

/* Create a LAG in the device via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Destroy a LAG in the device via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Add the port to the LAG's collector at the given member index (SLCOR). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Remove the port from the LAG's collector (SLCOR). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Enable collection (Rx) on the port for the given LAG (SLCOR). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Disable collection (Rx) on the port for the given LAG (SLCOR). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

/* Find the LAG ID already assigned to 'lag_dev', or the first free one.
 * Returns -EBUSY when all hardware LAG entries are in use.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

/* Validate that a LAG master can be offloaded: a hardware LAG entry is
 * available and the bond uses hash-based Tx. Emits extack on rejection.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

/* Find a free member index within the given LAG, or -EBUSY if full. */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

/* Enslave the port to a LAG device: create the hardware LAG on first use,
 * add the port as a collector member and migrate router interface
 * membership from the port to the LAG.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member: instantiate the LAG in hardware. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if exists */
	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
					     lag_dev, extack);
	if (err)
		goto err_router_join;

	return 0;

err_router_join:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

/* Release the port from a LAG device, undoing mlxsw_sp_port_lag_join();
 * destroys the hardware LAG when the last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are members in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}

/* Add the port to the LAG's distributor (Tx hash) via SLDR. */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

/* Remove the port from the LAG's distributor via SLDR. */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int
/* Enable both collection and distribution for the port's LAG; rolls the
 * collector back if the distributor cannot be enabled.
 */
 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* Disable both distribution and collection for the port's LAG; restores
 * the distributor if the collector cannot be disabled.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

/* React to a bond lower-state change: mirror the member's tx_enabled state
 * into the hardware LAG.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

/* Set the STP state of every VLAN on the port to forwarding (enable) or
 * discarding (disable) via the SPMS register.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS is too large for the stack; allocate the payload. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

/* Prepare the port for enslavement to Open vSwitch: virtual port mode,
 * forwarding STP state, all VLANs allowed with learning disabled.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

/* Undo mlxsw_sp_port_ovs_join() in reverse order. */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

/* Return true if more than one VxLAN device is enslaved to the bridge. */
static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct
list_head *iter; 3997 3998 netdev_for_each_lower_dev(br_dev, dev, iter) { 3999 if (netif_is_vxlan(dev)) 4000 num_vxlans++; 4001 } 4002 4003 return num_vxlans > 1; 4004 } 4005 4006 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4007 { 4008 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4009 struct net_device *dev; 4010 struct list_head *iter; 4011 4012 netdev_for_each_lower_dev(br_dev, dev, iter) { 4013 u16 pvid; 4014 int err; 4015 4016 if (!netif_is_vxlan(dev)) 4017 continue; 4018 4019 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4020 if (err || !pvid) 4021 continue; 4022 4023 if (test_and_set_bit(pvid, vlans)) 4024 return false; 4025 } 4026 4027 return true; 4028 } 4029 4030 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4031 struct netlink_ext_ack *extack) 4032 { 4033 if (br_multicast_enabled(br_dev)) { 4034 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4035 return false; 4036 } 4037 4038 if (!br_vlan_enabled(br_dev) && 4039 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4040 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4041 return false; 4042 } 4043 4044 if (br_vlan_enabled(br_dev) && 4045 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4046 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4047 return false; 4048 } 4049 4050 return true; 4051 } 4052 4053 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 4054 struct net_device *dev, 4055 unsigned long event, void *ptr) 4056 { 4057 struct netdev_notifier_changeupper_info *info; 4058 struct mlxsw_sp_port *mlxsw_sp_port; 4059 struct netlink_ext_ack *extack; 4060 struct net_device *upper_dev; 4061 struct mlxsw_sp *mlxsw_sp; 4062 int err = 0; 4063 u16 proto; 4064 4065 mlxsw_sp_port = netdev_priv(dev); 4066 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4067 info = ptr; 4068 extack = 
netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge, OVS and macvlan uppers are
		 * recognized on a port.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is always allowed; the remaining checks only
		 * constrain new enslavements.
		 */
		if (!info->linking)
			break;
		/* Vet the bridge's VxLAN configuration before the port is
		 * enslaved to a not-yet-offloaded bridge holding VxLAN.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			/* Only 802.1Q and 802.1AD VLAN protocols are
			 * supported on a VLAN-aware bridge.
			 */
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		/* Topology already changed in the kernel; reflect it in the
		 * device. Only the join paths can fail ('err').
		 */
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

/* Propagate LAG lower-state changes (NETDEV_CHANGELOWERSTATE) of a port
 * that is a LAG member into the device. Failure is only logged; the
 * notifier always returns 0 here.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

/* Dispatch a netdev event concerning port 'port_dev' to the upper- or
 * lower-state handler. Events other than the three listed are ignored.
 */
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Replicate an event on a LAG device to every mlxsw port member of the
 * LAG; stop at the first error.
 */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle NETDEV_PRECHANGEUPPER / NETDEV_CHANGEUPPER for a VLAN device
 * 'vlan_dev' whose real device is the mlxsw port 'dev'. Only bridge and
 * macvlan uppers are supported on a port VLAN; 'vid' is the VLAN ID of
 * 'vlan_dev' (currently unused in this function but part of the shared
 * *_vlan_event signature).
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		/* Same VxLAN validation as on the port itself: vet the
		 * bridge before enslaving to a not-yet-offloaded bridge
		 * holding a VxLAN device.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER should have rejected any other
			 * upper type, so reaching here is a driver bug.
			 */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

/* Replicate a VLAN-upper event on a VLAN device over a LAG to every mlxsw
 * port member of the LAG; stop at the first error.
 */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Handle upper-device events for a VLAN device on top of a bridge.
 * Only macvlan uppers are supported, and only above a router interface.
 * Bails out early when the bridge has no mlxsw lower ('mlxsw_sp' NULL).
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Route a netdev event on a VLAN device to the handler matching its real
 * device: mlxsw port, LAG, or bridge. Other real devices are ignored.
 */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

/* Handle upper-device events for a bridge with an mlxsw lower. Only VLAN
 * and macvlan uppers are supported, and not on 802.1ad bridges; on
 * unlinking, tear down the corresponding router-interface state.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if
(netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* On unlinking, drop the router state associated with the
		 * departing upper (VLAN RIF or macvlan entry).
		 */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

/* Reject any non-VRF upper on a macvlan device with an mlxsw lower.
 * Only PRECHANGEUPPER is of interest; anything else passes through.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

/* Return true when the event describes (un)linking to an L3 master (VRF). */
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

/* Handle netdev events for a VxLAN device: join/leave the hardware bridge
 * representation when the VxLAN device is enslaved to / released from an
 * offloaded bridge (CHANGEUPPER), brought up under one (PRE_UP), or taken
 * down (DOWN).
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev =
cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges without an mlxsw lower device. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

/* Top-level netdev notifier callback for one mlxsw_sp instance. First
 * invalidates SPAN entries of an unregistering port and respins SPAN,
 * then dispatches the event by device type.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* The VxLAN check is deliberately a plain 'if', not part of the
	 * else-if chain below: a VxLAN device may also match one of the
	 * later predicates, in which case that handler's result overwrites
	 * 'err'.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI ID tables and driver stubs for the three Spectrum generations. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Module init: register the inet/inet6 address validator notifiers, then
 * the three per-generation core drivers, then the three PCI drivers.
 * On any failure, unwind everything registered so far in reverse order
 * via the goto ladder.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

/* Module exit: mirror of init, strict reverse order. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);