// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
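/* Tx header fields. The MLXSW_TXHDR_LEN byte Tx header is prepended to
 * every packet the driver transmits; see mlxsw_sp_txhdr_construct() below.
 */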
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID
 * mapping. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
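/* Prepend a Tx header for a control packet. Control packets bypass the
 * forwarding pipeline and are emitted directly through tx_info->local_port
 * using the control traffic class (see the tx_hdr_port_mid description
 * above).
 */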
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
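/* SPVID and SPEVET identify the VLAN EtherType by index rather than by
 * value: index 0 selects 802.1Q (0x8100), index 1 selects 802.1ad (0x88A8).
 */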
int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
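/* Read the port-to-module mapping (PMLP) and validate the configurations
 * the driver can handle: a power-of-2 width, a single module per port and
 * sequential lane numbers with identical Rx and Tx lanes.
 */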
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
	return err;
}

static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.module);
	return err;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.module);
	return 0;
}
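/* Note that the queue-busy check below happens before the Tx header is
 * pushed, so an skb rejected with NETDEV_TX_BUSY is requeued unmodified.
 */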
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
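/* Aggregate the per-CPU counters kept in software for traffic that goes
 * through the CPU port. These back the IFLA_OFFLOAD_XSTATS_CPU_HIT numbers
 * reported further below.
 */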
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}
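/* A mlxsw_sp_port_vlan tracks a single {port, VID} membership. It is
 * created by programming SPVM and is later bound to either a bridge port
 * or a router FID; mlxsw_sp_port_vlan_cleanup() above severs that binding
 * before the entry is destroyed.
 */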
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
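/* NETIF_F_HW_TC may only be cleared while no offloaded tc filters are
 * installed on the port. While the feature is off, both flow blocks are
 * marked disabled via mlxsw_sp_flow_block_disable_inc().
 */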
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
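/* An all-zero hwtstamp config disables both Rx and Tx timestamping
 * (HWTSTAMP_TX_OFF / HWTSTAMP_FILTER_NONE); used when the port is removed.
 */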
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
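/* Default egress scheduling hierarchy created below:
 *
 *   port
 *    `- group 0
 *        `- subgroup i (i = 0..7)
 *            |- TC i      (unicast)
 *            `- TC i + 8  (multicast; DWRR, weight 100, min shaper)
 */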
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
						    &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}

static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u16 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;
	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
			      split_port_subnumber, slot_index);
	return 0;
}
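/* The initialization steps below and the err_* unwind labels at the end of
 * the function mirror each other; a step added here needs a matching entry
 * in the unwind sequence, in reverse order.
 */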
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}
	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
1707 */ 1708 err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true); 1709 if (err) { 1710 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n", 1711 local_port); 1712 goto err_port_vlan_classification_set; 1713 } 1714 1715 INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, 1716 mlxsw_sp->ptp_ops->shaper_work); 1717 1718 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 1719 1720 err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port); 1721 if (err) { 1722 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n", 1723 mlxsw_sp_port->local_port); 1724 goto err_port_overheat_init_val_set; 1725 } 1726 1727 err = register_netdev(dev); 1728 if (err) { 1729 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 1730 mlxsw_sp_port->local_port); 1731 goto err_register_netdev; 1732 } 1733 1734 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, 1735 mlxsw_sp_port, dev); 1736 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); 1737 return 0; 1738 1739 err_register_netdev: 1740 err_port_overheat_init_val_set: 1741 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); 1742 err_port_vlan_classification_set: 1743 mlxsw_sp->ports[local_port] = NULL; 1744 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); 1745 err_port_vlan_create: 1746 err_port_pvid_set: 1747 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1748 err_port_nve_init: 1749 err_port_vlan_clear: 1750 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1751 err_port_qdiscs_init: 1752 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1753 err_port_fids_init: 1754 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1755 err_port_dcb_init: 1756 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1757 err_port_tc_mc_mode: 1758 err_port_ets_init: 1759 mlxsw_sp_port_buffers_fini(mlxsw_sp_port); 1760 err_port_buffers_init: 1761 err_port_admin_status_set: 1762 err_port_mtu_set: 1763 err_port_max_mtu_get: 1764 err_max_speed_get: 1765 err_port_speed_by_width_set: 1766 err_port_system_port_mapping_set: 1767 err_dev_addr_init: 1768 free_percpu(mlxsw_sp_port->pcpu_stats); 1769 err_alloc_stats: 1770 free_netdev(dev); 1771 err_alloc_etherdev: 1772 mlxsw_core_port_fini(mlxsw_sp->core, local_port); 1773 err_core_port_init: 1774 err_port_label_info_get: 1775 mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 1776 MLXSW_PORT_SWID_DISABLED_PORT); 1777 err_port_swid_set: 1778 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module); 1779 return err; 1780 } 1781 1782 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) 1783 { 1784 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1785 u8 module = mlxsw_sp_port->mapping.module; 1786 1787 cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); 1788 cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); 1789 mlxsw_sp_port_ptp_clear(mlxsw_sp_port); 1790 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); 1791 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1792 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); 1793 mlxsw_sp->ports[local_port] = NULL; 1794 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); 1795 mlxsw_sp_port_nve_fini(mlxsw_sp_port); 1796 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); 1797 mlxsw_sp_port_fids_fini(mlxsw_sp_port); 1798 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1799 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); 1800 mlxsw_sp_port_buffers_fini(mlxsw_sp_port); 1801 free_percpu(mlxsw_sp_port->pcpu_stats); 1802 
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module);
}

static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

static bool mlxsw_sp_local_port_valid(u16 local_port)
{
	return local_port != MLXSW_PORT_CPU_PORT;
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (!mlxsw_sp_local_port_valid(local_port))
		return false;
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
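/* Snapshot each port's PMLP mapping at driver init. The per-port copies
 * are used both to create the ports above and to recreate original
 * unsplit ports after a split is undone or fails.
 */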
1923 1924 for (i = 1; i < max_ports; i++) { 1925 if (mlxsw_core_port_is_xm(mlxsw_sp->core, i)) 1926 continue; 1927 1928 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); 1929 if (err) 1930 goto err_port_module_info_get; 1931 if (!port_mapping.width) 1932 continue; 1933 1934 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, 1935 sizeof(port_mapping), 1936 GFP_KERNEL); 1937 if (!mlxsw_sp->port_mapping[i]) { 1938 err = -ENOMEM; 1939 goto err_port_module_info_dup; 1940 } 1941 } 1942 return 0; 1943 1944 err_port_module_info_get: 1945 err_port_module_info_dup: 1946 for (i--; i >= 1; i--) 1947 kfree(mlxsw_sp->port_mapping[i]); 1948 kfree(mlxsw_sp->port_mapping); 1949 return err; 1950 } 1951 1952 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) 1953 { 1954 int i; 1955 1956 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) 1957 kfree(mlxsw_sp->port_mapping[i]); 1958 kfree(mlxsw_sp->port_mapping); 1959 } 1960 1961 static int 1962 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, 1963 struct mlxsw_sp_port_mapping *port_mapping, 1964 unsigned int count, const char *pmtdb_pl) 1965 { 1966 struct mlxsw_sp_port_mapping split_port_mapping; 1967 int err, i; 1968 1969 split_port_mapping = *port_mapping; 1970 split_port_mapping.width /= count; 1971 for (i = 0; i < count; i++) { 1972 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 1973 1974 if (!mlxsw_sp_local_port_valid(s_local_port)) 1975 continue; 1976 1977 err = mlxsw_sp_port_create(mlxsw_sp, s_local_port, 1978 true, &split_port_mapping); 1979 if (err) 1980 goto err_port_create; 1981 split_port_mapping.lane += split_port_mapping.width; 1982 } 1983 1984 return 0; 1985 1986 err_port_create: 1987 for (i--; i >= 0; i--) { 1988 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 1989 1990 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 1991 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 1992 } 1993 return err; 1994 } 1995 1996 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 1997 unsigned int count, 1998 const char *pmtdb_pl) 1999 { 2000 struct mlxsw_sp_port_mapping *port_mapping; 2001 int i; 2002 2003 /* Go over original unsplit ports in the gap and recreate them. 
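 * Errors from mlxsw_sp_port_create() are knowingly ignored here, since
 * this is a best-effort recovery path.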
*/ 2004 for (i = 0; i < count; i++) { 2005 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2006 2007 port_mapping = mlxsw_sp->port_mapping[local_port]; 2008 if (!port_mapping || !mlxsw_sp_local_port_valid(local_port)) 2009 continue; 2010 mlxsw_sp_port_create(mlxsw_sp, local_port, 2011 false, port_mapping); 2012 } 2013 } 2014 2015 static struct mlxsw_sp_port * 2016 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port) 2017 { 2018 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 2019 return mlxsw_sp->ports[local_port]; 2020 return NULL; 2021 } 2022 2023 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, 2024 unsigned int count, 2025 struct netlink_ext_ack *extack) 2026 { 2027 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2028 struct mlxsw_sp_port_mapping port_mapping; 2029 struct mlxsw_sp_port *mlxsw_sp_port; 2030 enum mlxsw_reg_pmtdb_status status; 2031 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2032 int i; 2033 int err; 2034 2035 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2036 if (!mlxsw_sp_port) { 2037 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2038 local_port); 2039 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2040 return -EINVAL; 2041 } 2042 2043 if (mlxsw_sp_port->split) { 2044 NL_SET_ERR_MSG_MOD(extack, "Port is already split"); 2045 return -EINVAL; 2046 } 2047 2048 mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, 2049 mlxsw_sp_port->mapping.module_width / count, 2050 count); 2051 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2052 if (err) { 2053 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2054 return err; 2055 } 2056 2057 status = mlxsw_reg_pmtdb_status_get(pmtdb_pl); 2058 if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) { 2059 NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration"); 2060 return -EINVAL; 2061 } 2062 2063 port_mapping = mlxsw_sp_port->mapping; 2064 2065 for (i = 0; i < count; i++) { 2066 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2067 2068 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2069 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2070 } 2071 2072 err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping, 2073 count, pmtdb_pl); 2074 if (err) { 2075 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2076 goto err_port_split_create; 2077 } 2078 2079 return 0; 2080 2081 err_port_split_create: 2082 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2083 return err; 2084 } 2085 2086 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port, 2087 struct netlink_ext_ack *extack) 2088 { 2089 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2090 struct mlxsw_sp_port *mlxsw_sp_port; 2091 char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; 2092 unsigned int count; 2093 int i; 2094 int err; 2095 2096 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 2097 if (!mlxsw_sp_port) { 2098 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2099 local_port); 2100 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); 2101 return -EINVAL; 2102 } 2103 2104 if (!mlxsw_sp_port->split) { 2105 NL_SET_ERR_MSG_MOD(extack, "Port was not split"); 2106 return -EINVAL; 2107 } 2108 2109 count = mlxsw_sp_port->mapping.module_width / 2110 mlxsw_sp_port->mapping.width; 2111 2112 mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, 2113 mlxsw_sp_port->mapping.module_width / count, 2114 
count); 2115 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); 2116 if (err) { 2117 NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); 2118 return err; 2119 } 2120 2121 for (i = 0; i < count; i++) { 2122 u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); 2123 2124 if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 2125 mlxsw_sp_port_remove(mlxsw_sp, s_local_port); 2126 } 2127 2128 mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); 2129 2130 return 0; 2131 } 2132 2133 static void 2134 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) 2135 { 2136 int i; 2137 2138 for (i = 0; i < TC_MAX_QUEUE; i++) 2139 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; 2140 } 2141 2142 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2143 char *pude_pl, void *priv) 2144 { 2145 struct mlxsw_sp *mlxsw_sp = priv; 2146 struct mlxsw_sp_port *mlxsw_sp_port; 2147 enum mlxsw_reg_pude_oper_status status; 2148 u16 local_port; 2149 2150 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2151 2152 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) 2153 return; 2154 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2155 if (!mlxsw_sp_port) 2156 return; 2157 2158 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2159 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2160 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2161 netif_carrier_on(mlxsw_sp_port->dev); 2162 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0); 2163 } else { 2164 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2165 netif_carrier_off(mlxsw_sp_port->dev); 2166 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); 2167 } 2168 } 2169 2170 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, 2171 char *mtpptr_pl, bool ingress) 2172 { 2173 u16 local_port; 2174 u8 num_rec; 2175 int i; 2176 2177 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl); 2178 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl); 2179 for (i = 0; i < num_rec; i++) { 2180 u8 domain_number; 2181 u8 message_type; 2182 u16 sequence_id; 2183 u64 timestamp; 2184 2185 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type, 2186 &domain_number, &sequence_id, 2187 &timestamp); 2188 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port, 2189 message_type, domain_number, 2190 sequence_id, timestamp); 2191 } 2192 } 2193 2194 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg, 2195 char *mtpptr_pl, void *priv) 2196 { 2197 struct mlxsw_sp *mlxsw_sp = priv; 2198 2199 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true); 2200 } 2201 2202 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, 2203 char *mtpptr_pl, void *priv) 2204 { 2205 struct mlxsw_sp *mlxsw_sp = priv; 2206 2207 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false); 2208 } 2209 2210 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, 2211 u16 local_port, void *priv) 2212 { 2213 struct mlxsw_sp *mlxsw_sp = priv; 2214 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2215 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2216 2217 if (unlikely(!mlxsw_sp_port)) { 2218 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2219 local_port); 2220 return; 2221 } 2222 2223 skb->dev = mlxsw_sp_port->dev; 2224 2225 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2226 u64_stats_update_begin(&pcpu_stats->syncp); 2227 pcpu_stats->rx_packets++; 2228 pcpu_stats->rx_bytes += skb->len; 2229
u64_stats_update_end(&pcpu_stats->syncp); 2230 2231 skb->protocol = eth_type_trans(skb, skb->dev); 2232 netif_receive_skb(skb); 2233 } 2234 2235 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port, 2236 void *priv) 2237 { 2238 skb->offload_fwd_mark = 1; 2239 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2240 } 2241 2242 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, 2243 u16 local_port, void *priv) 2244 { 2245 skb->offload_l3_fwd_mark = 1; 2246 skb->offload_fwd_mark = 1; 2247 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); 2248 } 2249 2250 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, 2251 u16 local_port) 2252 { 2253 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); 2254 } 2255 2256 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2257 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ 2258 _is_ctrl, SP_##_trap_group, DISCARD) 2259 2260 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2261 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ 2262 _is_ctrl, SP_##_trap_group, DISCARD) 2263 2264 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ 2265 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ 2266 _is_ctrl, SP_##_trap_group, DISCARD) 2267 2268 #define MLXSW_SP_EVENTL(_func, _trap_id) \ 2269 MLXSW_EVENTL(_func, _trap_id, SP_EVENT) 2270 2271 static const struct mlxsw_listener mlxsw_sp_listener[] = { 2272 /* Events */ 2273 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), 2274 /* L2 traps */ 2275 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false), 2276 /* L3 traps */ 2277 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, 2278 false), 2279 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), 2280 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, 2281 false), 2282 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD, 2283 ROUTER_EXP, false), 2284 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD, 2285 ROUTER_EXP, false), 2286 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD, 2287 ROUTER_EXP, false), 2288 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, 2289 ROUTER_EXP, false), 2290 /* Multicast Router Traps */ 2291 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), 2292 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), 2293 /* NVE traps */ 2294 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), 2295 }; 2296 2297 static const struct mlxsw_listener mlxsw_sp1_listener[] = { 2298 /* Events */ 2299 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0), 2300 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), 2301 }; 2302 2303 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) 2304 { 2305 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2306 char qpcr_pl[MLXSW_REG_QPCR_LEN]; 2307 enum mlxsw_reg_qpcr_ir_units ir_units; 2308 int max_cpu_policers; 2309 bool is_bytes; 2310 u8 burst_size; 2311 u32 rate; 2312 int i, err; 2313 2314 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) 2315 return -EIO; 2316 2317 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2318 2319 ir_units = MLXSW_REG_QPCR_IR_UNITS_M; 2320 for (i = 0; i < max_cpu_policers; i++) { 2321 is_bytes = false; 2322 switch (i) { 2323 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2324 case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2325 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2326 rate = 1024; 2327 burst_size = 7; 2328 break; 2329 default: 2330 continue; 2331 } 2332 2333 __set_bit(i, mlxsw_sp->trap->policers_usage); 2334 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, 2335 burst_size); 2336 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); 2337 if (err) 2338 return err; 2339 } 2340 2341 return 0; 2342 } 2343 2344 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) 2345 { 2346 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2347 enum mlxsw_reg_htgt_trap_group i; 2348 int max_cpu_policers; 2349 int max_trap_groups; 2350 u8 priority, tc; 2351 u16 policer_id; 2352 int err; 2353 2354 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) 2355 return -EIO; 2356 2357 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); 2358 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); 2359 2360 for (i = 0; i < max_trap_groups; i++) { 2361 policer_id = i; 2362 switch (i) { 2363 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: 2364 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: 2365 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS: 2366 priority = 1; 2367 tc = 1; 2368 break; 2369 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: 2370 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; 2371 tc = MLXSW_REG_HTGT_DEFAULT_TC; 2372 policer_id = MLXSW_REG_HTGT_INVALID_POLICER; 2373 break; 2374 default: 2375 continue; 2376 } 2377 2378 if (max_cpu_policers <= policer_id && 2379 policer_id != MLXSW_REG_HTGT_INVALID_POLICER) 2380 return -EIO; 2381 2382 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); 2383 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); 2384 if (err) 2385 return err; 2386 } 2387 2388 return 0; 2389 } 2390 2391 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2392 { 2393 struct mlxsw_sp_trap *trap; 2394 u64 max_policers; 2395 int err; 2396 2397 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS)) 2398 return -EIO; 2399 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS); 2400 trap = kzalloc(struct_size(trap, policers_usage, 2401 BITS_TO_LONGS(max_policers)), GFP_KERNEL); 2402 if (!trap) 2403 return -ENOMEM; 2404 trap->max_policers = max_policers; 2405 mlxsw_sp->trap = trap; 2406 2407 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); 2408 if (err) 2409 goto err_cpu_policers_set; 2410 2411 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); 2412 if (err) 2413 goto err_trap_groups_set; 2414 2415 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener, 2416 ARRAY_SIZE(mlxsw_sp_listener), 2417 mlxsw_sp); 2418 if (err) 2419 goto err_traps_register; 2420 2421 err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners, 2422 mlxsw_sp->listeners_count, mlxsw_sp); 2423 if (err) 2424 goto err_extra_traps_init; 2425 2426 return 0; 2427 2428 err_extra_traps_init: 2429 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2430 ARRAY_SIZE(mlxsw_sp_listener), 2431 mlxsw_sp); 2432 err_traps_register: 2433 err_trap_groups_set: 2434 err_cpu_policers_set: 2435 kfree(trap); 2436 return err; 2437 } 2438 2439 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2440 { 2441 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners, 2442 mlxsw_sp->listeners_count, 2443 mlxsw_sp); 2444 mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener, 2445 ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp); 2446 kfree(mlxsw_sp->trap); 2447 } 2448 2449 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2450 2451 static int 
mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2452 { 2453 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2454 u32 seed; 2455 int err; 2456 2457 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 2458 MLXSW_SP_LAG_SEED_INIT); 2459 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2460 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2461 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2462 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2463 MLXSW_REG_SLCR_LAG_HASH_SIP | 2464 MLXSW_REG_SLCR_LAG_HASH_DIP | 2465 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2466 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2467 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); 2468 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2469 if (err) 2470 return err; 2471 2472 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || 2473 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) 2474 return -EIO; 2475 2476 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), 2477 sizeof(struct mlxsw_sp_upper), 2478 GFP_KERNEL); 2479 if (!mlxsw_sp->lags) 2480 return -ENOMEM; 2481 2482 return 0; 2483 } 2484 2485 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) 2486 { 2487 kfree(mlxsw_sp->lags); 2488 } 2489 2490 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = { 2491 .clock_init = mlxsw_sp1_ptp_clock_init, 2492 .clock_fini = mlxsw_sp1_ptp_clock_fini, 2493 .init = mlxsw_sp1_ptp_init, 2494 .fini = mlxsw_sp1_ptp_fini, 2495 .receive = mlxsw_sp1_ptp_receive, 2496 .transmitted = mlxsw_sp1_ptp_transmitted, 2497 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get, 2498 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set, 2499 .shaper_work = mlxsw_sp1_ptp_shaper_work, 2500 .get_ts_info = mlxsw_sp1_ptp_get_ts_info, 2501 .get_stats_count = mlxsw_sp1_get_stats_count, 2502 .get_stats_strings = mlxsw_sp1_get_stats_strings, 2503 .get_stats = mlxsw_sp1_get_stats, 2504 }; 2505 2506 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { 2507 .clock_init = mlxsw_sp2_ptp_clock_init, 2508 .clock_fini = mlxsw_sp2_ptp_clock_fini, 2509 .init = mlxsw_sp2_ptp_init, 2510 .fini = mlxsw_sp2_ptp_fini, 2511 .receive = mlxsw_sp2_ptp_receive, 2512 .transmitted = mlxsw_sp2_ptp_transmitted, 2513 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get, 2514 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set, 2515 .shaper_work = mlxsw_sp2_ptp_shaper_work, 2516 .get_ts_info = mlxsw_sp2_ptp_get_ts_info, 2517 .get_stats_count = mlxsw_sp2_get_stats_count, 2518 .get_stats_strings = mlxsw_sp2_get_stats_strings, 2519 .get_stats = mlxsw_sp2_get_stats, 2520 }; 2521 2522 struct mlxsw_sp_sample_trigger_node { 2523 struct mlxsw_sp_sample_trigger trigger; 2524 struct mlxsw_sp_sample_params params; 2525 struct rhash_head ht_node; 2526 struct rcu_head rcu; 2527 refcount_t refcount; 2528 }; 2529 2530 static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = { 2531 .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger), 2532 .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node), 2533 .key_len = sizeof(struct mlxsw_sp_sample_trigger), 2534 .automatic_shrinking = true, 2535 }; 2536 2537 static void 2538 mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key, 2539 const struct mlxsw_sp_sample_trigger *trigger) 2540 { 2541 memset(key, 0, sizeof(*key)); 2542 key->type = trigger->type; 2543 key->local_port = trigger->local_port; 2544 } 2545 2546 /* RCU read lock must be held */ 2547 struct mlxsw_sp_sample_params * 2548 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp, 2549 const struct mlxsw_sp_sample_trigger *trigger) 2550 { 2551 struct mlxsw_sp_sample_trigger_node 
*trigger_node; 2552 struct mlxsw_sp_sample_trigger key; 2553 2554 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2555 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key, 2556 mlxsw_sp_sample_trigger_ht_params); 2557 if (!trigger_node) 2558 return NULL; 2559 2560 return &trigger_node->params; 2561 } 2562 2563 static int 2564 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp, 2565 const struct mlxsw_sp_sample_trigger *trigger, 2566 const struct mlxsw_sp_sample_params *params) 2567 { 2568 struct mlxsw_sp_sample_trigger_node *trigger_node; 2569 int err; 2570 2571 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL); 2572 if (!trigger_node) 2573 return -ENOMEM; 2574 2575 trigger_node->trigger = *trigger; 2576 trigger_node->params = *params; 2577 refcount_set(&trigger_node->refcount, 1); 2578 2579 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht, 2580 &trigger_node->ht_node, 2581 mlxsw_sp_sample_trigger_ht_params); 2582 if (err) 2583 goto err_rhashtable_insert; 2584 2585 return 0; 2586 2587 err_rhashtable_insert: 2588 kfree(trigger_node); 2589 return err; 2590 } 2591 2592 static void 2593 mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp, 2594 struct mlxsw_sp_sample_trigger_node *trigger_node) 2595 { 2596 rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht, 2597 &trigger_node->ht_node, 2598 mlxsw_sp_sample_trigger_ht_params); 2599 kfree_rcu(trigger_node, rcu); 2600 } 2601 2602 int 2603 mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, 2604 const struct mlxsw_sp_sample_trigger *trigger, 2605 const struct mlxsw_sp_sample_params *params, 2606 struct netlink_ext_ack *extack) 2607 { 2608 struct mlxsw_sp_sample_trigger_node *trigger_node; 2609 struct mlxsw_sp_sample_trigger key; 2610 2611 ASSERT_RTNL(); 2612 2613 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2614 2615 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2616 &key, 2617 mlxsw_sp_sample_trigger_ht_params); 2618 if (!trigger_node) 2619 return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key, 2620 params); 2621 2622 if (trigger_node->trigger.local_port) { 2623 NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port"); 2624 return -EINVAL; 2625 } 2626 2627 if (trigger_node->params.psample_group != params->psample_group || 2628 trigger_node->params.truncate != params->truncate || 2629 trigger_node->params.rate != params->rate || 2630 trigger_node->params.trunc_size != params->trunc_size) { 2631 NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger"); 2632 return -EINVAL; 2633 } 2634 2635 refcount_inc(&trigger_node->refcount); 2636 2637 return 0; 2638 } 2639 2640 void 2641 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, 2642 const struct mlxsw_sp_sample_trigger *trigger) 2643 { 2644 struct mlxsw_sp_sample_trigger_node *trigger_node; 2645 struct mlxsw_sp_sample_trigger key; 2646 2647 ASSERT_RTNL(); 2648 2649 mlxsw_sp_sample_trigger_key_init(&key, trigger); 2650 2651 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht, 2652 &key, 2653 mlxsw_sp_sample_trigger_ht_params); 2654 if (!trigger_node) 2655 return; 2656 2657 if (!refcount_dec_and_test(&trigger_node->refcount)) 2658 return; 2659 2660 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node); 2661 } 2662 2663 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2664 unsigned long event, void *ptr); 2665 2666 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96 2667 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128 2668 #define 
MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789 2669 2670 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) 2671 { 2672 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 2673 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; 2674 mutex_init(&mlxsw_sp->parsing.lock); 2675 } 2676 2677 static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) 2678 { 2679 mutex_destroy(&mlxsw_sp->parsing.lock); 2680 } 2681 2682 struct mlxsw_sp_ipv6_addr_node { 2683 struct in6_addr key; 2684 struct rhash_head ht_node; 2685 u32 kvdl_index; 2686 refcount_t refcount; 2687 }; 2688 2689 static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = { 2690 .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key), 2691 .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node), 2692 .key_len = sizeof(struct in6_addr), 2693 .automatic_shrinking = true, 2694 }; 2695 2696 static int 2697 mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6, 2698 u32 *p_kvdl_index) 2699 { 2700 struct mlxsw_sp_ipv6_addr_node *node; 2701 char rips_pl[MLXSW_REG_RIPS_LEN]; 2702 int err; 2703 2704 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 2705 MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 2706 p_kvdl_index); 2707 if (err) 2708 return err; 2709 2710 mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6); 2711 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); 2712 if (err) 2713 goto err_rips_write; 2714 2715 node = kzalloc(sizeof(*node), GFP_KERNEL); 2716 if (!node) { 2717 err = -ENOMEM; 2718 goto err_node_alloc; 2719 } 2720 2721 node->key = *addr6; 2722 node->kvdl_index = *p_kvdl_index; 2723 refcount_set(&node->refcount, 1); 2724 2725 err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht, 2726 &node->ht_node, 2727 mlxsw_sp_ipv6_addr_ht_params); 2728 if (err) 2729 goto err_rhashtable_insert; 2730 2731 return 0; 2732 2733 err_rhashtable_insert: 2734 kfree(node); 2735 err_node_alloc: 2736 err_rips_write: 2737 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 2738 *p_kvdl_index); 2739 return err; 2740 } 2741 2742 static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp, 2743 struct mlxsw_sp_ipv6_addr_node *node) 2744 { 2745 u32 kvdl_index = node->kvdl_index; 2746 2747 rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node, 2748 mlxsw_sp_ipv6_addr_ht_params); 2749 kfree(node); 2750 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, 2751 kvdl_index); 2752 } 2753 2754 int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp, 2755 const struct in6_addr *addr6, 2756 u32 *p_kvdl_index) 2757 { 2758 struct mlxsw_sp_ipv6_addr_node *node; 2759 int err = 0; 2760 2761 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 2762 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 2763 mlxsw_sp_ipv6_addr_ht_params); 2764 if (node) { 2765 refcount_inc(&node->refcount); 2766 *p_kvdl_index = node->kvdl_index; 2767 goto out_unlock; 2768 } 2769 2770 err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index); 2771 2772 out_unlock: 2773 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 2774 return err; 2775 } 2776 2777 void 2778 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6) 2779 { 2780 struct mlxsw_sp_ipv6_addr_node *node; 2781 2782 mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); 2783 node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, 2784 mlxsw_sp_ipv6_addr_ht_params); 2785 if (WARN_ON(!node)) 2786 goto out_unlock; 2787 2788 if (!refcount_dec_and_test(&node->refcount)) 2789 goto out_unlock; 2790 
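/* The last reference was dropped, so remove the node from the hash
 * table and release its KVDL entry.
 */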
2791 mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node); 2792 2793 out_unlock: 2794 mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); 2795 } 2796 2797 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp) 2798 { 2799 int err; 2800 2801 err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht, 2802 &mlxsw_sp_ipv6_addr_ht_params); 2803 if (err) 2804 return err; 2805 2806 mutex_init(&mlxsw_sp->ipv6_addr_ht_lock); 2807 return 0; 2808 } 2809 2810 static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp) 2811 { 2812 mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock); 2813 rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht); 2814 } 2815 2816 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2817 const struct mlxsw_bus_info *mlxsw_bus_info, 2818 struct netlink_ext_ack *extack) 2819 { 2820 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2821 struct devlink *devlink = priv_to_devlink(mlxsw_core); 2822 int err; 2823 2824 mlxsw_sp->core = mlxsw_core; 2825 mlxsw_sp->bus_info = mlxsw_bus_info; 2826 2827 mlxsw_sp_parsing_init(mlxsw_sp); 2828 mlxsw_core_emad_string_tlv_enable(mlxsw_core); 2829 2830 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2831 if (err) { 2832 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 2833 return err; 2834 } 2835 2836 err = mlxsw_sp_kvdl_init(mlxsw_sp); 2837 if (err) { 2838 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); 2839 return err; 2840 } 2841 2842 err = mlxsw_sp_fids_init(mlxsw_sp); 2843 if (err) { 2844 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); 2845 goto err_fids_init; 2846 } 2847 2848 err = mlxsw_sp_policers_init(mlxsw_sp); 2849 if (err) { 2850 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); 2851 goto err_policers_init; 2852 } 2853 2854 err = mlxsw_sp_traps_init(mlxsw_sp); 2855 if (err) { 2856 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); 2857 goto err_traps_init; 2858 } 2859 2860 err = mlxsw_sp_devlink_traps_init(mlxsw_sp); 2861 if (err) { 2862 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n"); 2863 goto err_devlink_traps_init; 2864 } 2865 2866 err = mlxsw_sp_buffers_init(mlxsw_sp); 2867 if (err) { 2868 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 2869 goto err_buffers_init; 2870 } 2871 2872 err = mlxsw_sp_lag_init(mlxsw_sp); 2873 if (err) { 2874 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 2875 goto err_lag_init; 2876 } 2877 2878 /* Initialize SPAN before router and switchdev, so that those components 2879 * can call mlxsw_sp_span_respin(). 
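 * Respin re-resolves active SPAN (mirroring) entries after such
 * configuration changes.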
2880 */ 2881 err = mlxsw_sp_span_init(mlxsw_sp); 2882 if (err) { 2883 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); 2884 goto err_span_init; 2885 } 2886 2887 err = mlxsw_sp_switchdev_init(mlxsw_sp); 2888 if (err) { 2889 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 2890 goto err_switchdev_init; 2891 } 2892 2893 err = mlxsw_sp_counter_pool_init(mlxsw_sp); 2894 if (err) { 2895 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); 2896 goto err_counter_pool_init; 2897 } 2898 2899 err = mlxsw_sp_afa_init(mlxsw_sp); 2900 if (err) { 2901 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); 2902 goto err_afa_init; 2903 } 2904 2905 err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp); 2906 if (err) { 2907 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n"); 2908 goto err_ipv6_addr_ht_init; 2909 } 2910 2911 err = mlxsw_sp_nve_init(mlxsw_sp); 2912 if (err) { 2913 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); 2914 goto err_nve_init; 2915 } 2916 2917 err = mlxsw_sp_acl_init(mlxsw_sp); 2918 if (err) { 2919 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); 2920 goto err_acl_init; 2921 } 2922 2923 err = mlxsw_sp_router_init(mlxsw_sp, extack); 2924 if (err) { 2925 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 2926 goto err_router_init; 2927 } 2928 2929 if (mlxsw_sp->bus_info->read_frc_capable) { 2930 /* NULL is a valid return value from clock_init */ 2931 mlxsw_sp->clock = 2932 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp, 2933 mlxsw_sp->bus_info->dev); 2934 if (IS_ERR(mlxsw_sp->clock)) { 2935 err = PTR_ERR(mlxsw_sp->clock); 2936 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n"); 2937 goto err_ptp_clock_init; 2938 } 2939 } 2940 2941 if (mlxsw_sp->clock) { 2942 /* NULL is a valid return value from ptp_ops->init */ 2943 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp); 2944 if (IS_ERR(mlxsw_sp->ptp_state)) { 2945 err = PTR_ERR(mlxsw_sp->ptp_state); 2946 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n"); 2947 goto err_ptp_init; 2948 } 2949 } 2950 2951 /* Initialize netdevice notifier after router and SPAN are initialized, 2952 * so that the event handler can use router structures and call SPAN 2953 * respin.
2954 */ 2955 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; 2956 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 2957 &mlxsw_sp->netdevice_nb); 2958 if (err) { 2959 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); 2960 goto err_netdev_notifier; 2961 } 2962 2963 err = mlxsw_sp_dpipe_init(mlxsw_sp); 2964 if (err) { 2965 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); 2966 goto err_dpipe_init; 2967 } 2968 2969 err = mlxsw_sp_port_module_info_init(mlxsw_sp); 2970 if (err) { 2971 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n"); 2972 goto err_port_module_info_init; 2973 } 2974 2975 err = rhashtable_init(&mlxsw_sp->sample_trigger_ht, 2976 &mlxsw_sp_sample_trigger_ht_params); 2977 if (err) { 2978 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n"); 2979 goto err_sample_trigger_init; 2980 } 2981 2982 devl_lock(devlink); 2983 err = mlxsw_sp_ports_create(mlxsw_sp); 2984 devl_unlock(devlink); 2985 if (err) { 2986 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 2987 goto err_ports_create; 2988 } 2989 2990 return 0; 2991 2992 err_ports_create: 2993 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 2994 err_sample_trigger_init: 2995 mlxsw_sp_port_module_info_fini(mlxsw_sp); 2996 err_port_module_info_init: 2997 mlxsw_sp_dpipe_fini(mlxsw_sp); 2998 err_dpipe_init: 2999 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3000 &mlxsw_sp->netdevice_nb); 3001 err_netdev_notifier: 3002 if (mlxsw_sp->clock) 3003 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3004 err_ptp_init: 3005 if (mlxsw_sp->clock) 3006 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3007 err_ptp_clock_init: 3008 mlxsw_sp_router_fini(mlxsw_sp); 3009 err_router_init: 3010 mlxsw_sp_acl_fini(mlxsw_sp); 3011 err_acl_init: 3012 mlxsw_sp_nve_fini(mlxsw_sp); 3013 err_nve_init: 3014 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3015 err_ipv6_addr_ht_init: 3016 mlxsw_sp_afa_fini(mlxsw_sp); 3017 err_afa_init: 3018 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3019 err_counter_pool_init: 3020 mlxsw_sp_switchdev_fini(mlxsw_sp); 3021 err_switchdev_init: 3022 mlxsw_sp_span_fini(mlxsw_sp); 3023 err_span_init: 3024 mlxsw_sp_lag_fini(mlxsw_sp); 3025 err_lag_init: 3026 mlxsw_sp_buffers_fini(mlxsw_sp); 3027 err_buffers_init: 3028 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3029 err_devlink_traps_init: 3030 mlxsw_sp_traps_fini(mlxsw_sp); 3031 err_traps_init: 3032 mlxsw_sp_policers_fini(mlxsw_sp); 3033 err_policers_init: 3034 mlxsw_sp_fids_fini(mlxsw_sp); 3035 err_fids_init: 3036 mlxsw_sp_kvdl_fini(mlxsw_sp); 3037 mlxsw_sp_parsing_fini(mlxsw_sp); 3038 return err; 3039 } 3040 3041 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, 3042 const struct mlxsw_bus_info *mlxsw_bus_info, 3043 struct netlink_ext_ack *extack) 3044 { 3045 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3046 3047 mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops; 3048 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; 3049 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; 3050 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; 3051 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; 3052 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; 3053 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; 3054 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; 3055 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; 3056 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; 3057 mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops; 3058 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; 3059 mlxsw_sp->ptp_ops = 
&mlxsw_sp1_ptp_ops; 3060 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; 3061 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; 3062 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; 3063 mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops; 3064 mlxsw_sp->router_ops = &mlxsw_sp1_router_ops; 3065 mlxsw_sp->listeners = mlxsw_sp1_listener; 3066 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); 3067 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; 3068 3069 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3070 } 3071 3072 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, 3073 const struct mlxsw_bus_info *mlxsw_bus_info, 3074 struct netlink_ext_ack *extack) 3075 { 3076 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3077 3078 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3079 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3080 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3081 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3082 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3083 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3084 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3085 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3086 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3087 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3088 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3089 mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops; 3090 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3091 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3092 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; 3093 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3094 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3095 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3096 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3097 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; 3098 3099 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3100 } 3101 3102 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, 3103 const struct mlxsw_bus_info *mlxsw_bus_info, 3104 struct netlink_ext_ack *extack) 3105 { 3106 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3107 3108 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3109 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; 3110 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3111 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; 3112 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3113 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3114 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3115 mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; 3116 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3117 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3118 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3119 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3120 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3121 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3122 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3123 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3124 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3125 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3126 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3127 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; 3128 3129 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3130 } 3131 3132 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, 3133 const struct mlxsw_bus_info *mlxsw_bus_info, 3134 struct netlink_ext_ack *extack) 3135 { 3136 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3137 3138 mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; 3139 mlxsw_sp->kvdl_ops = 
&mlxsw_sp2_kvdl_ops; 3140 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; 3141 mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops; 3142 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; 3143 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; 3144 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; 3145 mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops; 3146 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; 3147 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; 3148 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; 3149 mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; 3150 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; 3151 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; 3152 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; 3153 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; 3154 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; 3155 mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; 3156 mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; 3157 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; 3158 3159 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); 3160 } 3161 3162 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 3163 { 3164 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3165 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3166 3167 devl_lock(devlink); 3168 mlxsw_sp_ports_remove(mlxsw_sp); 3169 devl_unlock(devlink); 3170 3171 rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); 3172 mlxsw_sp_port_module_info_fini(mlxsw_sp); 3173 mlxsw_sp_dpipe_fini(mlxsw_sp); 3174 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), 3175 &mlxsw_sp->netdevice_nb); 3176 if (mlxsw_sp->clock) { 3177 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state); 3178 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock); 3179 } 3180 mlxsw_sp_router_fini(mlxsw_sp); 3181 mlxsw_sp_acl_fini(mlxsw_sp); 3182 mlxsw_sp_nve_fini(mlxsw_sp); 3183 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); 3184 mlxsw_sp_afa_fini(mlxsw_sp); 3185 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3186 mlxsw_sp_switchdev_fini(mlxsw_sp); 3187 mlxsw_sp_span_fini(mlxsw_sp); 3188 mlxsw_sp_lag_fini(mlxsw_sp); 3189 mlxsw_sp_buffers_fini(mlxsw_sp); 3190 mlxsw_sp_devlink_traps_fini(mlxsw_sp); 3191 mlxsw_sp_traps_fini(mlxsw_sp); 3192 mlxsw_sp_policers_fini(mlxsw_sp); 3193 mlxsw_sp_fids_fini(mlxsw_sp); 3194 mlxsw_sp_kvdl_fini(mlxsw_sp); 3195 mlxsw_sp_parsing_fini(mlxsw_sp); 3196 } 3197 3198 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated 3199 * 802.1Q FIDs 3200 */ 3201 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ 3202 VLAN_VID_MASK - 1) 3203 3204 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { 3205 .used_max_mid = 1, 3206 .max_mid = MLXSW_SP_MID_MAX, 3207 .used_flood_tables = 1, 3208 .used_flood_mode = 1, 3209 .flood_mode = 3, 3210 .max_fid_flood_tables = 3, 3211 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3212 .used_max_ib_mc = 1, 3213 .max_ib_mc = 0, 3214 .used_max_pkey = 1, 3215 .max_pkey = 0, 3216 .used_kvd_sizes = 1, 3217 .kvd_hash_single_parts = 59, 3218 .kvd_hash_double_parts = 41, 3219 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, 3220 .swid_config = { 3221 { 3222 .used_type = 1, 3223 .type = MLXSW_PORT_SWID_TYPE_ETH, 3224 } 3225 }, 3226 }; 3227 3228 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { 3229 .used_max_mid = 1, 3230 .max_mid = MLXSW_SP_MID_MAX, 3231 .used_flood_tables = 1, 3232 .used_flood_mode = 1, 3233 .flood_mode = 3, 3234 .max_fid_flood_tables = 3, 3235 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, 3236 .used_max_ib_mc = 1, 3237 .max_ib_mc = 0, 3238 .used_max_pkey = 1, 
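/* Spectrum is an Ethernet switch; the InfiniBand-specific resources
 * above and below are unused and therefore set to zero.
 */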
3239 .max_pkey = 0, 3240 .used_kvh_xlt_cache_mode = 1, 3241 .kvh_xlt_cache_mode = 1, 3242 .swid_config = { 3243 { 3244 .used_type = 1, 3245 .type = MLXSW_PORT_SWID_TYPE_ETH, 3246 } 3247 }, 3248 }; 3249 3250 static void 3251 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, 3252 struct devlink_resource_size_params *kvd_size_params, 3253 struct devlink_resource_size_params *linear_size_params, 3254 struct devlink_resource_size_params *hash_double_size_params, 3255 struct devlink_resource_size_params *hash_single_size_params) 3256 { 3257 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3258 KVD_SINGLE_MIN_SIZE); 3259 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 3260 KVD_DOUBLE_MIN_SIZE); 3261 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3262 u32 linear_size_min = 0; 3263 3264 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, 3265 MLXSW_SP_KVD_GRANULARITY, 3266 DEVLINK_RESOURCE_UNIT_ENTRY); 3267 devlink_resource_size_params_init(linear_size_params, linear_size_min, 3268 kvd_size - single_size_min - 3269 double_size_min, 3270 MLXSW_SP_KVD_GRANULARITY, 3271 DEVLINK_RESOURCE_UNIT_ENTRY); 3272 devlink_resource_size_params_init(hash_double_size_params, 3273 double_size_min, 3274 kvd_size - single_size_min - 3275 linear_size_min, 3276 MLXSW_SP_KVD_GRANULARITY, 3277 DEVLINK_RESOURCE_UNIT_ENTRY); 3278 devlink_resource_size_params_init(hash_single_size_params, 3279 single_size_min, 3280 kvd_size - double_size_min - 3281 linear_size_min, 3282 MLXSW_SP_KVD_GRANULARITY, 3283 DEVLINK_RESOURCE_UNIT_ENTRY); 3284 } 3285 3286 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3287 { 3288 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3289 struct devlink_resource_size_params hash_single_size_params; 3290 struct devlink_resource_size_params hash_double_size_params; 3291 struct devlink_resource_size_params linear_size_params; 3292 struct devlink_resource_size_params kvd_size_params; 3293 u32 kvd_size, single_size, double_size, linear_size; 3294 const struct mlxsw_config_profile *profile; 3295 int err; 3296 3297 profile = &mlxsw_sp1_config_profile; 3298 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3299 return -EIO; 3300 3301 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, 3302 &linear_size_params, 3303 &hash_double_size_params, 3304 &hash_single_size_params); 3305 3306 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3307 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3308 kvd_size, MLXSW_SP_RESOURCE_KVD, 3309 DEVLINK_RESOURCE_ID_PARENT_TOP, 3310 &kvd_size_params); 3311 if (err) 3312 return err; 3313 3314 linear_size = profile->kvd_linear_size; 3315 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR, 3316 linear_size, 3317 MLXSW_SP_RESOURCE_KVD_LINEAR, 3318 MLXSW_SP_RESOURCE_KVD, 3319 &linear_size_params); 3320 if (err) 3321 return err; 3322 3323 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); 3324 if (err) 3325 return err; 3326 3327 double_size = kvd_size - linear_size; 3328 double_size *= profile->kvd_hash_double_parts; 3329 double_size /= profile->kvd_hash_double_parts + 3330 profile->kvd_hash_single_parts; 3331 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY); 3332 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE, 3333 double_size, 3334 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3335 MLXSW_SP_RESOURCE_KVD, 3336 &hash_double_size_params); 3337 if (err) 3338 return err; 3339 3340 single_size = kvd_size - 
double_size - linear_size; 3341 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE, 3342 single_size, 3343 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3344 MLXSW_SP_RESOURCE_KVD, 3345 &hash_single_size_params); 3346 if (err) 3347 return err; 3348 3349 return 0; 3350 } 3351 3352 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core) 3353 { 3354 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3355 struct devlink_resource_size_params kvd_size_params; 3356 u32 kvd_size; 3357 3358 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 3359 return -EIO; 3360 3361 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 3362 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size, 3363 MLXSW_SP_KVD_GRANULARITY, 3364 DEVLINK_RESOURCE_UNIT_ENTRY); 3365 3366 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 3367 kvd_size, MLXSW_SP_RESOURCE_KVD, 3368 DEVLINK_RESOURCE_ID_PARENT_TOP, 3369 &kvd_size_params); 3370 } 3371 3372 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core) 3373 { 3374 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3375 struct devlink_resource_size_params span_size_params; 3376 u32 max_span; 3377 3378 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN)) 3379 return -EIO; 3380 3381 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN); 3382 devlink_resource_size_params_init(&span_size_params, max_span, max_span, 3383 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3384 3385 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN, 3386 max_span, MLXSW_SP_RESOURCE_SPAN, 3387 DEVLINK_RESOURCE_ID_PARENT_TOP, 3388 &span_size_params); 3389 } 3390 3391 static int 3392 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core) 3393 { 3394 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3395 struct devlink_resource_size_params size_params; 3396 u8 max_rif_mac_profiles; 3397 3398 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) 3399 max_rif_mac_profiles = 1; 3400 else 3401 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, 3402 MAX_RIF_MAC_PROFILES); 3403 devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, 3404 max_rif_mac_profiles, 1, 3405 DEVLINK_RESOURCE_UNIT_ENTRY); 3406 3407 return devlink_resource_register(devlink, 3408 "rif_mac_profiles", 3409 max_rif_mac_profiles, 3410 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, 3411 DEVLINK_RESOURCE_ID_PARENT_TOP, 3412 &size_params); 3413 } 3414 3415 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 3416 { 3417 int err; 3418 3419 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 3420 if (err) 3421 return err; 3422 3423 err = mlxsw_sp_resources_span_register(mlxsw_core); 3424 if (err) 3425 goto err_resources_span_register; 3426 3427 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3428 if (err) 3429 goto err_resources_counter_register; 3430 3431 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3432 if (err) 3433 goto err_policer_resources_register; 3434 3435 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3436 if (err) 3437 goto err_resources_rif_mac_profile_register; 3438 3439 return 0; 3440 3441 err_resources_rif_mac_profile_register: 3442 err_policer_resources_register: 3443 err_resources_counter_register: 3444 err_resources_span_register: 3445 devlink_resources_unregister(priv_to_devlink(mlxsw_core)); 3446 return err; 3447 } 3448 3449 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 3450 { 3451 int err; 3452 3453 err = 
mlxsw_sp2_resources_kvd_register(mlxsw_core); 3454 if (err) 3455 return err; 3456 3457 err = mlxsw_sp_resources_span_register(mlxsw_core); 3458 if (err) 3459 goto err_resources_span_register; 3460 3461 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3462 if (err) 3463 goto err_resources_counter_register; 3464 3465 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3466 if (err) 3467 goto err_policer_resources_register; 3468 3469 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3470 if (err) 3471 goto err_resources_rif_mac_profile_register; 3472 3473 return 0; 3474 3475 err_resources_rif_mac_profile_register: 3476 err_policer_resources_register: 3477 err_resources_counter_register: 3478 err_resources_span_register: 3479 devlink_resources_unregister(priv_to_devlink(mlxsw_core)); 3480 return err; 3481 } 3482 3483 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3484 const struct mlxsw_config_profile *profile, 3485 u64 *p_single_size, u64 *p_double_size, 3486 u64 *p_linear_size) 3487 { 3488 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3489 u32 double_size; 3490 int err; 3491 3492 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3493 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 3494 return -EIO; 3495 3496 /* The hash part is what is left of the KVD after the 3497 * linear part is taken out. It is split into the single 3498 * and double sizes according to the parts ratio from the 3499 * profile. Both sizes must be multiples of the 3500 * granularity from the profile. If the user provided 3501 * the sizes, they are obtained via devlink; otherwise 3502 * the profile defaults are used. */ 3503 err = devlink_resource_size_get(devlink, 3504 MLXSW_SP_RESOURCE_KVD_LINEAR, 3505 p_linear_size); 3506 if (err) 3507 *p_linear_size = profile->kvd_linear_size; 3508 3509 err = devlink_resource_size_get(devlink, 3510 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3511 p_double_size); 3512 if (err) { 3513 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3514 *p_linear_size; 3515 double_size *= profile->kvd_hash_double_parts; 3516 double_size /= profile->kvd_hash_double_parts + 3517 profile->kvd_hash_single_parts; 3518 *p_double_size = rounddown(double_size, 3519 MLXSW_SP_KVD_GRANULARITY); 3520 } 3521 3522 err = devlink_resource_size_get(devlink, 3523 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3524 p_single_size); 3525 if (err) 3526 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3527 *p_double_size - *p_linear_size; 3528 3529 /* Check results are legal.
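 * Each size must respect the firmware-advertised minimum, and the
 * linear part must not exceed the total KVD size.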
*/ 3530 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3531 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3532 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3533 return -EIO; 3534 3535 return 0; 3536 } 3537 3538 static int 3539 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 3540 struct devlink_param_gset_ctx *ctx) 3541 { 3542 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3543 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3544 3545 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 3546 return 0; 3547 } 3548 3549 static int 3550 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 3551 struct devlink_param_gset_ctx *ctx) 3552 { 3553 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3554 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3555 3556 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 3557 } 3558 3559 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 3560 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3561 "acl_region_rehash_interval", 3562 DEVLINK_PARAM_TYPE_U32, 3563 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3564 mlxsw_sp_params_acl_region_rehash_intrvl_get, 3565 mlxsw_sp_params_acl_region_rehash_intrvl_set, 3566 NULL), 3567 }; 3568 3569 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 3570 { 3571 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3572 union devlink_param_value value; 3573 int err; 3574 3575 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 3576 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3577 if (err) 3578 return err; 3579 3580 value.vu32 = 0; 3581 devlink_param_driverinit_value_set(devlink, 3582 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3583 value); 3584 return 0; 3585 } 3586 3587 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 3588 { 3589 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3590 mlxsw_sp2_devlink_params, 3591 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3592 } 3593 3594 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3595 struct sk_buff *skb, u16 local_port) 3596 { 3597 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3598 3599 skb_pull(skb, MLXSW_TXHDR_LEN); 3600 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3601 } 3602 3603 static struct mlxsw_driver mlxsw_sp1_driver = { 3604 .kind = mlxsw_sp1_driver_name, 3605 .priv_size = sizeof(struct mlxsw_sp), 3606 .fw_req_rev = &mlxsw_sp1_fw_rev, 3607 .fw_filename = MLXSW_SP1_FW_FILENAME, 3608 .init = mlxsw_sp1_init, 3609 .fini = mlxsw_sp_fini, 3610 .port_split = mlxsw_sp_port_split, 3611 .port_unsplit = mlxsw_sp_port_unsplit, 3612 .sb_pool_get = mlxsw_sp_sb_pool_get, 3613 .sb_pool_set = mlxsw_sp_sb_pool_set, 3614 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3615 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3616 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3617 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3618 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3619 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3620 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3621 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3622 .trap_init = mlxsw_sp_trap_init, 3623 .trap_fini = mlxsw_sp_trap_fini, 3624 .trap_action_set = mlxsw_sp_trap_action_set, 3625 .trap_group_init = mlxsw_sp_trap_group_init, 3626 .trap_group_set = 
mlxsw_sp_trap_group_set, 3627 .trap_policer_init = mlxsw_sp_trap_policer_init, 3628 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3629 .trap_policer_set = mlxsw_sp_trap_policer_set, 3630 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3631 .txhdr_construct = mlxsw_sp_txhdr_construct, 3632 .resources_register = mlxsw_sp1_resources_register, 3633 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3634 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3635 .txhdr_len = MLXSW_TXHDR_LEN, 3636 .profile = &mlxsw_sp1_config_profile, 3637 }; 3638 3639 static struct mlxsw_driver mlxsw_sp2_driver = { 3640 .kind = mlxsw_sp2_driver_name, 3641 .priv_size = sizeof(struct mlxsw_sp), 3642 .fw_req_rev = &mlxsw_sp2_fw_rev, 3643 .fw_filename = MLXSW_SP2_FW_FILENAME, 3644 .init = mlxsw_sp2_init, 3645 .fini = mlxsw_sp_fini, 3646 .port_split = mlxsw_sp_port_split, 3647 .port_unsplit = mlxsw_sp_port_unsplit, 3648 .sb_pool_get = mlxsw_sp_sb_pool_get, 3649 .sb_pool_set = mlxsw_sp_sb_pool_set, 3650 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3651 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3652 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3653 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3654 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3655 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3656 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3657 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3658 .trap_init = mlxsw_sp_trap_init, 3659 .trap_fini = mlxsw_sp_trap_fini, 3660 .trap_action_set = mlxsw_sp_trap_action_set, 3661 .trap_group_init = mlxsw_sp_trap_group_init, 3662 .trap_group_set = mlxsw_sp_trap_group_set, 3663 .trap_policer_init = mlxsw_sp_trap_policer_init, 3664 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3665 .trap_policer_set = mlxsw_sp_trap_policer_set, 3666 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3667 .txhdr_construct = mlxsw_sp_txhdr_construct, 3668 .resources_register = mlxsw_sp2_resources_register, 3669 .params_register = mlxsw_sp2_params_register, 3670 .params_unregister = mlxsw_sp2_params_unregister, 3671 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3672 .txhdr_len = MLXSW_TXHDR_LEN, 3673 .profile = &mlxsw_sp2_config_profile, 3674 }; 3675 3676 static struct mlxsw_driver mlxsw_sp3_driver = { 3677 .kind = mlxsw_sp3_driver_name, 3678 .priv_size = sizeof(struct mlxsw_sp), 3679 .fw_req_rev = &mlxsw_sp3_fw_rev, 3680 .fw_filename = MLXSW_SP3_FW_FILENAME, 3681 .init = mlxsw_sp3_init, 3682 .fini = mlxsw_sp_fini, 3683 .port_split = mlxsw_sp_port_split, 3684 .port_unsplit = mlxsw_sp_port_unsplit, 3685 .sb_pool_get = mlxsw_sp_sb_pool_get, 3686 .sb_pool_set = mlxsw_sp_sb_pool_set, 3687 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3688 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3689 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3690 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3691 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3692 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3693 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3694 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3695 .trap_init = mlxsw_sp_trap_init, 3696 .trap_fini = mlxsw_sp_trap_fini, 3697 .trap_action_set = mlxsw_sp_trap_action_set, 3698 .trap_group_init = mlxsw_sp_trap_group_init, 3699 .trap_group_set = mlxsw_sp_trap_group_set, 3700 .trap_policer_init = mlxsw_sp_trap_policer_init, 3701 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3702 .trap_policer_set = mlxsw_sp_trap_policer_set, 3703 .trap_policer_counter_get = 
static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind = mlxsw_sp4_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp4_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
				   struct netdev_nested_priv *priv)
{
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}
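/* RCU variant of mlxsw_sp_port_dev_lower_find(); must be called under
 * rcu_read_lock().
 */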
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
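/* The increased parsing depth is reference counted: the first user configures
 * it via the MPRS register and the last one restores the default. parsing.lock
 * serializes these updates against mlxsw_sp_parsing_vxlan_udp_dport_set().
 */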
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}

int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}
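/* Joining a LAG: create the LAG in hardware on first use, map the port to a
 * free index within the LAG and migrate the router interface, if any, from
 * the port's default VLAN to the LAG device.
 */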
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if exists */
	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
					     lag_dev, extack);
	if (err)
		goto err_router_join;

	return 0;

err_router_join:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are members of
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
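/* Enable / disable the port as a collector and distributor of its LAG,
 * mirroring the tx_enabled state reported for the LAG lower device.
 */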
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
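/* Prepare the port for enslavement to an OVS master: move it to virtual port
 * mode, set all VLANs to the forwarding STP state, allow the full VLAN range
 * and disable learning on every VLAN.
 */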
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}
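/* Validate topology changes above a switch port on PRECHANGEUPPER and
 * reflect them in the device on CHANGEUPPER: bridge, LAG, OVS, macvlan and
 * VLAN uppers are supported, subject to the checks below.
 */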
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
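/* CHANGELOWERSTATE is used to propagate the LAG driver's tx_enabled state of
 * a member port into the port's collector / distributor settings.
 */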
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
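/* Replay the event on the VLAN device on top of a LAG for each switch port
 * member of that LAG.
 */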
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
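/* Dispatch events on a VLAN device according to its real device: switch
 * port, LAG or bridge.
 */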
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}
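/* VxLAN device events: on enslavement to an offloaded bridge, join the
 * device to the bridge, unless the bridge is VLAN-aware, in which case the
 * join is deferred until the VNI is mapped to a VLAN. NETDEV_PRE_UP and
 * NETDEV_DOWN track the device's administrative state while enslaved.
 */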
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

static bool mlxsw_sp_netdevice_event_is_router(unsigned long event)
{
	switch (event) {
	case NETDEV_PRE_CHANGEADDR:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGEMTU:
	case NETDEV_OFFLOAD_XSTATS_ENABLE:
	case NETDEV_OFFLOAD_XSTATS_DISABLE:
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		return true;
	default:
		return false;
	}
}
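/* Top-level netdevice notifier: invalidate SPAN entries towards unregistered
 * netdevices, re-evaluate SPAN and then dispatch the event according to the
 * netdevice type.
 */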
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdevice_event_is_router(event))
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
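/* Module init: the address validator notifiers are registered first, then
 * the per-ASIC core drivers and finally the PCI drivers; errors unwind in
 * reverse order.
 */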
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
	if (err)
		goto err_sp4_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
	if (err)
		goto err_sp4_pci_driver_register;

	return 0;

err_sp4_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
err_sp4_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);