// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
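/* Taken together, the items above describe the 16-byte Tx header
 * (MLXSW_TXHDR_LEN) that the driver pushes in front of every packet it
 * transmits, roughly:
 *
 *   dword 0: version, ctl, proto, rx_is_router, fid_valid, swid,
 *            control_tclass, etclass
 *   dword 1: port_mid
 *   dword 2: fid
 *   dword 3: type
 *
 * See mlxsw_sp_txhdr_construct() below for how control packets fill it in.
 */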
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
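/* Read the switch's base MAC address from the SPAD register. Per-port MAC
 * addresses are later derived from it in mlxsw_sp_port_dev_addr_init().
 */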
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
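/* Map an Ethernet VLAN protocol to the 'sver_type' value programmed through
 * the SPEVET and SPVID registers: 0 selects 802.1Q, 1 selects 802.1AD.
 */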
int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
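/* For example, an accepted PMLP mapping for a 4-lane (width 4) port has all
 * four entries pointing at the same module and slot, with sequential TX
 * lanes such as 4, 5, 6, 7; anything else is rejected above.
 */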
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
					       pmlp_pl, port_mapping);
}

static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
				  port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
					      port_mapping->slot_index);
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
				    port_mapping->module);
	return err;
}

static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 slot_index, u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.slot_index,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return err;
}
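/* ndo_stop undoes ndo_open in reverse order: stop the Tx queue, set the port
 * administratively down, then drop the module's port reference.
 */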
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
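/* Fold the per-CPU SW counters. The u64_stats retry loop makes the 64-bit
 * reads consistent even on 32-bit hosts, where they are not atomic.
 */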
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
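/* Periodic refresh of the cached HW statistics. The work re-arms itself even
 * while the carrier is down, so caching resumes as soon as the port comes
 * back up.
 */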
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}
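/* Each {port, VID} pair the port is a member of is modeled by a
 * mlxsw_sp_port_vlan instance: creation installs the VID in the port's VLAN
 * filter, and destruction detaches it from the bridge or router before
 * removing the VID again.
 */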
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}
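/* Flow blocks can be bound to a port either through clsact ingress/egress
 * (e.g. "tc qdisc add dev swp1 clsact") or through RED early_drop/mark
 * qevents; any other binder type is rejected below.
 */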
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
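/* Generic helper for ndo_set_features: invoke the handler only when the
 * requested feature actually flips, and fold the result back into
 * dev->features on success.
 */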
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open = mlxsw_sp_port_open,
	.ndo_stop = mlxsw_sp_port_stop,
	.ndo_start_xmit = mlxsw_sp_port_xmit,
	.ndo_setup_tc = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
	.ndo_set_features = mlxsw_sp_set_features,
	.ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
	.ndo_eth_ioctl = mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
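/* The QEEC helpers above each configure one element of the port's egress
 * scheduling hierarchy (DWRR weight, max shaper, min shaper).
 * mlxsw_sp_port_ets_init() below wires the elements up so that TCs feed
 * subgroups, which all feed one group under the port, with traffic classes
 * 8..15 acting as the multicast companions of TCs 0..7.
 */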
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
						    module, &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}

static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u16 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;
	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
			      split_port_subnumber, slot_index);
	return 0;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

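	/* The private area of the netdev holds the mlxsw_sp_port itself. */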
	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

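	/* QTCTM switches the port to multicast-aware TC mode, in which each
	 * TC has the multicast companion set up by mlxsw_sp_port_ets_init().
	 */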
	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false, so that the local port treats
	 * only packets with an 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
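/* Teardown mirrors mlxsw_sp_port_create() in reverse order. */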
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}

static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

static bool mlxsw_sp_local_port_valid(u16 local_port)
{
	return local_port != MLXSW_PORT_CPU_PORT;
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (!mlxsw_sp_local_port_valid(local_port))
		return false;
	return mlxsw_sp->ports[local_port] != NULL;
}

static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
					   u16 local_port, bool enable)
{
	char pmecr_pl[MLXSW_REG_PMECR_LEN];

	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
			     enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
				      MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
}

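/* Port module mapping changes are reported via the PMLPE trap in atomic
 * context, so the listener below only copies the PMLP payload onto a queue;
 * a work item then parses it and creates the port. PMECR (above) controls
 * whether the device generates these events for a given local port.
 */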
struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};

static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		kfree(event);
	}
}

static void
mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
				    char *pmlp_pl, void *priv)
{
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping_event *event;
	struct mlxsw_sp *mlxsw_sp = priv;
	u16 local_port;

	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;

	events = &mlxsw_sp->port_mapping_events;
	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
	spin_lock(&events->queue_lock);
	list_add_tail(&event->list, &events->queue);
	spin_unlock(&events->queue_lock);
	mlxsw_core_schedule_work(&events->work);
}

static void
__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;

	events = &mlxsw_sp->port_mapping_events;

	/* Caller needs to make sure that no new event is going to appear. */
	cancel_work_sync(&events->work);
	list_for_each_entry_safe(event, next_event, &events->queue, list) {
		list_del(&event->list);
		kfree(event);
	}
}

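/* When removing ports, first tell the device to stop generating mapping
 * events and drain any queued ones; only then take the devlink lock and
 * remove the ports themselves.
 */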
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	for (i = 1; i < max_ports; i++)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);

	devl_lock(devlink);
	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	devl_unlock(devlink);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

static void
mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
			       bool (*selector)(void *priv, u16 local_port),
			       void *priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
	int i;

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	devl_lock(devlink);
	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	devl_unlock(devlink);
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	devl_unlock(devlink);
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}

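/* Cache the module mapping (width, module, lane) of every front-panel port
 * at init time. Index 0 is skipped since local port 0 is the CPU port and
 * has no module mapping.
 */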
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
		if (err)
			goto err_port_module_info_get;
	}
	return 0;

err_port_module_info_get:
	kfree(mlxsw_sp->port_mapping);
	return err;
}

static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}

static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 unsigned int count,
					 const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count; i++) {
		u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		port_mapping = &mlxsw_sp->port_mapping[local_port];
		if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
			continue;
		mlxsw_sp_port_create(mlxsw_sp, local_port,
				     false, port_mapping);
	}
}

static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
		return mlxsw_sp->ports[local_port];
	return NULL;
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}

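/* devlink port unsplit handler: remove the ports created by the earlier
 * split and recreate the original unsplit ports from the cached mapping.
 */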
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}

static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < TC_MAX_QUEUE; i++)
		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u16 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}

static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

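/* Rx listeners: the no_mark variant accounts the packet and injects it into
 * the stack as-is; the mark variants additionally set skb->offload_fwd_mark
 * (and skb->offload_l3_fwd_mark) to tell the stack that the packet was
 * already forwarded (bridged/routed) in hardware.
 */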
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u16 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};

static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};

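/* Program policers for trap groups whose traffic is rate-limited towards the
 * CPU. Only a few groups get an explicit policer here. Note: the exact
 * meaning of rate/burst_size follows the QPCR register definition in reg.h;
 * with ir_units M and is_bytes=false the rate of 1024 is presumably packets
 * per second, and burst_size is presumably a log2-encoded burst allowance.
 */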
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}

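/* Mirror of mlxsw_sp_traps_init(): the per-ASIC listeners are unregistered
 * before the common ones, i.e. in reverse registration order.
 */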
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count, mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init = mlxsw_sp1_ptp_clock_init,
	.clock_fini = mlxsw_sp1_ptp_clock_fini,
	.init = mlxsw_sp1_ptp_init,
	.fini = mlxsw_sp1_ptp_fini,
	.receive = mlxsw_sp1_ptp_receive,
	.transmitted = mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp1_ptp_shaper_work,
	.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats = mlxsw_sp1_get_stats,
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini = mlxsw_sp2_ptp_fini,
	.receive = mlxsw_sp2_ptp_receive,
	.transmitted = mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp2_ptp_shaper_work,
	.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats = mlxsw_sp2_get_stats,
};

struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};

static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}

/* RCU read lock must be held */
struct mlxsw_sp_sample_params *
mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	mlxsw_sp_sample_trigger_key_init(&key, trigger);
	trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
					 mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return NULL;

	return &trigger_node->params;
}

static int
mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_sample_trigger *trigger,
				  const struct mlxsw_sp_sample_params *params)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	int err;

	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
	if (!trigger_node)
		return -ENOMEM;

	trigger_node->trigger = *trigger;
	trigger_node->params = *params;
	refcount_set(&trigger_node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
				     &trigger_node->ht_node,
				     mlxsw_sp_sample_trigger_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(trigger_node);
	return err;
}

static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}

int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}

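/* Drop one reference taken by mlxsw_sp_sample_trigger_params_set(). The node
 * is freed with kfree_rcu() so that lockless readers in
 * mlxsw_sp_sample_trigger_params_lookup() (running under the RCU read lock)
 * remain safe.
 */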
void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return;

	if (!refcount_dec_and_test(&trigger_node->refcount))
		return;

	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789

static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
	mutex_init(&mlxsw_sp->parsing.lock);
}

static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
}

struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;
	struct rhash_head ht_node;
	u32 kvdl_index;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};

static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}

static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}

int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}

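/* Release a reference on an IPv6 address entry. On the last put the entry is
 * removed from the hash table and its KVDL entry is freed.
 */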
void
mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
{
	struct mlxsw_sp_ipv6_addr_node *node;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (WARN_ON(!node))
		goto out_unlock;

	if (!refcount_dec_and_test(&node->refcount))
		goto out_unlock;

	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
}

static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
			      &mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		return err;

	mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
	return 0;
}

static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}

static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);
	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}

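/* The per-ASIC init functions below only differ in the ops tables and
 * constants they plug into mlxsw_sp before calling the common
 * mlxsw_sp_init(). Spectrum-2, Spectrum-3 and Spectrum-4 share most of the
 * Spectrum-2 ops.
 */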
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}

static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

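/* Worked example of the KVD split implied by the 59/41 parts ratio above
 * (illustrative numbers only; assumes MLXSW_SP_KVD_GRANULARITY is 128
 * entries): with kvd_size = 400000 entries and a linear part of 98304
 * entries, the hash part is 301696 entries. The double-hash share is then
 * 301696 * 41 / (41 + 59) = 123695, rounded down to the granularity ->
 * 123648, and the single-hash part is the remainder:
 * 400000 - 123648 - 98304 = 178048. The same arithmetic appears in
 * mlxsw_sp1_resources_kvd_register() and mlxsw_sp_kvd_sizes_get() below.
 */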
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					 kvd_size, MLXSW_SP_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &kvd_size_params);
}

static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
					 max_span, MLXSW_SP_RESOURCE_SPAN,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &span_size_params);
}

static int
mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u8 max_rif_mac_profiles;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
		max_rif_mac_profiles = 1;
	else
		max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
							  MAX_RIF_MAC_PROFILES);
	devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
					  max_rif_mac_profiles, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink,
					 "rif_mac_profiles",
					 max_rif_mac_profiles,
					 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &size_params);
}

static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
	devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, "rifs", max_rifs,
					 MLXSW_SP_RESOURCE_RIFS,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &size_params);
}


static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	return 0;

err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}

static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	return 0;

err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devlink_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}

static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the linear part.
	 * It is split into the single and double sizes according to the
	 * parts ratio from the profile. Both sizes must be multiples of the
	 * granularity from the profile. In case the user provided the sizes,
	 * they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
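
/* Editor's note -- worked example of the fallback split above; values are
 * hypothetical. With a KVD of 240K entries, kvd_linear_size = 98K and a
 * profile double:single parts ratio of 2:1, the computation yields
 *
 *   double = (240K - 98K) * 2 / (2 + 1) ~= 94.6K, rounded down to the
 *            nearest MLXSW_SP_KVD_GRANULARITY multiple,
 *   single = 240K - double - 98K.
 *
 * Sizes the user configured via devlink take precedence over this
 * profile-derived split.
 */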

static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};

static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		return err;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
}
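
/* Editor's note -- usage sketch, not part of the original driver; the device
 * name below is hypothetical. The runtime parameter registered above can be
 * inspected and changed from user space with the devlink CLI:
 *
 *   devlink dev param show pci/0000:03:00.0 \
 *           name acl_region_rehash_interval
 *   devlink dev param set pci/0000:03:00.0 \
 *           name acl_region_rehash_interval value 3000 cmode runtime
 *
 * The set path lands in mlxsw_sp_params_acl_region_rehash_intrvl_set()
 * above.
 */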

static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u16 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp1_fw_rev,
	.fw_filename = MLXSW_SP1_FW_FILENAME,
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
};

static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp2_fw_rev,
	.fw_filename = MLXSW_SP2_FW_FILENAME,
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
};

static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp3_fw_rev,
	.fw_filename = MLXSW_SP3_FW_FILENAME,
	.init = mlxsw_sp3_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
};

static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind = mlxsw_sp4_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp4_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
				   struct netdev_nested_priv *priv)
{
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}
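
/* Editor's note -- illustrative, device names hypothetical. The callback
 * above is handed to netdev_walk_all_lower_dev() by the finders below;
 * returning 1 stops the walk at the first mlxsw port. For a stack such as
 * br0 -> bond0 -> swp1, walking br0 finds swp1 and returns its
 * mlxsw_sp_port, while a stack built only from foreign netdevs yields NULL.
 */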

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}

int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if exists */
	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
					     lag_dev, extack);
	if (err)
		goto err_router_join;

	return 0;

err_router_join:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave the bridges
	 * they are member of
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
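
/* Editor's note -- behavioural sketch derived from the functions above, not
 * part of the original source. LAG slots are reference counted: the first
 * port joining lag_dev creates the LAG in hardware via SLDR and records
 * lag->dev, subsequent members only bump lag->ref_count. Symmetrically,
 * mlxsw_sp_port_lag_leave() destroys the hardware LAG only when the last
 * member (ref_count == 1) leaves, after which the slot can be reused by
 * mlxsw_sp_lag_index_get() for a different LAG netdev.
 */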

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}
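
/* Editor's note -- interpretation, not from the original source:
 * info->tx_enabled mirrors the bonding/team Tx state of the slave (e.g. the
 * LACP negotiation result). The helpers above keep a deliberate ordering:
 * enable adds the port to the distributor only after its collector is
 * enabled, and disable removes it from the distributor before the collector
 * is turned off, so frames are never hashed to a member that cannot collect.
 */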

static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}
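
/* Editor's note -- hypothetical example of the PVID check above: in a
 * VLAN-aware bridge holding vxlan100 and vxlan200, PVIDs 100 and 200 are
 * accepted, but if both devices map to PVID 100, test_and_set_bit() sees the
 * same VLAN twice and the bridge is rejected. VxLAN devices without a PVID
 * are skipped by the walk.
 */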

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}
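
/* Editor's note -- illustrative, device names hypothetical. A VLAN upper is
 * dispatched by its real device: swp1.100 goes to the port VLAN handler,
 * bond0.100 to the LAG handler (which fans out to each mlxsw member port),
 * and br0.100 to the bridge handler. VLANs on foreign netdevs fall through
 * and return 0.
 */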

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);
	upper_dev = info->upper_dev;

	if (!netif_is_l3_master(upper_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
	if (err)
		goto err_sp4_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
	if (err)
		goto err_sp4_pci_driver_register;

	return 0;

err_sp4_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
err_sp4_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
Spectrum driver"); 5196 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5197 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5198 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 5199 MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); 5200 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5201 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 5202 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 5203 MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME); 5204