// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <linux/ptp_classify.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
			      const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	char *txhdr;
	u16 max_fid;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		err = -ENOMEM;
		goto err_skb_cow_head;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
		err = -EIO;
		goto err_res_valid;
	}
	max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

	txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
	mlxsw_tx_hdr_fid_valid_set(txhdr, true);
	mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
	return 0;

err_res_valid:
err_skb_cow_head:
	this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
	dev_kfree_skb_any(skb);
	return err;
}
static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
	unsigned int type;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return false;

	type = ptp_classify_raw(skb);
	return !!ptp_parse_header(skb, type);
}

static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
	 * need special handling and cannot be transmitted as regular control
	 * packets.
	 */
	if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
		return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
							  mlxsw_sp_port, skb,
							  tx_info);

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	mlxsw_sp_txhdr_construct(skb, tx_info);
	return 0;
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
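/* Set the administrative (up/down) state of the port through the PAOS
 * register.
 */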
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
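/* Select the EtherType (802.1Q or 802.1AD) the port inserts when
 * tagging packets on egress.
 */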
int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
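/* Parse a PMLP payload into a port mapping. All lanes of a port must
 * use the same module and slot, and lane numbers must be sequential,
 * with identical RX and TX lanes when these are configured separately.
 */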
static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
					       pmlp_pl, port_mapping);
}

static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
				  port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
					      port_mapping->slot_index);
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
				    port_mapping->module);
	return err;
}

static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 slot_index, u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.slot_index,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return err;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}
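/* Transmit path: prepend the TX header, hand the packet to the core
 * for transmission and account for it in the per-CPU stats.
 */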
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
				    &tx_info);
	if (err)
		return NETDEV_TX_OK;

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
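/* Periodically refresh the cached HW stats while the carrier is up, so
 * that ndo_get_stats64 can be answered from the cache in atomic context.
 */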
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}
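/* Join the port to a VLAN in hardware and track it on the port's VLAN
 * list. The default VID is programmed as egress-untagged.
 */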
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}
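/* Bind or unbind a flow block to the port, depending on whether it is
 * a clsact ingress/egress block or a RED qevent block.
 */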
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
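/* ndo_eth_ioctl handler. Only the hardware time stamping ioctls are
 * supported.
 */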
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
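/* Configure the maximum shaper (rate and burst size) of a scheduling
 * element through the QEEC register.
 */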
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
						    module, &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}

static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u16 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;
	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
			      split_port_subnumber, slot_index);
	return 0;
}
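/* Create a local port: map it to its module, allocate and initialize
 * the netdev and all per-port objects (buffers, ETS, DCB, FIDs, qdiscs,
 * VLANs) and finally register the netdev.
 */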
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
				    mlxsw_sp_port, dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false so that the local port treats
	 * only packets with an 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));

	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}

static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

static bool mlxsw_sp_local_port_valid(u16 local_port)
{
	return local_port != MLXSW_PORT_CPU_PORT;
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (!mlxsw_sp_local_port_valid(local_port))
		return false;
	return mlxsw_sp->ports[local_port] != NULL;
}
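/* PMECR controls whether the device generates a PMLPE event when the
 * port-to-module mapping of a local port changes. Events are enabled for
 * all front-panel ports at init time, so that mapping changes (e.g. on
 * line card activation) are reported asynchronously and handled by the
 * mapping-events work item below.
 */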
static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
					   u16 local_port, bool enable)
{
	char pmecr_pl[MLXSW_REG_PMECR_LEN];

	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
			     enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
				      MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
}

struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};

static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		kfree(event);
	}
}

static void
mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
				    char *pmlp_pl, void *priv)
{
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping_event *event;
	struct mlxsw_sp *mlxsw_sp = priv;
	u16 local_port;

	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;

	events = &mlxsw_sp->port_mapping_events;
	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
	spin_lock(&events->queue_lock);
	list_add_tail(&event->list, &events->queue);
	spin_unlock(&events->queue_lock);
	mlxsw_core_schedule_work(&events->work);
}
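/* The PMLPE listener above runs in a context where sleeping is not
 * allowed, so it only copies the register payload (GFP_ATOMIC) and
 * queues it. Port creation itself happens in
 * mlxsw_sp_port_mapping_events_work(), which may sleep and can take the
 * devlink instance lock.
 */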
static void
__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;

	events = &mlxsw_sp->port_mapping_events;

	/* Caller needs to make sure that no new event is going to appear. */
	cancel_work_sync(&events->work);
	list_for_each_entry_safe(event, next_event, &events->queue, list) {
		list_del(&event->list);
		kfree(event);
	}
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = 1; i < max_ports; i++)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

static void
mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
			       bool (*selector)(void *priv, u16 local_port),
			       void *priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
	int i;

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
		if (err)
			goto err_port_module_info_get;
	}
	return 0;

err_port_module_info_get:
	kfree(mlxsw_sp->port_mapping);
	return err;
}

static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}

static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 unsigned int count,
					 const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count; i++) {
		u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		port_mapping = &mlxsw_sp->port_mapping[local_port];
		if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
			continue;
		mlxsw_sp_port_create(mlxsw_sp, local_port,
				     false, port_mapping);
	}
}

static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
		return mlxsw_sp->ports[local_port];
	return NULL;
}
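/* Entry point for devlink port splitting. For illustration (iproute2
 * devlink syntax; the device and port numbers are examples only):
 *
 *   devlink port split pci/0000:03:00.0/1 count 4
 *   devlink port unsplit pci/0000:03:00.0/1
 *
 * The PMTDB register is queried to learn which local ports would be
 * consumed or freed by the requested configuration.
 */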
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}

static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < TC_MAX_QUEUE; i++)
		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u16 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}
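/* On Spectrum-1, hardware timestamps are not delivered with the packet;
 * the device pushes them into per-port ingress/egress FIFOs and raises an
 * event. Each MTPPTR payload may carry several records, which are matched
 * back to their packets by (message type, domain, sequence ID).
 */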
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u16 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
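/* Helper macros binding trap IDs to the Rx listener flavors above. The
 * MARK variants flag the skb as already forwarded in hardware
 * (offload_fwd_mark / offload_l3_fwd_mark), so the bridge and router do
 * not forward a duplicate copy in software.
 */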
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};

static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};

static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
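/* Trap setup is ordered: CPU policers are configured first (QPCR), then
 * trap groups are bound to those policers (HTGT), and only then are the
 * listeners registered, so a trap can never fire before the group that
 * rate-limits it towards the CPU exists.
 */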
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u16 max_lag;
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
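/* Per-generation PTP dispatch tables. Spectrum-1 uses the FIFO-based
 * timestamping scheme handled above, while Spectrum-2 and later read
 * timestamps from the completion queue; Spectrum-4 reuses the Spectrum-2
 * callbacks except for Tx header construction.
 */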
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init = mlxsw_sp1_ptp_clock_init,
	.clock_fini = mlxsw_sp1_ptp_clock_fini,
	.init = mlxsw_sp1_ptp_init,
	.fini = mlxsw_sp1_ptp_fini,
	.receive = mlxsw_sp1_ptp_receive,
	.transmitted = mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp1_ptp_shaper_work,
	.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats = mlxsw_sp1_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini = mlxsw_sp2_ptp_fini,
	.receive = mlxsw_sp2_ptp_receive,
	.transmitted = mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp2_ptp_shaper_work,
	.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats = mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini = mlxsw_sp2_ptp_fini,
	.receive = mlxsw_sp2_ptp_receive,
	.transmitted = mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp2_ptp_shaper_work,
	.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats = mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};

struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};

static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}

/* RCU read lock must be held */
struct mlxsw_sp_sample_params *
mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	mlxsw_sp_sample_trigger_key_init(&key, trigger);
	trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
					 mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return NULL;

	return &trigger_node->params;
}
static int
mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_sample_trigger *trigger,
				  const struct mlxsw_sp_sample_params *params)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	int err;

	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
	if (!trigger_node)
		return -ENOMEM;

	trigger_node->trigger = *trigger;
	trigger_node->params = *params;
	refcount_set(&trigger_node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
				     &trigger_node->ht_node,
				     mlxsw_sp_sample_trigger_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(trigger_node);
	return err;
}

static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}

int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}

void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return;

	if (!refcount_dec_and_test(&trigger_node->refcount))
		return;

	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789

static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
	mutex_init(&mlxsw_sp->parsing.lock);
}

static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
}

struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;
	struct rhash_head ht_node;
	u32 kvdl_index;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};
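/* IPv6 addresses used by the device (e.g. as tunnel underlay addresses)
 * are stored in the KVD linear memory and referenced by index. The hash
 * table below deduplicates them: each distinct address is written once
 * via the RIPS register and refcounted by its users.
 */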
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}

static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}

int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}

void
mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
{
	struct mlxsw_sp_ipv6_addr_node *node;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (WARN_ON(!node))
		goto out_unlock;

	if (!refcount_dec_and_test(&node->refcount))
		goto out_unlock;

	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
}

static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
			      &mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		return err;

	mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
	return 0;
}

static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);
	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}
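	/* PTP is set up in two stages: the PHC clock is created only when
	 * the underlying bus can read it, and the per-ASIC PTP state is
	 * then instantiated only if a clock actually exists.
	 */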
	if (mlxsw_sp->bus_info->read_clock_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}
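/* The per-generation init callbacks below differ only in which ops
 * tables they plug into the common mlxsw_sp instance; all actual
 * initialization work is shared in mlxsw_sp_init().
 */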
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
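/* Config profiles pushed to the firmware at bus init time. They disable
 * the InfiniBand-only resources (max_ib_mc, max_pkey), select the
 * unified bridge model (ubridge) and, on Spectrum-2 and later, request
 * UTC timestamps in the CQEs.
 */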
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type = 1,
	.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};

/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
 * table.
 */
#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128

static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag = 1,
	.max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type = 1,
	.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};

static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
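/* KVD resource registration. The sizes registered here can be inspected
 * and resized via devlink; for illustration (device name and size are
 * examples only):
 *
 *   devlink resource show pci/0000:03:00.0
 *   devlink resource set pci/0000:03:00.0 path /kvd/linear size 98304
 *   devlink dev reload pci/0000:03:00.0
 */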
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				     kvd_size, MLXSW_SP_RESOURCE_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
				     linear_size,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     MLXSW_SP_RESOURCE_KVD,
				     &linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
				     double_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
				     single_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				      kvd_size, MLXSW_SP_RESOURCE_KVD,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &kvd_size_params);
}

static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
				      max_span, MLXSW_SP_RESOURCE_SPAN,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &span_size_params);
}
devlink_resource_size_params size_params; 3676 u8 max_rif_mac_profiles; 3677 3678 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) 3679 max_rif_mac_profiles = 1; 3680 else 3681 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, 3682 MAX_RIF_MAC_PROFILES); 3683 devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, 3684 max_rif_mac_profiles, 1, 3685 DEVLINK_RESOURCE_UNIT_ENTRY); 3686 3687 return devl_resource_register(devlink, 3688 "rif_mac_profiles", 3689 max_rif_mac_profiles, 3690 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, 3691 DEVLINK_RESOURCE_ID_PARENT_TOP, 3692 &size_params); 3693 } 3694 3695 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core) 3696 { 3697 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3698 struct devlink_resource_size_params size_params; 3699 u64 max_rifs; 3700 3701 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS)) 3702 return -EIO; 3703 3704 max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS); 3705 devlink_resource_size_params_init(&size_params, max_rifs, max_rifs, 3706 1, DEVLINK_RESOURCE_UNIT_ENTRY); 3707 3708 return devl_resource_register(devlink, "rifs", max_rifs, 3709 MLXSW_SP_RESOURCE_RIFS, 3710 DEVLINK_RESOURCE_ID_PARENT_TOP, 3711 &size_params); 3712 } 3713 3714 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) 3715 { 3716 int err; 3717 3718 err = mlxsw_sp1_resources_kvd_register(mlxsw_core); 3719 if (err) 3720 return err; 3721 3722 err = mlxsw_sp_resources_span_register(mlxsw_core); 3723 if (err) 3724 goto err_resources_span_register; 3725 3726 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3727 if (err) 3728 goto err_resources_counter_register; 3729 3730 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3731 if (err) 3732 goto err_policer_resources_register; 3733 3734 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3735 if (err) 3736 goto err_resources_rif_mac_profile_register; 3737 3738 err = mlxsw_sp_resources_rifs_register(mlxsw_core); 3739 if (err) 3740 goto err_resources_rifs_register; 3741 3742 return 0; 3743 3744 err_resources_rifs_register: 3745 err_resources_rif_mac_profile_register: 3746 err_policer_resources_register: 3747 err_resources_counter_register: 3748 err_resources_span_register: 3749 devl_resources_unregister(priv_to_devlink(mlxsw_core)); 3750 return err; 3751 } 3752 3753 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) 3754 { 3755 int err; 3756 3757 err = mlxsw_sp2_resources_kvd_register(mlxsw_core); 3758 if (err) 3759 return err; 3760 3761 err = mlxsw_sp_resources_span_register(mlxsw_core); 3762 if (err) 3763 goto err_resources_span_register; 3764 3765 err = mlxsw_sp_counter_resources_register(mlxsw_core); 3766 if (err) 3767 goto err_resources_counter_register; 3768 3769 err = mlxsw_sp_policer_resources_register(mlxsw_core); 3770 if (err) 3771 goto err_policer_resources_register; 3772 3773 err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core); 3774 if (err) 3775 goto err_resources_rif_mac_profile_register; 3776 3777 err = mlxsw_sp_resources_rifs_register(mlxsw_core); 3778 if (err) 3779 goto err_resources_rifs_register; 3780 3781 return 0; 3782 3783 err_resources_rifs_register: 3784 err_resources_rif_mac_profile_register: 3785 err_policer_resources_register: 3786 err_resources_counter_register: 3787 err_resources_span_register: 3788 devl_resources_unregister(priv_to_devlink(mlxsw_core)); 3789 return err; 3790 } 3791 3792 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, 3793 const struct 
mlxsw_config_profile *profile, 3794 u64 *p_single_size, u64 *p_double_size, 3795 u64 *p_linear_size) 3796 { 3797 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3798 u32 double_size; 3799 int err; 3800 3801 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3802 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) 3803 return -EIO; 3804 3805 /* The hash part is what is left of the KVD after the 3806 * linear part. It is split into the single and double 3807 * sizes according to the parts ratio from the profile. 3808 * Both sizes must be multiples of the granularity from 3809 * the profile. In case the user provided the sizes, 3810 * they are obtained via devlink. 3811 */ 3812 err = devl_resource_size_get(devlink, 3813 MLXSW_SP_RESOURCE_KVD_LINEAR, 3814 p_linear_size); 3815 if (err) 3816 *p_linear_size = profile->kvd_linear_size; 3817 3818 err = devl_resource_size_get(devlink, 3819 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 3820 p_double_size); 3821 if (err) { 3822 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3823 *p_linear_size; 3824 double_size *= profile->kvd_hash_double_parts; 3825 double_size /= profile->kvd_hash_double_parts + 3826 profile->kvd_hash_single_parts; 3827 *p_double_size = rounddown(double_size, 3828 MLXSW_SP_KVD_GRANULARITY); 3829 } 3830 3831 err = devl_resource_size_get(devlink, 3832 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 3833 p_single_size); 3834 if (err) 3835 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - 3836 *p_double_size - *p_linear_size; 3837 3838 /* Check results are legal. */ 3839 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) || 3840 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) || 3841 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size) 3842 return -EIO; 3843 3844 return 0; 3845 } 3846 3847 static int 3848 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, 3849 struct devlink_param_gset_ctx *ctx) 3850 { 3851 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3852 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3853 3854 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); 3855 return 0; 3856 } 3857 3858 static int 3859 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, 3860 struct devlink_param_gset_ctx *ctx) 3861 { 3862 struct mlxsw_core *mlxsw_core = devlink_priv(devlink); 3863 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3864 3865 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); 3866 } 3867 3868 static const struct devlink_param mlxsw_sp2_devlink_params[] = { 3869 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3870 "acl_region_rehash_interval", 3871 DEVLINK_PARAM_TYPE_U32, 3872 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 3873 mlxsw_sp_params_acl_region_rehash_intrvl_get, 3874 mlxsw_sp_params_acl_region_rehash_intrvl_set, 3875 NULL), 3876 }; 3877 3878 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) 3879 { 3880 struct devlink *devlink = priv_to_devlink(mlxsw_core); 3881 union devlink_param_value value; 3882 int err; 3883 3884 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, 3885 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3886 if (err) 3887 return err; 3888 3889 value.vu32 = 0; 3890 devlink_param_driverinit_value_set(devlink, 3891 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, 3892 value); 3893 return 0; 3894 } 3895 3896 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) 3897 {
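/* Inverse of mlxsw_sp2_params_register(): drops the acl_region_rehash_interval parameter from devlink on teardown. */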
3898 devlink_params_unregister(priv_to_devlink(mlxsw_core), 3899 mlxsw_sp2_devlink_params, 3900 ARRAY_SIZE(mlxsw_sp2_devlink_params)); 3901 } 3902 3903 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, 3904 struct sk_buff *skb, u16 local_port) 3905 { 3906 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 3907 3908 skb_pull(skb, MLXSW_TXHDR_LEN); 3909 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port); 3910 } 3911 3912 static struct mlxsw_driver mlxsw_sp1_driver = { 3913 .kind = mlxsw_sp1_driver_name, 3914 .priv_size = sizeof(struct mlxsw_sp), 3915 .fw_req_rev = &mlxsw_sp1_fw_rev, 3916 .fw_filename = MLXSW_SP1_FW_FILENAME, 3917 .init = mlxsw_sp1_init, 3918 .fini = mlxsw_sp_fini, 3919 .port_split = mlxsw_sp_port_split, 3920 .port_unsplit = mlxsw_sp_port_unsplit, 3921 .sb_pool_get = mlxsw_sp_sb_pool_get, 3922 .sb_pool_set = mlxsw_sp_sb_pool_set, 3923 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3924 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3925 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3926 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3927 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3928 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3929 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3930 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3931 .trap_init = mlxsw_sp_trap_init, 3932 .trap_fini = mlxsw_sp_trap_fini, 3933 .trap_action_set = mlxsw_sp_trap_action_set, 3934 .trap_group_init = mlxsw_sp_trap_group_init, 3935 .trap_group_set = mlxsw_sp_trap_group_set, 3936 .trap_policer_init = mlxsw_sp_trap_policer_init, 3937 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3938 .trap_policer_set = mlxsw_sp_trap_policer_set, 3939 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3940 .txhdr_construct = mlxsw_sp_txhdr_construct, 3941 .resources_register = mlxsw_sp1_resources_register, 3942 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, 3943 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3944 .txhdr_len = MLXSW_TXHDR_LEN, 3945 .profile = &mlxsw_sp1_config_profile, 3946 .sdq_supports_cqe_v2 = false, 3947 }; 3948 3949 static struct mlxsw_driver mlxsw_sp2_driver = { 3950 .kind = mlxsw_sp2_driver_name, 3951 .priv_size = sizeof(struct mlxsw_sp), 3952 .fw_req_rev = &mlxsw_sp2_fw_rev, 3953 .fw_filename = MLXSW_SP2_FW_FILENAME, 3954 .init = mlxsw_sp2_init, 3955 .fini = mlxsw_sp_fini, 3956 .port_split = mlxsw_sp_port_split, 3957 .port_unsplit = mlxsw_sp_port_unsplit, 3958 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 3959 .sb_pool_get = mlxsw_sp_sb_pool_get, 3960 .sb_pool_set = mlxsw_sp_sb_pool_set, 3961 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 3962 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 3963 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 3964 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 3965 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 3966 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 3967 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 3968 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 3969 .trap_init = mlxsw_sp_trap_init, 3970 .trap_fini = mlxsw_sp_trap_fini, 3971 .trap_action_set = mlxsw_sp_trap_action_set, 3972 .trap_group_init = mlxsw_sp_trap_group_init, 3973 .trap_group_set = mlxsw_sp_trap_group_set, 3974 .trap_policer_init = mlxsw_sp_trap_policer_init, 3975 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 3976 .trap_policer_set = mlxsw_sp_trap_policer_set, 3977 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 3978 .txhdr_construct = 
mlxsw_sp_txhdr_construct, 3979 .resources_register = mlxsw_sp2_resources_register, 3980 .params_register = mlxsw_sp2_params_register, 3981 .params_unregister = mlxsw_sp2_params_unregister, 3982 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 3983 .txhdr_len = MLXSW_TXHDR_LEN, 3984 .profile = &mlxsw_sp2_config_profile, 3985 .sdq_supports_cqe_v2 = true, 3986 }; 3987 3988 static struct mlxsw_driver mlxsw_sp3_driver = { 3989 .kind = mlxsw_sp3_driver_name, 3990 .priv_size = sizeof(struct mlxsw_sp), 3991 .fw_req_rev = &mlxsw_sp3_fw_rev, 3992 .fw_filename = MLXSW_SP3_FW_FILENAME, 3993 .init = mlxsw_sp3_init, 3994 .fini = mlxsw_sp_fini, 3995 .port_split = mlxsw_sp_port_split, 3996 .port_unsplit = mlxsw_sp_port_unsplit, 3997 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 3998 .sb_pool_get = mlxsw_sp_sb_pool_get, 3999 .sb_pool_set = mlxsw_sp_sb_pool_set, 4000 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4001 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4002 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4003 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4004 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4005 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4006 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4007 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4008 .trap_init = mlxsw_sp_trap_init, 4009 .trap_fini = mlxsw_sp_trap_fini, 4010 .trap_action_set = mlxsw_sp_trap_action_set, 4011 .trap_group_init = mlxsw_sp_trap_group_init, 4012 .trap_group_set = mlxsw_sp_trap_group_set, 4013 .trap_policer_init = mlxsw_sp_trap_policer_init, 4014 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4015 .trap_policer_set = mlxsw_sp_trap_policer_set, 4016 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4017 .txhdr_construct = mlxsw_sp_txhdr_construct, 4018 .resources_register = mlxsw_sp2_resources_register, 4019 .params_register = mlxsw_sp2_params_register, 4020 .params_unregister = mlxsw_sp2_params_unregister, 4021 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4022 .txhdr_len = MLXSW_TXHDR_LEN, 4023 .profile = &mlxsw_sp2_config_profile, 4024 .sdq_supports_cqe_v2 = true, 4025 }; 4026 4027 static struct mlxsw_driver mlxsw_sp4_driver = { 4028 .kind = mlxsw_sp4_driver_name, 4029 .priv_size = sizeof(struct mlxsw_sp), 4030 .init = mlxsw_sp4_init, 4031 .fini = mlxsw_sp_fini, 4032 .port_split = mlxsw_sp_port_split, 4033 .port_unsplit = mlxsw_sp_port_unsplit, 4034 .ports_remove_selected = mlxsw_sp_ports_remove_selected, 4035 .sb_pool_get = mlxsw_sp_sb_pool_get, 4036 .sb_pool_set = mlxsw_sp_sb_pool_set, 4037 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 4038 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 4039 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 4040 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 4041 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 4042 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 4043 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 4044 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 4045 .trap_init = mlxsw_sp_trap_init, 4046 .trap_fini = mlxsw_sp_trap_fini, 4047 .trap_action_set = mlxsw_sp_trap_action_set, 4048 .trap_group_init = mlxsw_sp_trap_group_init, 4049 .trap_group_set = mlxsw_sp_trap_group_set, 4050 .trap_policer_init = mlxsw_sp_trap_policer_init, 4051 .trap_policer_fini = mlxsw_sp_trap_policer_fini, 4052 .trap_policer_set = mlxsw_sp_trap_policer_set, 4053 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, 4054 .txhdr_construct = mlxsw_sp_txhdr_construct, 4055 .resources_register = 
mlxsw_sp2_resources_register, 4056 .params_register = mlxsw_sp2_params_register, 4057 .params_unregister = mlxsw_sp2_params_unregister, 4058 .ptp_transmitted = mlxsw_sp_ptp_transmitted, 4059 .txhdr_len = MLXSW_TXHDR_LEN, 4060 .profile = &mlxsw_sp4_config_profile, 4061 .sdq_supports_cqe_v2 = true, 4062 }; 4063 4064 bool mlxsw_sp_port_dev_check(const struct net_device *dev) 4065 { 4066 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 4067 } 4068 4069 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, 4070 struct netdev_nested_priv *priv) 4071 { 4072 int ret = 0; 4073 4074 if (mlxsw_sp_port_dev_check(lower_dev)) { 4075 priv->data = (void *)netdev_priv(lower_dev); 4076 ret = 1; 4077 } 4078 4079 return ret; 4080 } 4081 4082 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) 4083 { 4084 struct netdev_nested_priv priv = { 4085 .data = NULL, 4086 }; 4087 4088 if (mlxsw_sp_port_dev_check(dev)) 4089 return netdev_priv(dev); 4090 4091 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv); 4092 4093 return (struct mlxsw_sp_port *)priv.data; 4094 } 4095 4096 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) 4097 { 4098 struct mlxsw_sp_port *mlxsw_sp_port; 4099 4100 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); 4101 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; 4102 } 4103 4104 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) 4105 { 4106 struct netdev_nested_priv priv = { 4107 .data = NULL, 4108 }; 4109 4110 if (mlxsw_sp_port_dev_check(dev)) 4111 return netdev_priv(dev); 4112 4113 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, 4114 &priv); 4115 4116 return (struct mlxsw_sp_port *)priv.data; 4117 } 4118 4119 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) 4120 { 4121 struct mlxsw_sp_port *mlxsw_sp_port; 4122 4123 rcu_read_lock(); 4124 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); 4125 if (mlxsw_sp_port) 4126 dev_hold(mlxsw_sp_port->dev); 4127 rcu_read_unlock(); 4128 return mlxsw_sp_port; 4129 } 4130 4131 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) 4132 { 4133 dev_put(mlxsw_sp_port->dev); 4134 } 4135 4136 int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) 4137 { 4138 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4139 int err = 0; 4140 4141 mutex_lock(&mlxsw_sp->parsing.lock); 4142 4143 if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref)) 4144 goto out_unlock; 4145 4146 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH, 4147 mlxsw_sp->parsing.vxlan_udp_dport); 4148 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4149 if (err) 4150 goto out_unlock; 4151 4152 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH; 4153 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1); 4154 4155 out_unlock: 4156 mutex_unlock(&mlxsw_sp->parsing.lock); 4157 return err; 4158 } 4159 4160 void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp) 4161 { 4162 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4163 4164 mutex_lock(&mlxsw_sp->parsing.lock); 4165 4166 if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref)) 4167 goto out_unlock; 4168 4169 mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH, 4170 mlxsw_sp->parsing.vxlan_udp_dport); 4171 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4172 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; 4173 4174 out_unlock: 4175 mutex_unlock(&mlxsw_sp->parsing.lock); 4176 } 4177 4178 int mlxsw_sp_parsing_vxlan_udp_dport_set(struct 
mlxsw_sp *mlxsw_sp, 4179 __be16 udp_dport) 4180 { 4181 char mprs_pl[MLXSW_REG_MPRS_LEN]; 4182 int err; 4183 4184 mutex_lock(&mlxsw_sp->parsing.lock); 4185 4186 mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth, 4187 be16_to_cpu(udp_dport)); 4188 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); 4189 if (err) 4190 goto out_unlock; 4191 4192 mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport); 4193 4194 out_unlock: 4195 mutex_unlock(&mlxsw_sp->parsing.lock); 4196 return err; 4197 } 4198 4199 static void 4200 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, 4201 struct net_device *lag_dev) 4202 { 4203 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); 4204 struct net_device *upper_dev; 4205 struct list_head *iter; 4206 4207 if (netif_is_bridge_port(lag_dev)) 4208 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); 4209 4210 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { 4211 if (!netif_is_bridge_port(upper_dev)) 4212 continue; 4213 br_dev = netdev_master_upper_dev_get(upper_dev); 4214 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); 4215 } 4216 } 4217 4218 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4219 { 4220 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4221 4222 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4223 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4224 } 4225 4226 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4227 { 4228 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4229 4230 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 4231 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4232 } 4233 4234 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4235 u16 lag_id, u8 port_index) 4236 { 4237 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4238 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4239 4240 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 4241 lag_id, port_index); 4242 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4243 } 4244 4245 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4246 u16 lag_id) 4247 { 4248 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4249 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4250 4251 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 4252 lag_id); 4253 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4254 } 4255 4256 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 4257 u16 lag_id) 4258 { 4259 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4260 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4261 4262 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 4263 lag_id); 4264 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4265 } 4266 4267 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 4268 u16 lag_id) 4269 { 4270 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4271 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 4272 4273 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 4274 lag_id); 4275 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4276 } 4277 4278 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4279 struct net_device *lag_dev, 4280 u16 *p_lag_id) 4281 { 4282 struct mlxsw_sp_upper *lag; 4283 int free_lag_id = -1; 4284 u16 max_lag; 4285 int err, i; 4286 4287 err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); 4288 if (err) 4289 
return err; 4290 4291 for (i = 0; i < max_lag; i++) { 4292 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 4293 if (lag->ref_count) { 4294 if (lag->dev == lag_dev) { 4295 *p_lag_id = i; 4296 return 0; 4297 } 4298 } else if (free_lag_id < 0) { 4299 free_lag_id = i; 4300 } 4301 } 4302 if (free_lag_id < 0) 4303 return -EBUSY; 4304 *p_lag_id = free_lag_id; 4305 return 0; 4306 } 4307 4308 static bool 4309 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 4310 struct net_device *lag_dev, 4311 struct netdev_lag_upper_info *lag_upper_info, 4312 struct netlink_ext_ack *extack) 4313 { 4314 u16 lag_id; 4315 4316 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 4317 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 4318 return false; 4319 } 4320 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4321 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4322 return false; 4323 } 4324 return true; 4325 } 4326 4327 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4328 u16 lag_id, u8 *p_port_index) 4329 { 4330 u64 max_lag_members; 4331 int i; 4332 4333 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, 4334 MAX_LAG_MEMBERS); 4335 for (i = 0; i < max_lag_members; i++) { 4336 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 4337 *p_port_index = i; 4338 return 0; 4339 } 4340 } 4341 return -EBUSY; 4342 } 4343 4344 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 4345 struct net_device *lag_dev, 4346 struct netlink_ext_ack *extack) 4347 { 4348 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4349 struct mlxsw_sp_upper *lag; 4350 u16 lag_id; 4351 u8 port_index; 4352 int err; 4353 4354 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 4355 if (err) 4356 return err; 4357 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4358 if (!lag->ref_count) { 4359 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 4360 if (err) 4361 return err; 4362 lag->dev = lag_dev; 4363 } 4364 4365 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4366 if (err) 4367 return err; 4368 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 4369 if (err) 4370 goto err_col_port_add; 4371 4372 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 4373 mlxsw_sp_port->local_port); 4374 mlxsw_sp_port->lag_id = lag_id; 4375 mlxsw_sp_port->lagged = 1; 4376 lag->ref_count++; 4377 4378 /* Port is no longer usable as a router interface */ 4379 if (mlxsw_sp_port->default_vlan->fid) 4380 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); 4381 4382 /* Join a router interface configured on the LAG, if one exists */ 4383 err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan, 4384 lag_dev, extack); 4385 if (err) 4386 goto err_router_join; 4387 4388 return 0; 4389 4390 err_router_join: 4391 lag->ref_count--; 4392 mlxsw_sp_port->lagged = 0; 4393 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4394 mlxsw_sp_port->local_port); 4395 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4396 err_col_port_add: 4397 if (!lag->ref_count) 4398 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4399 return err; 4400 } 4401 4402 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 4403 struct net_device *lag_dev) 4404 { 4405 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4406 u16 lag_id = mlxsw_sp_port->lag_id; 4407 struct mlxsw_sp_upper *lag; 4408 4409 if (!mlxsw_sp_port->lagged) 4410 return; 4411 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4412 WARN_ON(lag->ref_count == 0); 4413 4414
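/* Teardown mirrors mlxsw_sp_port_lag_join() in reverse: first detach the port from the LAG's collector and distributor, then invalidate its VLANs and make the LAG's uppers leave their bridges, and destroy the LAG in hardware only when the last port is leaving. */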
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4415 4416 /* Any VLANs configured on the port are no longer valid */ 4417 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); 4418 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); 4419 /* Make the LAG and its directly linked uppers leave the bridges they 4420 * are members of 4421 */ 4422 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); 4423 4424 if (lag->ref_count == 1) 4425 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4426 4427 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4428 mlxsw_sp_port->local_port); 4429 mlxsw_sp_port->lagged = 0; 4430 lag->ref_count--; 4431 4432 /* Make sure untagged frames are allowed to ingress */ 4433 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, 4434 ETH_P_8021Q); 4435 } 4436 4437 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 4438 u16 lag_id) 4439 { 4440 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4441 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4442 4443 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 4444 mlxsw_sp_port->local_port); 4445 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4446 } 4447 4448 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 4449 u16 lag_id) 4450 { 4451 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4452 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4453 4454 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 4455 mlxsw_sp_port->local_port); 4456 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4457 } 4458 4459 static int 4460 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) 4461 { 4462 int err; 4463 4464 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 4465 mlxsw_sp_port->lag_id); 4466 if (err) 4467 return err; 4468 4469 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4470 if (err) 4471 goto err_dist_port_add; 4472 4473 return 0; 4474 4475 err_dist_port_add: 4476 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4477 return err; 4478 } 4479 4480 static int 4481 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) 4482 { 4483 int err; 4484 4485 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 4486 mlxsw_sp_port->lag_id); 4487 if (err) 4488 return err; 4489 4490 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, 4491 mlxsw_sp_port->lag_id); 4492 if (err) 4493 goto err_col_port_disable; 4494 4495 return 0; 4496 4497 err_col_port_disable: 4498 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); 4499 return err; 4500 } 4501 4502 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 4503 struct netdev_lag_lower_state_info *info) 4504 { 4505 if (info->tx_enabled) 4506 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); 4507 else 4508 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4509 } 4510 4511 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, 4512 bool enable) 4513 { 4514 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4515 enum mlxsw_reg_spms_state spms_state; 4516 char *spms_pl; 4517 u16 vid; 4518 int err; 4519 4520 spms_state = enable ?
MLXSW_REG_SPMS_STATE_FORWARDING : 4521 MLXSW_REG_SPMS_STATE_DISCARDING; 4522 4523 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 4524 if (!spms_pl) 4525 return -ENOMEM; 4526 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 4527 4528 for (vid = 0; vid < VLAN_N_VID; vid++) 4529 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); 4530 4531 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 4532 kfree(spms_pl); 4533 return err; 4534 } 4535 4536 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4537 { 4538 u16 vid = 1; 4539 int err; 4540 4541 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4542 if (err) 4543 return err; 4544 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); 4545 if (err) 4546 goto err_port_stp_set; 4547 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4548 true, false); 4549 if (err) 4550 goto err_port_vlan_set; 4551 4552 for (; vid <= VLAN_N_VID - 1; vid++) { 4553 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4554 vid, false); 4555 if (err) 4556 goto err_vid_learning_set; 4557 } 4558 4559 return 0; 4560 4561 err_vid_learning_set: 4562 for (vid--; vid >= 1; vid--) 4563 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 4564 err_port_vlan_set: 4565 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4566 err_port_stp_set: 4567 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4568 return err; 4569 } 4570 4571 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4572 { 4573 u16 vid; 4574 4575 for (vid = VLAN_N_VID - 1; vid >= 1; vid--) 4576 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, 4577 vid, true); 4578 4579 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, 4580 false, false); 4581 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4582 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 4583 } 4584 4585 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) 4586 { 4587 unsigned int num_vxlans = 0; 4588 struct net_device *dev; 4589 struct list_head *iter; 4590 4591 netdev_for_each_lower_dev(br_dev, dev, iter) { 4592 if (netif_is_vxlan(dev)) 4593 num_vxlans++; 4594 } 4595 4596 return num_vxlans > 1; 4597 } 4598 4599 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) 4600 { 4601 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; 4602 struct net_device *dev; 4603 struct list_head *iter; 4604 4605 netdev_for_each_lower_dev(br_dev, dev, iter) { 4606 u16 pvid; 4607 int err; 4608 4609 if (!netif_is_vxlan(dev)) 4610 continue; 4611 4612 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); 4613 if (err || !pvid) 4614 continue; 4615 4616 if (test_and_set_bit(pvid, vlans)) 4617 return false; 4618 } 4619 4620 return true; 4621 } 4622 4623 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, 4624 struct netlink_ext_ack *extack) 4625 { 4626 if (br_multicast_enabled(br_dev)) { 4627 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device"); 4628 return false; 4629 } 4630 4631 if (!br_vlan_enabled(br_dev) && 4632 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { 4633 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); 4634 return false; 4635 } 4636 4637 if (br_vlan_enabled(br_dev) && 4638 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { 4639 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); 4640 return false; 4641 } 4642 4643 return true; 4644 } 4645 4646 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, 
4647 struct net_device *dev, 4648 unsigned long event, void *ptr) 4649 { 4650 struct netdev_notifier_changeupper_info *info; 4651 struct mlxsw_sp_port *mlxsw_sp_port; 4652 struct netlink_ext_ack *extack; 4653 struct net_device *upper_dev; 4654 struct mlxsw_sp *mlxsw_sp; 4655 int err = 0; 4656 u16 proto; 4657 4658 mlxsw_sp_port = netdev_priv(dev); 4659 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4660 info = ptr; 4661 extack = netdev_notifier_info_to_extack(&info->info); 4662 4663 switch (event) { 4664 case NETDEV_PRECHANGEUPPER: 4665 upper_dev = info->upper_dev; 4666 if (!is_vlan_dev(upper_dev) && 4667 !netif_is_lag_master(upper_dev) && 4668 !netif_is_bridge_master(upper_dev) && 4669 !netif_is_ovs_master(upper_dev) && 4670 !netif_is_macvlan(upper_dev) && 4671 !netif_is_l3_master(upper_dev)) { 4672 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4673 return -EINVAL; 4674 } 4675 if (!info->linking) 4676 break; 4677 if (netif_is_bridge_master(upper_dev) && 4678 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4679 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4680 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4681 return -EOPNOTSUPP; 4682 if (netdev_has_any_upper_dev(upper_dev) && 4683 (!netif_is_bridge_master(upper_dev) || 4684 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4685 upper_dev))) { 4686 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4687 return -EINVAL; 4688 } 4689 if (netif_is_lag_master(upper_dev) && 4690 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 4691 info->upper_info, extack)) 4692 return -EINVAL; 4693 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { 4694 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN"); 4695 return -EINVAL; 4696 } 4697 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && 4698 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { 4699 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); 4700 return -EINVAL; 4701 } 4702 if (netif_is_macvlan(upper_dev) && 4703 !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) { 4704 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4705 return -EOPNOTSUPP; 4706 } 4707 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { 4708 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); 4709 return -EINVAL; 4710 } 4711 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { 4712 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4713 return -EINVAL; 4714 } 4715 if (netif_is_bridge_master(upper_dev)) { 4716 br_vlan_get_proto(upper_dev, &proto); 4717 if (br_vlan_enabled(upper_dev) && 4718 proto != ETH_P_8021Q && proto != ETH_P_8021AD) { 4719 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported"); 4720 return -EOPNOTSUPP; 4721 } 4722 if (vlan_uses_dev(lower_dev) && 4723 br_vlan_enabled(upper_dev) && 4724 proto == ETH_P_8021AD) { 4725 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported"); 4726 return -EOPNOTSUPP; 4727 } 4728 } 4729 if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) { 4730 struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev); 4731 4732 if (br_vlan_enabled(br_dev)) { 4733 br_vlan_get_proto(br_dev, &proto); 4734 if (proto == ETH_P_8021AD) { 4735 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge"); 4736 return -EOPNOTSUPP; 
4737 } 4738 } 4739 } 4740 if (is_vlan_dev(upper_dev) && 4741 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 4742 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 4743 return -EOPNOTSUPP; 4744 } 4745 break; 4746 case NETDEV_CHANGEUPPER: 4747 upper_dev = info->upper_dev; 4748 if (netif_is_bridge_master(upper_dev)) { 4749 if (info->linking) 4750 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4751 lower_dev, 4752 upper_dev, 4753 extack); 4754 else 4755 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4756 lower_dev, 4757 upper_dev); 4758 } else if (netif_is_lag_master(upper_dev)) { 4759 if (info->linking) { 4760 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 4761 upper_dev, extack); 4762 } else { 4763 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); 4764 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 4765 upper_dev); 4766 } 4767 } else if (netif_is_ovs_master(upper_dev)) { 4768 if (info->linking) 4769 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 4770 else 4771 mlxsw_sp_port_ovs_leave(mlxsw_sp_port); 4772 } else if (netif_is_macvlan(upper_dev)) { 4773 if (!info->linking) 4774 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4775 } else if (is_vlan_dev(upper_dev)) { 4776 struct net_device *br_dev; 4777 4778 if (!netif_is_bridge_port(upper_dev)) 4779 break; 4780 if (info->linking) 4781 break; 4782 br_dev = netdev_master_upper_dev_get(upper_dev); 4783 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, 4784 br_dev); 4785 } 4786 break; 4787 } 4788 4789 return err; 4790 } 4791 4792 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 4793 unsigned long event, void *ptr) 4794 { 4795 struct netdev_notifier_changelowerstate_info *info; 4796 struct mlxsw_sp_port *mlxsw_sp_port; 4797 int err; 4798 4799 mlxsw_sp_port = netdev_priv(dev); 4800 info = ptr; 4801 4802 switch (event) { 4803 case NETDEV_CHANGELOWERSTATE: 4804 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 4805 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 4806 info->lower_state_info); 4807 if (err) 4808 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 4809 } 4810 break; 4811 } 4812 4813 return 0; 4814 } 4815 4816 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, 4817 struct net_device *port_dev, 4818 unsigned long event, void *ptr) 4819 { 4820 switch (event) { 4821 case NETDEV_PRECHANGEUPPER: 4822 case NETDEV_CHANGEUPPER: 4823 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, 4824 event, ptr); 4825 case NETDEV_CHANGELOWERSTATE: 4826 return mlxsw_sp_netdevice_port_lower_event(port_dev, event, 4827 ptr); 4828 } 4829 4830 return 0; 4831 } 4832 4833 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 4834 unsigned long event, void *ptr) 4835 { 4836 struct net_device *dev; 4837 struct list_head *iter; 4838 int ret; 4839 4840 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4841 if (mlxsw_sp_port_dev_check(dev)) { 4842 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, 4843 ptr); 4844 if (ret) 4845 return ret; 4846 } 4847 } 4848 4849 return 0; 4850 } 4851 4852 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, 4853 struct net_device *dev, 4854 unsigned long event, void *ptr, 4855 u16 vid) 4856 { 4857 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 4858 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4859 struct netdev_notifier_changeupper_info *info = ptr; 4860 struct netlink_ext_ack *extack; 4861 struct net_device *upper_dev; 4862 int err = 0; 4863 4864 extack = 
netdev_notifier_info_to_extack(&info->info); 4865 4866 switch (event) { 4867 case NETDEV_PRECHANGEUPPER: 4868 upper_dev = info->upper_dev; 4869 if (!netif_is_bridge_master(upper_dev) && 4870 !netif_is_macvlan(upper_dev) && 4871 !netif_is_l3_master(upper_dev)) { 4872 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4873 return -EINVAL; 4874 } 4875 if (!info->linking) 4876 break; 4877 if (netif_is_bridge_master(upper_dev) && 4878 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) && 4879 mlxsw_sp_bridge_has_vxlan(upper_dev) && 4880 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 4881 return -EOPNOTSUPP; 4882 if (netdev_has_any_upper_dev(upper_dev) && 4883 (!netif_is_bridge_master(upper_dev) || 4884 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, 4885 upper_dev))) { 4886 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); 4887 return -EINVAL; 4888 } 4889 if (netif_is_macvlan(upper_dev) && 4890 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 4891 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4892 return -EOPNOTSUPP; 4893 } 4894 break; 4895 case NETDEV_CHANGEUPPER: 4896 upper_dev = info->upper_dev; 4897 if (netif_is_bridge_master(upper_dev)) { 4898 if (info->linking) 4899 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, 4900 vlan_dev, 4901 upper_dev, 4902 extack); 4903 else 4904 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 4905 vlan_dev, 4906 upper_dev); 4907 } else if (netif_is_macvlan(upper_dev)) { 4908 if (!info->linking) 4909 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4910 } 4911 break; 4912 } 4913 4914 return err; 4915 } 4916 4917 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, 4918 struct net_device *lag_dev, 4919 unsigned long event, 4920 void *ptr, u16 vid) 4921 { 4922 struct net_device *dev; 4923 struct list_head *iter; 4924 int ret; 4925 4926 netdev_for_each_lower_dev(lag_dev, dev, iter) { 4927 if (mlxsw_sp_port_dev_check(dev)) { 4928 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, 4929 event, ptr, 4930 vid); 4931 if (ret) 4932 return ret; 4933 } 4934 } 4935 4936 return 0; 4937 } 4938 4939 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, 4940 struct net_device *br_dev, 4941 unsigned long event, void *ptr, 4942 u16 vid) 4943 { 4944 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 4945 struct netdev_notifier_changeupper_info *info = ptr; 4946 struct netlink_ext_ack *extack; 4947 struct net_device *upper_dev; 4948 4949 if (!mlxsw_sp) 4950 return 0; 4951 4952 extack = netdev_notifier_info_to_extack(&info->info); 4953 4954 switch (event) { 4955 case NETDEV_PRECHANGEUPPER: 4956 upper_dev = info->upper_dev; 4957 if (!netif_is_macvlan(upper_dev) && 4958 !netif_is_l3_master(upper_dev)) { 4959 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 4960 return -EOPNOTSUPP; 4961 } 4962 if (!info->linking) 4963 break; 4964 if (netif_is_macvlan(upper_dev) && 4965 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { 4966 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 4967 return -EOPNOTSUPP; 4968 } 4969 break; 4970 case NETDEV_CHANGEUPPER: 4971 upper_dev = info->upper_dev; 4972 if (info->linking) 4973 break; 4974 if (netif_is_macvlan(upper_dev)) 4975 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 4976 break; 4977 } 4978 4979 return 0; 4980 } 4981 4982 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 4983 unsigned long event, void *ptr) 4984 { 4985 struct net_device 
*real_dev = vlan_dev_real_dev(vlan_dev); 4986 u16 vid = vlan_dev_vlan_id(vlan_dev); 4987 4988 if (mlxsw_sp_port_dev_check(real_dev)) 4989 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, 4990 event, ptr, vid); 4991 else if (netif_is_lag_master(real_dev)) 4992 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, 4993 real_dev, event, 4994 ptr, vid); 4995 else if (netif_is_bridge_master(real_dev)) 4996 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, 4997 event, ptr, vid); 4998 4999 return 0; 5000 } 5001 5002 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, 5003 unsigned long event, void *ptr) 5004 { 5005 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); 5006 struct netdev_notifier_changeupper_info *info = ptr; 5007 struct netlink_ext_ack *extack; 5008 struct net_device *upper_dev; 5009 u16 proto; 5010 5011 if (!mlxsw_sp) 5012 return 0; 5013 5014 extack = netdev_notifier_info_to_extack(&info->info); 5015 5016 switch (event) { 5017 case NETDEV_PRECHANGEUPPER: 5018 upper_dev = info->upper_dev; 5019 if (!is_vlan_dev(upper_dev) && 5020 !netif_is_macvlan(upper_dev) && 5021 !netif_is_l3_master(upper_dev)) { 5022 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5023 return -EOPNOTSUPP; 5024 } 5025 if (!info->linking) 5026 break; 5027 if (br_vlan_enabled(br_dev)) { 5028 br_vlan_get_proto(br_dev, &proto); 5029 if (proto == ETH_P_8021AD) { 5030 NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge"); 5031 return -EOPNOTSUPP; 5032 } 5033 } 5034 if (is_vlan_dev(upper_dev) && 5035 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { 5036 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); 5037 return -EOPNOTSUPP; 5038 } 5039 if (netif_is_macvlan(upper_dev) && 5040 !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) { 5041 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 5042 return -EOPNOTSUPP; 5043 } 5044 break; 5045 case NETDEV_CHANGEUPPER: 5046 upper_dev = info->upper_dev; 5047 if (info->linking) 5048 break; 5049 if (is_vlan_dev(upper_dev)) 5050 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); 5051 if (netif_is_macvlan(upper_dev)) 5052 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); 5053 break; 5054 } 5055 5056 return 0; 5057 } 5058 5059 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, 5060 unsigned long event, void *ptr) 5061 { 5062 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); 5063 struct netdev_notifier_changeupper_info *info = ptr; 5064 struct netlink_ext_ack *extack; 5065 struct net_device *upper_dev; 5066 5067 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) 5068 return 0; 5069 5070 extack = netdev_notifier_info_to_extack(&info->info); 5071 upper_dev = info->upper_dev; 5072 5073 if (!netif_is_l3_master(upper_dev)) { 5074 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); 5075 return -EOPNOTSUPP; 5076 } 5077 5078 return 0; 5079 } 5080 5081 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, 5082 struct net_device *dev, 5083 unsigned long event, void *ptr) 5084 { 5085 struct netdev_notifier_changeupper_info *cu_info; 5086 struct netdev_notifier_info *info = ptr; 5087 struct netlink_ext_ack *extack; 5088 struct net_device *upper_dev; 5089 5090 extack = netdev_notifier_info_to_extack(info); 5091 5092 switch (event) { 5093 case NETDEV_CHANGEUPPER: 5094 cu_info = container_of(info, 5095 struct netdev_notifier_changeupper_info, 5096 info); 5097 upper_dev = cu_info->upper_dev; 
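/* Only VxLAN devices enslaved to a bridge that has an mlxsw port beneath it are of interest; any other topology is ignored by the checks below. */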
5098 if (!netif_is_bridge_master(upper_dev)) 5099 return 0; 5100 if (!mlxsw_sp_lower_get(upper_dev)) 5101 return 0; 5102 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) 5103 return -EOPNOTSUPP; 5104 if (cu_info->linking) { 5105 if (!netif_running(dev)) 5106 return 0; 5107 /* When the bridge is VLAN-aware, the VNI of the VxLAN 5108 * device needs to be mapped to a VLAN, but at this 5109 * point no VLANs are configured on the VxLAN device 5110 */ 5111 if (br_vlan_enabled(upper_dev)) 5112 return 0; 5113 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, 5114 dev, 0, extack); 5115 } else { 5116 /* VLANs were already flushed, which triggered the 5117 * necessary cleanup 5118 */ 5119 if (br_vlan_enabled(upper_dev)) 5120 return 0; 5121 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5122 } 5123 break; 5124 case NETDEV_PRE_UP: 5125 upper_dev = netdev_master_upper_dev_get(dev); 5126 if (!upper_dev) 5127 return 0; 5128 if (!netif_is_bridge_master(upper_dev)) 5129 return 0; 5130 if (!mlxsw_sp_lower_get(upper_dev)) 5131 return 0; 5132 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, 5133 extack); 5134 case NETDEV_DOWN: 5135 upper_dev = netdev_master_upper_dev_get(dev); 5136 if (!upper_dev) 5137 return 0; 5138 if (!netif_is_bridge_master(upper_dev)) 5139 return 0; 5140 if (!mlxsw_sp_lower_get(upper_dev)) 5141 return 0; 5142 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); 5143 break; 5144 } 5145 5146 return 0; 5147 } 5148 5149 static int mlxsw_sp_netdevice_event(struct notifier_block *nb, 5150 unsigned long event, void *ptr) 5151 { 5152 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 5153 struct mlxsw_sp_span_entry *span_entry; 5154 struct mlxsw_sp *mlxsw_sp; 5155 int err = 0; 5156 5157 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); 5158 if (event == NETDEV_UNREGISTER) { 5159 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); 5160 if (span_entry) 5161 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); 5162 } 5163 mlxsw_sp_span_respin(mlxsw_sp); 5164 5165 if (netif_is_vxlan(dev)) 5166 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); 5167 else if (mlxsw_sp_port_dev_check(dev)) 5168 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); 5169 else if (netif_is_lag_master(dev)) 5170 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 5171 else if (is_vlan_dev(dev)) 5172 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 5173 else if (netif_is_bridge_master(dev)) 5174 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); 5175 else if (netif_is_macvlan(dev)) 5176 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); 5177 5178 return notifier_from_errno(err); 5179 } 5180 5181 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { 5182 .notifier_call = mlxsw_sp_inetaddr_valid_event, 5183 }; 5184 5185 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { 5186 .notifier_call = mlxsw_sp_inet6addr_valid_event, 5187 }; 5188 5189 static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { 5190 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, 5191 {0, }, 5192 }; 5193 5194 static struct pci_driver mlxsw_sp1_pci_driver = { 5195 .name = mlxsw_sp1_driver_name, 5196 .id_table = mlxsw_sp1_pci_id_table, 5197 }; 5198 5199 static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { 5200 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, 5201 {0, }, 5202 }; 5203 5204 static struct pci_driver mlxsw_sp2_pci_driver = { 5205 .name = mlxsw_sp2_driver_name, 5206 .id_table = 
mlxsw_sp2_pci_id_table, 5207 }; 5208 5209 static const struct pci_device_id mlxsw_sp3_pci_id_table[] = { 5210 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0}, 5211 {0, }, 5212 }; 5213 5214 static struct pci_driver mlxsw_sp3_pci_driver = { 5215 .name = mlxsw_sp3_driver_name, 5216 .id_table = mlxsw_sp3_pci_id_table, 5217 }; 5218 5219 static const struct pci_device_id mlxsw_sp4_pci_id_table[] = { 5220 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0}, 5221 {0, }, 5222 }; 5223 5224 static struct pci_driver mlxsw_sp4_pci_driver = { 5225 .name = mlxsw_sp4_driver_name, 5226 .id_table = mlxsw_sp4_pci_id_table, 5227 }; 5228 5229 static int __init mlxsw_sp_module_init(void) 5230 { 5231 int err; 5232 5233 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5234 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5235 5236 err = mlxsw_core_driver_register(&mlxsw_sp1_driver); 5237 if (err) 5238 goto err_sp1_core_driver_register; 5239 5240 err = mlxsw_core_driver_register(&mlxsw_sp2_driver); 5241 if (err) 5242 goto err_sp2_core_driver_register; 5243 5244 err = mlxsw_core_driver_register(&mlxsw_sp3_driver); 5245 if (err) 5246 goto err_sp3_core_driver_register; 5247 5248 err = mlxsw_core_driver_register(&mlxsw_sp4_driver); 5249 if (err) 5250 goto err_sp4_core_driver_register; 5251 5252 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); 5253 if (err) 5254 goto err_sp1_pci_driver_register; 5255 5256 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); 5257 if (err) 5258 goto err_sp2_pci_driver_register; 5259 5260 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver); 5261 if (err) 5262 goto err_sp3_pci_driver_register; 5263 5264 err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver); 5265 if (err) 5266 goto err_sp4_pci_driver_register; 5267 5268 return 0; 5269 5270 err_sp4_pci_driver_register: 5271 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5272 err_sp3_pci_driver_register: 5273 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5274 err_sp2_pci_driver_register: 5275 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5276 err_sp1_pci_driver_register: 5277 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5278 err_sp4_core_driver_register: 5279 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5280 err_sp3_core_driver_register: 5281 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5282 err_sp2_core_driver_register: 5283 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5284 err_sp1_core_driver_register: 5285 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5286 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5287 return err; 5288 } 5289 5290 static void __exit mlxsw_sp_module_exit(void) 5291 { 5292 mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver); 5293 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); 5294 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); 5295 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); 5296 mlxsw_core_driver_unregister(&mlxsw_sp4_driver); 5297 mlxsw_core_driver_unregister(&mlxsw_sp3_driver); 5298 mlxsw_core_driver_unregister(&mlxsw_sp2_driver); 5299 mlxsw_core_driver_unregister(&mlxsw_sp1_driver); 5300 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); 5301 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); 5302 } 5303 5304 module_init(mlxsw_sp_module_init); 5305 module_exit(mlxsw_sp_module_exit); 5306 5307 MODULE_LICENSE("Dual BSD/GPL"); 5308 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 5309 MODULE_DESCRIPTION("Mellanox 
Spectrum driver"); 5310 MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); 5311 MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); 5312 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); 5313 MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); 5314 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); 5315 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); 5316 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); 5317 MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME); 5318