/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

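/* A note on the MLXSW_ITEM32() invocations in this file: each one generates
 * the inline accessors used when building the Tx header, e.g.
 * mlxsw_tx_hdr_version_set() and mlxsw_tx_hdr_version_get() for the
 * 'version' item. As a rough sketch (the exact expansion lives in item.h),
 * the numeric arguments are the byte offset of the 32-bit word within the
 * header, the bit position of the field's LSB and the field width in bits,
 * so MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4) describes bits 31:28 of
 * the first word.
 */
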
/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool *p_is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
	return 0;
}

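/* Most of the small helpers in this file follow the same register access
 * pattern: pack a payload buffer on the stack (or heap, for the larger
 * registers), then issue it through the core as either a query (read) or a
 * write. For example, forcing a port administratively up boils down to:
 *
 *	char paos_pl[MLXSW_REG_PAOS_LEN];
 *
 *	mlxsw_reg_paos_pack(paos_pl, local_port, MLXSW_PORT_ADMIN_STATUS_UP);
 *	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
 */
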
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

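/* Unlike the fixed-size payloads above, the SPMS and SPVMLR payloads are
 * kmalloc()'d before being packed. These registers carry per-VID state and
 * are presumably too large to live comfortably on the kernel stack, hence
 * the heap allocation and the kfree() on every exit path.
 */
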
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool *p_usable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
	return 0;
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	len = skb->len;
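	/* Cache the frame length now: once the skb is handed to the core for
	 * transmission it may be completed (and freed) before we get to
	 * update the byte counters below.
	 */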
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
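		/* The fetch_begin/fetch_retry pair re-reads the per-CPU
		 * counters until no writer interleaved, yielding a consistent
		 * 64-bit snapshot even on 32-bit machines where these loads
		 * are not atomic.
		 */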
		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_vfid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
		if (vfid->vid == vid)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}

static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

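/* vFIDs back VLAN devices on non-bridged ports. Each vFID is allocated from
 * the port_vfids.mapped bitmap and translated to an actual FID via
 * mlxsw_sp_vfid_to_fid(), which offsets it past the FID range used for
 * regular 802.1Q VLANs so the two ranges never collide.
 */
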
static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

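/* A vPort is a lightweight {port, VID} instance of struct mlxsw_sp_port: it
 * shares dev, mlxsw_sp, local_port and the LAG state with its parent port,
 * but carries its own vFID and VID, so per-VLAN state such as STP and
 * flooding can be programmed independently of the parent.
 */
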
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

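/* The error unwind above mirrors the setup order in reverse: each label
 * undoes only the steps that had succeeded before the failure, and the mode
 * transition, flooding and vFID teardown are guarded by the same predicates
 * that guarded their setup.
 */
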
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set the port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

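/* Each entry in the table below pairs an ethtool statistics string with the
 * PPCNT register field getter that backs it. mlxsw_sp_port_get_strings()
 * and mlxsw_sp_port_get_stats() walk this same table, so the string order
 * and the counter order stay in sync by construction.
 */
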
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

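/* The helpers above fold the PTYS Ethernet protocol bitmask into the legacy
 * ethtool representation: each link-mode table entry maps a set of proto
 * bits to SUPPORTED_* and ADVERTISED_* flags plus a speed, while the
 * supported-port and connector helpers classify the same mask into fibre,
 * backplane or direct-attach port types. Entries with no supported or
 * advertised value (e.g. 25G and 50G) are presumably modes the ethtool API
 * of the time had no link mode bits for; only their speed is reported.
 */
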
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};

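/* Port creation below is a straight-line sequence: allocate the netdev and
 * per-port state, program the device (MAC address, module check, system
 * port mapping, SWID, MTU, admin-down, buffers), initialize switchdev, and
 * only then register_netdev(), so the port is never visible to the stack
 * half-initialized.
 */
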
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	bool usable;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_check;
	}

	if (!usable) {
		dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
			mlxsw_sp_port->local_port);
		goto port_not_usable;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
err_port_module_check:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}

static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, i);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};

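/* Note the asymmetry with the packet traps further down: events like PUDE
 * are programmed with MLXSW_REG_HPKT_ACTION_FORWARD, while the Rx packet
 * traps use MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU. The apparent distinction is
 * that events only need to reach their event listener, whereas trapped
 * packets are injected into the Rx path as skbs.
 */
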
static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

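/* Two flood tables are used per bridge type: unknown unicast floods through
 * MLXSW_SP_FLOOD_TABLE_UC and all other traffic types (broadcast,
 * multicast, etc.) through MLXSW_SP_FLOOD_TABLE_BM. vFID-based bridges
 * address the tables by FID directly, while 802.1Q bridges use the
 * FID-offset addressing scheme.
 */
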
static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}

static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}

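/* In the config profile below, each used_* flag tells the core that the
 * corresponding max_* / *_size value is meaningful and should be passed
 * down to the firmware; fields whose flag is left at zero keep their
 * firmware defaults.
 */
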

static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sp),
	.init			= mlxsw_sp_init,
	.fini			= mlxsw_sp_fini,
	.txhdr_construct	= mlxsw_sp_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sp_config_profile,
};

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
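
/* Offload hooks for enslaving a port netdev to a Linux bridge. Joining
 * enables learning, learning sync and unknown unicast flooding on the
 * port; leaving reverts to the unbridged defaults. For example, the
 * following (with hypothetical port names) exercises this path:
 *
 *   ip link add name br0 type bridge
 *   ip link set dev sw1p1 master br0
 */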

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When a port is not bridged, untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let the bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add an implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	return mlxsw_sp_port_add_vid(dev, 0, 1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}
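
/* LAG management is split across two registers: SLDR maintains the LAG
 * itself and its distributor (egress) port list, while SLCOR controls
 * each member's collector (ingress) state. A port is first added to the
 * collector and only starts distributing traffic once its TX is enabled
 * further below.
 */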

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	if (lag->ref_count == 1) {
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}
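
/* Distributor (egress) membership is toggled separately from collector
 * state: the bonding/team driver reports per-slave tx_enabled changes
 * through NETDEV_CHANGELOWERSTATE, which are reflected into the SLDR
 * port list by the helpers below.
 */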

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev);

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	/* When removing a VLAN device while it is still bridged, we should
	 * first remove it from the bridge, as we receive the bridge's
	 * notification when the vPort is already gone.
	 */
	if (mlxsw_sp_vport->bridged) {
		struct net_device *br_dev;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev);
	}

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;

	return 0;
}
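
/* Upper-device notifier handling for physical ports. PRECHANGEUPPER is
 * used to veto topologies the device cannot offload before they are
 * committed, and CHANGEUPPER then mirrors the new linking into the
 * device. For example, enslaving a port to a hash-based LAG (with
 * hypothetical port names) goes through this path:
 *
 *   ip link add name bond0 type bond mode 802.3ad
 *   ip link set dev sw1p1 master bond0
 */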

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		/* A HW limitation forbids offloading more than one bridge,
		 * so only allow enslavement to the bridge already in use
		 * (if any).
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}
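
/* The vFID space is partitioned in two: the first MLXSW_SP_VFID_PORT_MAX
 * vFIDs back {Port, VID} pairs that are not bridged, while the remaining
 * MLXSW_SP_VFID_BR_MAX vFIDs each represent a VLAN-unaware bridge that
 * VLAN upper devices were enslaved to. The helpers below translate
 * between a bridge-relative index and the global vFID number.
 */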

static struct mlxsw_sp_vfid *
mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
		if (vfid->br_dev == br_dev)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}

static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}

static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}

static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vfid *vfid)
{
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
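
/* Called when a VLAN upper of a port leaves its bridge: the vPort is
 * migrated from the bridge's vFID back to a per-VID vFID (created on
 * demand), and learning and flooding are turned off again.
 */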

static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid, *new_vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (WARN_ON(!vfid))
		return -EINVAL;

	/* We need a vFID to go back to after leaving the bridge's vFID. */
	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!new_vfid) {
		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(new_vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(new_vfid);
		}
	}

	/* Invalidate the existing {Port, VID} to vFID mapping and create
	 * a new one for the new vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   new_vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning\n");
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
				       false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		goto err_vport_flood_set;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	new_vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = new_vfid;
	vfid->nr_vports--;
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;

	return 0;

err_vport_flood_set:
err_port_vid_learning_set:
err_port_vid_to_fid_validate:
err_port_vid_to_fid_invalidate:
	/* Rollback vFID only if new. */
	if (!new_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
	return err;
}
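
/* Mirror image of the leave path above: the vPort is moved from its
 * per-VID vFID to the vFID representing the bridge (created on demand),
 * and learning and unknown unicast flooding are enabled for it.
 */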

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate the existing {Port, VID} to vFID mapping
	 * and create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
			return false;
	}

	return true;
}
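
/* Notifier handling for VLAN uppers of ports (and of LAG devices,
 * below): only enslavement to a bridge is offloaded, and two VLAN
 * uppers of the same port may not join the same VLAN-unaware bridge,
 * as each such bridge is backed by a single vFID.
 */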

static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		if (!netif_is_bridge_master(upper_dev))
			return NOTIFY_BAD;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master)
			break;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return NOTIFY_BAD;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
			if (err) {
				netdev_err(dev, "Failed to join bridge\n");
				return NOTIFY_BAD;
			}
		} else {
			/* We ignore the bridge's unlinking notifications if
			 * the vPort is already gone, since we left the bridge
			 * when the VLAN device was unlinked from the real
			 * device.
			 */
			if (!mlxsw_sp_vport)
				return NOTIFY_DONE;
			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
							  upper_dev);
			if (err) {
				netdev_err(dev, "Failed to leave bridge\n");
				return NOTIFY_BAD;
			}
		}
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_netdevice_port_event(dev, event, ptr);

	if (netif_is_lag_master(dev))
		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);

	if (is_vlan_dev(dev))
		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);