1 /* 2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c 3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> 5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> 6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include <linux/kernel.h> 38 #include <linux/module.h> 39 #include <linux/types.h> 40 #include <linux/netdevice.h> 41 #include <linux/etherdevice.h> 42 #include <linux/ethtool.h> 43 #include <linux/slab.h> 44 #include <linux/device.h> 45 #include <linux/skbuff.h> 46 #include <linux/if_vlan.h> 47 #include <linux/if_bridge.h> 48 #include <linux/workqueue.h> 49 #include <linux/jiffies.h> 50 #include <linux/bitops.h> 51 #include <linux/list.h> 52 #include <net/switchdev.h> 53 #include <generated/utsrelease.h> 54 55 #include "spectrum.h" 56 #include "core.h" 57 #include "reg.h" 58 #include "port.h" 59 #include "trap.h" 60 #include "txheader.h" 61 62 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; 63 static const char mlxsw_sp_driver_version[] = "1.0"; 64 65 /* tx_hdr_version 66 * Tx header version. 67 * Must be set to 1. 68 */ 69 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 70 71 /* tx_hdr_ctl 72 * Packet control type. 73 * 0 - Ethernet control (e.g. EMADs, LACP) 74 * 1 - Ethernet data 75 */ 76 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 77 78 /* tx_hdr_proto 79 * Packet protocol type. Must be set to 1 (Ethernet). 80 */ 81 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 82 83 /* tx_hdr_rx_is_router 84 * Packet is sent from the router. Valid for data packets only. 85 */ 86 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 87 88 /* tx_hdr_fid_valid 89 * Indicates if the 'fid' field is valid and should be used for 90 * forwarding lookup. Valid for data packets only. 91 */ 92 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 93 94 /* tx_hdr_swid 95 * Switch partition ID. Must be set to 0. 96 */ 97 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 98 99 /* tx_hdr_control_tclass 100 * Indicates if the packet should use the control TClass and not one 101 * of the data TClasses. 102 */ 103 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 104 105 /* tx_hdr_etclass 106 * Egress TClass to be used on the egress device on the egress port. 107 */ 108 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 109 110 /* tx_hdr_port_mid 111 * Destination local port for unicast packets. 112 * Destination multicast ID for multicast packets. 113 * 114 * Control packets are directed to a specific egress port, while data 115 * packets are transmitted through the CPU port (0) into the switch partition, 116 * where forwarding rules are applied. 117 */ 118 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 119 120 /* tx_hdr_fid 121 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 122 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 123 * Valid for data packets only. 124 */ 125 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 126 127 /* tx_hdr_type 128 * 0 - Data packets 129 * 6 - Control packets 130 */ 131 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 132 133 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 134 const struct mlxsw_tx_info *tx_info) 135 { 136 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 137 138 memset(txhdr, 0, MLXSW_TXHDR_LEN); 139 140 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 141 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 142 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 143 mlxsw_tx_hdr_swid_set(txhdr, 0); 144 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 145 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 146 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 147 } 148 149 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 150 { 151 char spad_pl[MLXSW_REG_SPAD_LEN]; 152 int err; 153 154 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 155 if (err) 156 return err; 157 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 158 return 0; 159 } 160 161 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 162 bool is_up) 163 { 164 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 165 char paos_pl[MLXSW_REG_PAOS_LEN]; 166 167 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 168 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 169 MLXSW_PORT_ADMIN_STATUS_DOWN); 170 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 171 } 172 173 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port, 174 bool *p_is_up) 175 { 176 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 177 char paos_pl[MLXSW_REG_PAOS_LEN]; 178 u8 oper_status; 179 int err; 180 181 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0); 182 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 183 if (err) 184 return err; 185 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl); 186 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false; 187 return 0; 188 } 189 190 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 191 unsigned char *addr) 192 { 193 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 194 char ppad_pl[MLXSW_REG_PPAD_LEN]; 195 196 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 197 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 198 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 199 } 200 201 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 202 { 203 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 204 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 205 206 ether_addr_copy(addr, mlxsw_sp->base_mac); 207 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 208 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 209 } 210 211 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, 212 u16 vid, enum mlxsw_reg_spms_state state) 213 { 214 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 215 char *spms_pl; 216 int err; 217 218 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 219 if (!spms_pl) 220 return -ENOMEM; 221 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 222 mlxsw_reg_spms_vid_pack(spms_pl, vid, state); 223 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 224 kfree(spms_pl); 225 return err; 226 } 227 228 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 229 { 230 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 231 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 232 int max_mtu; 233 int err; 234 235 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 236 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 237 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 238 if (err) 239 return err; 240 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 241 242 if (mtu > max_mtu) 243 return -EINVAL; 244 245 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 246 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 247 } 248 249 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 250 { 251 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 252 char pspa_pl[MLXSW_REG_PSPA_LEN]; 253 254 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 255 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 256 } 257 258 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 259 bool enable) 260 { 261 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 262 char svpe_pl[MLXSW_REG_SVPE_LEN]; 263 264 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 265 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 266 } 267 268 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, 269 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid, 270 u16 vid) 271 { 272 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 273 char svfa_pl[MLXSW_REG_SVFA_LEN]; 274 275 mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid, 276 fid, vid); 277 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); 278 } 279 280 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, 281 u16 vid, bool learn_enable) 282 { 283 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 284 char *spvmlr_pl; 285 int err; 286 287 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 288 if (!spvmlr_pl) 289 return -ENOMEM; 290 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 291 learn_enable); 292 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 293 kfree(spvmlr_pl); 294 return err; 295 } 296 297 static int 298 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 299 { 300 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 301 char sspr_pl[MLXSW_REG_SSPR_LEN]; 302 303 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 304 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 305 } 306 307 static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port, 308 bool *p_usable) 309 { 310 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 311 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 312 int err; 313 314 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); 315 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 316 if (err) 317 return err; 318 *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false; 319 return 0; 320 } 321 322 static int mlxsw_sp_port_open(struct net_device *dev) 323 { 324 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 325 int err; 326 327 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 328 if (err) 329 return err; 330 netif_start_queue(dev); 331 return 0; 332 } 333 334 static int mlxsw_sp_port_stop(struct net_device *dev) 335 { 336 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 337 338 netif_stop_queue(dev); 339 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 340 } 341 342 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 343 struct net_device *dev) 344 { 345 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 346 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 347 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 348 const struct mlxsw_tx_info tx_info = { 349 .local_port = mlxsw_sp_port->local_port, 350 .is_emad = false, 351 }; 352 u64 len; 353 int err; 354 355 if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info)) 356 return NETDEV_TX_BUSY; 357 358 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 359 struct sk_buff *skb_orig = skb; 360 361 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 362 if (!skb) { 363 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 364 dev_kfree_skb_any(skb_orig); 365 return NETDEV_TX_OK; 366 } 367 } 368 369 if (eth_skb_pad(skb)) { 370 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 371 return NETDEV_TX_OK; 372 } 373 374 mlxsw_sp_txhdr_construct(skb, &tx_info); 375 len = skb->len; 376 /* Due to a race we might fail here because of a full queue. In that 377 * unlikely case we simply drop the packet. 378 */ 379 err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info); 380 381 if (!err) { 382 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 383 u64_stats_update_begin(&pcpu_stats->syncp); 384 pcpu_stats->tx_packets++; 385 pcpu_stats->tx_bytes += len; 386 u64_stats_update_end(&pcpu_stats->syncp); 387 } else { 388 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 389 dev_kfree_skb_any(skb); 390 } 391 return NETDEV_TX_OK; 392 } 393 394 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 395 { 396 } 397 398 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 399 { 400 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 401 struct sockaddr *addr = p; 402 int err; 403 404 if (!is_valid_ether_addr(addr->sa_data)) 405 return -EADDRNOTAVAIL; 406 407 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 408 if (err) 409 return err; 410 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 411 return 0; 412 } 413 414 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 415 { 416 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 417 int err; 418 419 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 420 if (err) 421 return err; 422 dev->mtu = mtu; 423 return 0; 424 } 425 426 static struct rtnl_link_stats64 * 427 mlxsw_sp_port_get_stats64(struct net_device *dev, 428 struct rtnl_link_stats64 *stats) 429 { 430 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 431 struct mlxsw_sp_port_pcpu_stats *p; 432 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 433 u32 tx_dropped = 0; 434 unsigned int start; 435 int i; 436 437 for_each_possible_cpu(i) { 438 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 439 do { 440 start = u64_stats_fetch_begin_irq(&p->syncp); 441 rx_packets = p->rx_packets; 442 rx_bytes = p->rx_bytes; 443 tx_packets = p->tx_packets; 444 tx_bytes = p->tx_bytes; 445 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 446 447 stats->rx_packets += rx_packets; 448 stats->rx_bytes += rx_bytes; 449 stats->tx_packets += tx_packets; 450 stats->tx_bytes += tx_bytes; 451 /* tx_dropped is u32, updated without syncp protection. */ 452 tx_dropped += p->tx_dropped; 453 } 454 stats->tx_dropped = tx_dropped; 455 return stats; 456 } 457 458 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 459 u16 vid_end, bool is_member, bool untagged) 460 { 461 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 462 char *spvm_pl; 463 int err; 464 465 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 466 if (!spvm_pl) 467 return -ENOMEM; 468 469 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 470 vid_end, is_member, untagged); 471 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 472 kfree(spvm_pl); 473 return err; 474 } 475 476 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) 477 { 478 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; 479 u16 vid, last_visited_vid; 480 int err; 481 482 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { 483 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid, 484 vid); 485 if (err) { 486 last_visited_vid = vid; 487 goto err_port_vid_to_fid_set; 488 } 489 } 490 491 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 492 if (err) { 493 last_visited_vid = VLAN_N_VID; 494 goto err_port_vid_to_fid_set; 495 } 496 497 return 0; 498 499 err_port_vid_to_fid_set: 500 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) 501 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid, 502 vid); 503 return err; 504 } 505 506 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) 507 { 508 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; 509 u16 vid; 510 int err; 511 512 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 513 if (err) 514 return err; 515 516 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { 517 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, 518 vid, vid); 519 if (err) 520 return err; 521 } 522 523 return 0; 524 } 525 526 static struct mlxsw_sp_vfid * 527 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) 528 { 529 struct mlxsw_sp_vfid *vfid; 530 531 list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { 532 if (vfid->vid == vid) 533 return vfid; 534 } 535 536 return NULL; 537 } 538 539 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) 540 { 541 return find_first_zero_bit(mlxsw_sp->port_vfids.mapped, 542 MLXSW_SP_VFID_PORT_MAX); 543 } 544 545 static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) 546 { 547 u16 fid = mlxsw_sp_vfid_to_fid(vfid); 548 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 549 550 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); 551 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 552 } 553 554 static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) 555 { 556 u16 fid = mlxsw_sp_vfid_to_fid(vfid); 557 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 558 559 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); 560 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 561 } 562 563 static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, 564 u16 vid) 565 { 566 struct device *dev = mlxsw_sp->bus_info->dev; 567 struct mlxsw_sp_vfid *vfid; 568 u16 n_vfid; 569 int err; 570 571 n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); 572 if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { 573 dev_err(dev, "No available vFIDs\n"); 574 return ERR_PTR(-ERANGE); 575 } 576 577 err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); 578 if (err) { 579 dev_err(dev, "Failed to create vFID=%d\n", n_vfid); 580 return ERR_PTR(err); 581 } 582 583 vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); 584 if (!vfid) 585 goto err_allocate_vfid; 586 587 vfid->vfid = n_vfid; 588 vfid->vid = vid; 589 590 list_add(&vfid->list, &mlxsw_sp->port_vfids.list); 591 set_bit(n_vfid, mlxsw_sp->port_vfids.mapped); 592 593 return vfid; 594 595 err_allocate_vfid: 596 __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); 597 return ERR_PTR(-ENOMEM); 598 } 599 600 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, 601 struct mlxsw_sp_vfid *vfid) 602 { 603 clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); 604 list_del(&vfid->list); 605 606 __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); 607 608 kfree(vfid); 609 } 610 611 static struct mlxsw_sp_port * 612 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, 613 struct mlxsw_sp_vfid *vfid) 614 { 615 struct mlxsw_sp_port *mlxsw_sp_vport; 616 617 mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL); 618 if (!mlxsw_sp_vport) 619 return NULL; 620 621 /* dev will be set correctly after the VLAN device is linked 622 * with the real device. In case of bridge SELF invocation, dev 623 * will remain as is. 624 */ 625 mlxsw_sp_vport->dev = mlxsw_sp_port->dev; 626 mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 627 mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port; 628 mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; 629 mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; 630 mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; 631 mlxsw_sp_vport->vport.vfid = vfid; 632 mlxsw_sp_vport->vport.vid = vfid->vid; 633 634 list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list); 635 636 return mlxsw_sp_vport; 637 } 638 639 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) 640 { 641 list_del(&mlxsw_sp_vport->vport.list); 642 kfree(mlxsw_sp_vport); 643 } 644 645 int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, 646 u16 vid) 647 { 648 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 649 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 650 struct mlxsw_sp_port *mlxsw_sp_vport; 651 struct mlxsw_sp_vfid *vfid; 652 int err; 653 654 /* VLAN 0 is added to HW filter when device goes up, but it is 655 * reserved in our case, so simply return. 656 */ 657 if (!vid) 658 return 0; 659 660 if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { 661 netdev_warn(dev, "VID=%d already configured\n", vid); 662 return 0; 663 } 664 665 vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); 666 if (!vfid) { 667 vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); 668 if (IS_ERR(vfid)) { 669 netdev_err(dev, "Failed to create vFID for VID=%d\n", 670 vid); 671 return PTR_ERR(vfid); 672 } 673 } 674 675 mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid); 676 if (!mlxsw_sp_vport) { 677 netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); 678 err = -ENOMEM; 679 goto err_port_vport_create; 680 } 681 682 if (!vfid->nr_vports) { 683 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, 684 true, false); 685 if (err) { 686 netdev_err(dev, "Failed to setup flooding for vFID=%d\n", 687 vfid->vfid); 688 goto err_vport_flood_set; 689 } 690 } 691 692 /* When adding the first VLAN interface on a bridged port we need to 693 * transition all the active 802.1Q bridge VLANs to use explicit 694 * {Port, VID} to FID mappings and set the port's mode to Virtual mode. 695 */ 696 if (list_is_singular(&mlxsw_sp_port->vports_list)) { 697 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); 698 if (err) { 699 netdev_err(dev, "Failed to set to Virtual mode\n"); 700 goto err_port_vp_mode_trans; 701 } 702 } 703 704 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 705 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 706 true, 707 mlxsw_sp_vfid_to_fid(vfid->vfid), 708 vid); 709 if (err) { 710 netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", 711 vid, vfid->vfid); 712 goto err_port_vid_to_fid_set; 713 } 714 715 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); 716 if (err) { 717 netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); 718 goto err_port_vid_learning_set; 719 } 720 721 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false); 722 if (err) { 723 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", 724 vid); 725 goto err_port_add_vid; 726 } 727 728 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, 729 MLXSW_REG_SPMS_STATE_FORWARDING); 730 if (err) { 731 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); 732 goto err_port_stp_state_set; 733 } 734 735 vfid->nr_vports++; 736 737 return 0; 738 739 err_port_stp_state_set: 740 mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); 741 err_port_add_vid: 742 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); 743 err_port_vid_learning_set: 744 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 745 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, 746 mlxsw_sp_vfid_to_fid(vfid->vfid), vid); 747 err_port_vid_to_fid_set: 748 if (list_is_singular(&mlxsw_sp_port->vports_list)) 749 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); 750 err_port_vp_mode_trans: 751 if (!vfid->nr_vports) 752 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, 753 false); 754 err_vport_flood_set: 755 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); 756 err_port_vport_create: 757 if (!vfid->nr_vports) 758 mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); 759 return err; 760 } 761 762 int mlxsw_sp_port_kill_vid(struct net_device *dev, 763 __be16 __always_unused proto, u16 vid) 764 { 765 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 766 struct mlxsw_sp_port *mlxsw_sp_vport; 767 struct mlxsw_sp_vfid *vfid; 768 int err; 769 770 /* VLAN 0 is removed from HW filter when device goes down, but 771 * it is reserved in our case, so simply return. 772 */ 773 if (!vid) 774 return 0; 775 776 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 777 if (!mlxsw_sp_vport) { 778 netdev_warn(dev, "VID=%d does not exist\n", vid); 779 return 0; 780 } 781 782 vfid = mlxsw_sp_vport->vport.vfid; 783 784 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, 785 MLXSW_REG_SPMS_STATE_DISCARDING); 786 if (err) { 787 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); 788 return err; 789 } 790 791 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); 792 if (err) { 793 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", 794 vid); 795 return err; 796 } 797 798 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); 799 if (err) { 800 netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); 801 return err; 802 } 803 804 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 805 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 806 false, 807 mlxsw_sp_vfid_to_fid(vfid->vfid), 808 vid); 809 if (err) { 810 netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", 811 vid, vfid->vfid); 812 return err; 813 } 814 815 /* When removing the last VLAN interface on a bridged port we need to 816 * transition all active 802.1Q bridge VLANs to use VID to FID 817 * mappings and set port's mode to VLAN mode. 818 */ 819 if (list_is_singular(&mlxsw_sp_port->vports_list)) { 820 err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); 821 if (err) { 822 netdev_err(dev, "Failed to set to VLAN mode\n"); 823 return err; 824 } 825 } 826 827 vfid->nr_vports--; 828 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); 829 830 /* Destroy the vFID if no vPorts are assigned to it anymore. */ 831 if (!vfid->nr_vports) 832 mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid); 833 834 return 0; 835 } 836 837 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 838 .ndo_open = mlxsw_sp_port_open, 839 .ndo_stop = mlxsw_sp_port_stop, 840 .ndo_start_xmit = mlxsw_sp_port_xmit, 841 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 842 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 843 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 844 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 845 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 846 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 847 .ndo_fdb_add = switchdev_port_fdb_add, 848 .ndo_fdb_del = switchdev_port_fdb_del, 849 .ndo_fdb_dump = switchdev_port_fdb_dump, 850 .ndo_bridge_setlink = switchdev_port_bridge_setlink, 851 .ndo_bridge_getlink = switchdev_port_bridge_getlink, 852 .ndo_bridge_dellink = switchdev_port_bridge_dellink, 853 }; 854 855 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 856 struct ethtool_drvinfo *drvinfo) 857 { 858 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 859 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 860 861 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); 862 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 863 sizeof(drvinfo->version)); 864 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 865 "%d.%d.%d", 866 mlxsw_sp->bus_info->fw_rev.major, 867 mlxsw_sp->bus_info->fw_rev.minor, 868 mlxsw_sp->bus_info->fw_rev.subminor); 869 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 870 sizeof(drvinfo->bus_info)); 871 } 872 873 struct mlxsw_sp_port_hw_stats { 874 char str[ETH_GSTRING_LEN]; 875 u64 (*getter)(char *payload); 876 }; 877 878 static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 879 { 880 .str = "a_frames_transmitted_ok", 881 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 882 }, 883 { 884 .str = "a_frames_received_ok", 885 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 886 }, 887 { 888 .str = "a_frame_check_sequence_errors", 889 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 890 }, 891 { 892 .str = "a_alignment_errors", 893 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 894 }, 895 { 896 .str = "a_octets_transmitted_ok", 897 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 898 }, 899 { 900 .str = "a_octets_received_ok", 901 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 902 }, 903 { 904 .str = "a_multicast_frames_xmitted_ok", 905 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 906 }, 907 { 908 .str = "a_broadcast_frames_xmitted_ok", 909 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 910 }, 911 { 912 .str = "a_multicast_frames_received_ok", 913 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 914 }, 915 { 916 .str = "a_broadcast_frames_received_ok", 917 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 918 }, 919 { 920 .str = "a_in_range_length_errors", 921 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 922 }, 923 { 924 .str = "a_out_of_range_length_field", 925 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 926 }, 927 { 928 .str = "a_frame_too_long_errors", 929 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 930 }, 931 { 932 .str = "a_symbol_error_during_carrier", 933 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 934 }, 935 { 936 .str = "a_mac_control_frames_transmitted", 937 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 938 }, 939 { 940 .str = "a_mac_control_frames_received", 941 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 942 }, 943 { 944 .str = "a_unsupported_opcodes_received", 945 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 946 }, 947 { 948 .str = "a_pause_mac_ctrl_frames_received", 949 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 950 }, 951 { 952 .str = "a_pause_mac_ctrl_frames_xmitted", 953 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 954 }, 955 }; 956 957 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 958 959 static void mlxsw_sp_port_get_strings(struct net_device *dev, 960 u32 stringset, u8 *data) 961 { 962 u8 *p = data; 963 int i; 964 965 switch (stringset) { 966 case ETH_SS_STATS: 967 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 968 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 969 ETH_GSTRING_LEN); 970 p += ETH_GSTRING_LEN; 971 } 972 break; 973 } 974 } 975 976 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 977 enum ethtool_phys_id_state state) 978 { 979 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 980 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 981 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 982 bool active; 983 984 switch (state) { 985 case ETHTOOL_ID_ACTIVE: 986 active = true; 987 break; 988 case ETHTOOL_ID_INACTIVE: 989 active = false; 990 break; 991 default: 992 return -EOPNOTSUPP; 993 } 994 995 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 996 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 997 } 998 999 static void mlxsw_sp_port_get_stats(struct net_device *dev, 1000 struct ethtool_stats *stats, u64 *data) 1001 { 1002 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1003 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1004 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1005 int i; 1006 int err; 1007 1008 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port); 1009 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1010 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) 1011 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; 1012 } 1013 1014 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 1015 { 1016 switch (sset) { 1017 case ETH_SS_STATS: 1018 return MLXSW_SP_PORT_HW_STATS_LEN; 1019 default: 1020 return -EOPNOTSUPP; 1021 } 1022 } 1023 1024 struct mlxsw_sp_port_link_mode { 1025 u32 mask; 1026 u32 supported; 1027 u32 advertised; 1028 u32 speed; 1029 }; 1030 1031 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { 1032 { 1033 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 1034 .supported = SUPPORTED_100baseT_Full, 1035 .advertised = ADVERTISED_100baseT_Full, 1036 .speed = 100, 1037 }, 1038 { 1039 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX, 1040 .speed = 100, 1041 }, 1042 { 1043 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 1044 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 1045 .supported = SUPPORTED_1000baseKX_Full, 1046 .advertised = ADVERTISED_1000baseKX_Full, 1047 .speed = 1000, 1048 }, 1049 { 1050 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 1051 .supported = SUPPORTED_10000baseT_Full, 1052 .advertised = ADVERTISED_10000baseT_Full, 1053 .speed = 10000, 1054 }, 1055 { 1056 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 1057 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 1058 .supported = SUPPORTED_10000baseKX4_Full, 1059 .advertised = ADVERTISED_10000baseKX4_Full, 1060 .speed = 10000, 1061 }, 1062 { 1063 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1064 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 1065 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 1066 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 1067 .supported = SUPPORTED_10000baseKR_Full, 1068 .advertised = ADVERTISED_10000baseKR_Full, 1069 .speed = 10000, 1070 }, 1071 { 1072 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 1073 .supported = SUPPORTED_20000baseKR2_Full, 1074 .advertised = ADVERTISED_20000baseKR2_Full, 1075 .speed = 20000, 1076 }, 1077 { 1078 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 1079 .supported = SUPPORTED_40000baseCR4_Full, 1080 .advertised = ADVERTISED_40000baseCR4_Full, 1081 .speed = 40000, 1082 }, 1083 { 1084 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 1085 .supported = SUPPORTED_40000baseKR4_Full, 1086 .advertised = ADVERTISED_40000baseKR4_Full, 1087 .speed = 40000, 1088 }, 1089 { 1090 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 1091 .supported = SUPPORTED_40000baseSR4_Full, 1092 .advertised = ADVERTISED_40000baseSR4_Full, 1093 .speed = 40000, 1094 }, 1095 { 1096 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 1097 .supported = SUPPORTED_40000baseLR4_Full, 1098 .advertised = ADVERTISED_40000baseLR4_Full, 1099 .speed = 40000, 1100 }, 1101 { 1102 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR | 1103 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR | 1104 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 1105 .speed = 25000, 1106 }, 1107 { 1108 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 | 1109 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 | 1110 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 1111 .speed = 50000, 1112 }, 1113 { 1114 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 1115 .supported = SUPPORTED_56000baseKR4_Full, 1116 .advertised = ADVERTISED_56000baseKR4_Full, 1117 .speed = 56000, 1118 }, 1119 { 1120 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 | 1121 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 1122 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 1123 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 1124 .speed = 100000, 1125 }, 1126 }; 1127 1128 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 1129 1130 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto) 1131 { 1132 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 1133 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 1134 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 1135 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 1136 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 1137 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 1138 return SUPPORTED_FIBRE; 1139 1140 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1141 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 1142 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 1143 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 1144 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 1145 return SUPPORTED_Backplane; 1146 return 0; 1147 } 1148 1149 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto) 1150 { 1151 u32 modes = 0; 1152 int i; 1153 1154 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1155 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 1156 modes |= mlxsw_sp_port_link_mode[i].supported; 1157 } 1158 return modes; 1159 } 1160 1161 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto) 1162 { 1163 u32 modes = 0; 1164 int i; 1165 1166 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1167 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 1168 modes |= mlxsw_sp_port_link_mode[i].advertised; 1169 } 1170 return modes; 1171 } 1172 1173 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 1174 struct ethtool_cmd *cmd) 1175 { 1176 u32 speed = SPEED_UNKNOWN; 1177 u8 duplex = DUPLEX_UNKNOWN; 1178 int i; 1179 1180 if (!carrier_ok) 1181 goto out; 1182 1183 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1184 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 1185 speed = mlxsw_sp_port_link_mode[i].speed; 1186 duplex = DUPLEX_FULL; 1187 break; 1188 } 1189 } 1190 out: 1191 ethtool_cmd_speed_set(cmd, speed); 1192 cmd->duplex = duplex; 1193 } 1194 1195 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 1196 { 1197 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 1198 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 1199 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 1200 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 1201 return PORT_FIBRE; 1202 1203 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 1204 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 1205 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 1206 return PORT_DA; 1207 1208 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1209 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 1210 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 1211 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 1212 return PORT_NONE; 1213 1214 return PORT_OTHER; 1215 } 1216 1217 static int mlxsw_sp_port_get_settings(struct net_device *dev, 1218 struct ethtool_cmd *cmd) 1219 { 1220 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1221 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1222 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1223 u32 eth_proto_cap; 1224 u32 eth_proto_admin; 1225 u32 eth_proto_oper; 1226 int err; 1227 1228 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 1229 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1230 if (err) { 1231 netdev_err(dev, "Failed to get proto"); 1232 return err; 1233 } 1234 mlxsw_reg_ptys_unpack(ptys_pl, ð_proto_cap, 1235 ð_proto_admin, ð_proto_oper); 1236 1237 cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) | 1238 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) | 1239 SUPPORTED_Pause | SUPPORTED_Asym_Pause; 1240 cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin); 1241 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), 1242 eth_proto_oper, cmd); 1243 1244 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 1245 cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper); 1246 cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper); 1247 1248 cmd->transceiver = XCVR_INTERNAL; 1249 return 0; 1250 } 1251 1252 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising) 1253 { 1254 u32 ptys_proto = 0; 1255 int i; 1256 1257 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1258 if (advertising & mlxsw_sp_port_link_mode[i].advertised) 1259 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 1260 } 1261 return ptys_proto; 1262 } 1263 1264 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 1265 { 1266 u32 ptys_proto = 0; 1267 int i; 1268 1269 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1270 if (speed == mlxsw_sp_port_link_mode[i].speed) 1271 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 1272 } 1273 return ptys_proto; 1274 } 1275 1276 static int mlxsw_sp_port_set_settings(struct net_device *dev, 1277 struct ethtool_cmd *cmd) 1278 { 1279 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1280 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1281 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1282 u32 speed; 1283 u32 eth_proto_new; 1284 u32 eth_proto_cap; 1285 u32 eth_proto_admin; 1286 bool is_up; 1287 int err; 1288 1289 speed = ethtool_cmd_speed(cmd); 1290 1291 eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ? 1292 mlxsw_sp_to_ptys_advert_link(cmd->advertising) : 1293 mlxsw_sp_to_ptys_speed(speed); 1294 1295 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 1296 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1297 if (err) { 1298 netdev_err(dev, "Failed to get proto"); 1299 return err; 1300 } 1301 mlxsw_reg_ptys_unpack(ptys_pl, ð_proto_cap, ð_proto_admin, NULL); 1302 1303 eth_proto_new = eth_proto_new & eth_proto_cap; 1304 if (!eth_proto_new) { 1305 netdev_err(dev, "Not supported proto admin requested"); 1306 return -EINVAL; 1307 } 1308 if (eth_proto_new == eth_proto_admin) 1309 return 0; 1310 1311 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new); 1312 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1313 if (err) { 1314 netdev_err(dev, "Failed to set proto admin"); 1315 return err; 1316 } 1317 1318 err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up); 1319 if (err) { 1320 netdev_err(dev, "Failed to get oper status"); 1321 return err; 1322 } 1323 if (!is_up) 1324 return 0; 1325 1326 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1327 if (err) { 1328 netdev_err(dev, "Failed to set admin status"); 1329 return err; 1330 } 1331 1332 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1333 if (err) { 1334 netdev_err(dev, "Failed to set admin status"); 1335 return err; 1336 } 1337 1338 return 0; 1339 } 1340 1341 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 1342 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 1343 .get_link = ethtool_op_get_link, 1344 .get_strings = mlxsw_sp_port_get_strings, 1345 .set_phys_id = mlxsw_sp_port_set_phys_id, 1346 .get_ethtool_stats = mlxsw_sp_port_get_stats, 1347 .get_sset_count = mlxsw_sp_port_get_sset_count, 1348 .get_settings = mlxsw_sp_port_get_settings, 1349 .set_settings = mlxsw_sp_port_set_settings, 1350 }; 1351 1352 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) 1353 { 1354 struct mlxsw_sp_port *mlxsw_sp_port; 1355 struct net_device *dev; 1356 bool usable; 1357 size_t bytes; 1358 int err; 1359 1360 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 1361 if (!dev) 1362 return -ENOMEM; 1363 mlxsw_sp_port = netdev_priv(dev); 1364 mlxsw_sp_port->dev = dev; 1365 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1366 mlxsw_sp_port->local_port = local_port; 1367 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); 1368 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); 1369 if (!mlxsw_sp_port->active_vlans) { 1370 err = -ENOMEM; 1371 goto err_port_active_vlans_alloc; 1372 } 1373 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL); 1374 if (!mlxsw_sp_port->untagged_vlans) { 1375 err = -ENOMEM; 1376 goto err_port_untagged_vlans_alloc; 1377 } 1378 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); 1379 1380 mlxsw_sp_port->pcpu_stats = 1381 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 1382 if (!mlxsw_sp_port->pcpu_stats) { 1383 err = -ENOMEM; 1384 goto err_alloc_stats; 1385 } 1386 1387 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 1388 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 1389 1390 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 1391 if (err) { 1392 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 1393 mlxsw_sp_port->local_port); 1394 goto err_dev_addr_init; 1395 } 1396 1397 netif_carrier_off(dev); 1398 1399 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 1400 NETIF_F_HW_VLAN_CTAG_FILTER; 1401 1402 /* Each packet needs to have a Tx header (metadata) on top all other 1403 * headers. 1404 */ 1405 dev->hard_header_len += MLXSW_TXHDR_LEN; 1406 1407 err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable); 1408 if (err) { 1409 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n", 1410 mlxsw_sp_port->local_port); 1411 goto err_port_module_check; 1412 } 1413 1414 if (!usable) { 1415 dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n", 1416 mlxsw_sp_port->local_port); 1417 goto port_not_usable; 1418 } 1419 1420 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 1421 if (err) { 1422 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 1423 mlxsw_sp_port->local_port); 1424 goto err_port_system_port_mapping_set; 1425 } 1426 1427 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 1428 if (err) { 1429 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 1430 mlxsw_sp_port->local_port); 1431 goto err_port_swid_set; 1432 } 1433 1434 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 1435 if (err) { 1436 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 1437 mlxsw_sp_port->local_port); 1438 goto err_port_mtu_set; 1439 } 1440 1441 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1442 if (err) 1443 goto err_port_admin_status_set; 1444 1445 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 1446 if (err) { 1447 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 1448 mlxsw_sp_port->local_port); 1449 goto err_port_buffers_init; 1450 } 1451 1452 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 1453 err = register_netdev(dev); 1454 if (err) { 1455 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 1456 mlxsw_sp_port->local_port); 1457 goto err_register_netdev; 1458 } 1459 1460 err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); 1461 if (err) 1462 goto err_port_vlan_init; 1463 1464 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 1465 return 0; 1466 1467 err_port_vlan_init: 1468 unregister_netdev(dev); 1469 err_register_netdev: 1470 err_port_buffers_init: 1471 err_port_admin_status_set: 1472 err_port_mtu_set: 1473 err_port_swid_set: 1474 err_port_system_port_mapping_set: 1475 port_not_usable: 1476 err_port_module_check: 1477 err_dev_addr_init: 1478 free_percpu(mlxsw_sp_port->pcpu_stats); 1479 err_alloc_stats: 1480 kfree(mlxsw_sp_port->untagged_vlans); 1481 err_port_untagged_vlans_alloc: 1482 kfree(mlxsw_sp_port->active_vlans); 1483 err_port_active_vlans_alloc: 1484 free_netdev(dev); 1485 return err; 1486 } 1487 1488 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) 1489 { 1490 struct net_device *dev = mlxsw_sp_port->dev; 1491 struct mlxsw_sp_port *mlxsw_sp_vport, *tmp; 1492 1493 list_for_each_entry_safe(mlxsw_sp_vport, tmp, 1494 &mlxsw_sp_port->vports_list, vport.list) { 1495 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 1496 1497 /* vPorts created for VLAN devices should already be gone 1498 * by now, since we unregistered the port netdev. 1499 */ 1500 WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev)); 1501 mlxsw_sp_port_kill_vid(dev, 0, vid); 1502 } 1503 } 1504 1505 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 1506 { 1507 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1508 1509 if (!mlxsw_sp_port) 1510 return; 1511 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1512 mlxsw_sp_port_vports_fini(mlxsw_sp_port); 1513 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 1514 free_percpu(mlxsw_sp_port->pcpu_stats); 1515 kfree(mlxsw_sp_port->untagged_vlans); 1516 kfree(mlxsw_sp_port->active_vlans); 1517 free_netdev(mlxsw_sp_port->dev); 1518 } 1519 1520 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 1521 { 1522 int i; 1523 1524 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) 1525 mlxsw_sp_port_remove(mlxsw_sp, i); 1526 kfree(mlxsw_sp->ports); 1527 } 1528 1529 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 1530 { 1531 size_t alloc_size; 1532 int i; 1533 int err; 1534 1535 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS; 1536 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 1537 if (!mlxsw_sp->ports) 1538 return -ENOMEM; 1539 1540 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { 1541 err = mlxsw_sp_port_create(mlxsw_sp, i); 1542 if (err) 1543 goto err_port_create; 1544 } 1545 return 0; 1546 1547 err_port_create: 1548 for (i--; i >= 1; i--) 1549 mlxsw_sp_port_remove(mlxsw_sp, i); 1550 kfree(mlxsw_sp->ports); 1551 return err; 1552 } 1553 1554 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 1555 char *pude_pl, void *priv) 1556 { 1557 struct mlxsw_sp *mlxsw_sp = priv; 1558 struct mlxsw_sp_port *mlxsw_sp_port; 1559 enum mlxsw_reg_pude_oper_status status; 1560 u8 local_port; 1561 1562 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 1563 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1564 if (!mlxsw_sp_port) { 1565 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n", 1566 local_port); 1567 return; 1568 } 1569 1570 status = mlxsw_reg_pude_oper_status_get(pude_pl); 1571 if (status == MLXSW_PORT_OPER_STATUS_UP) { 1572 netdev_info(mlxsw_sp_port->dev, "link up\n"); 1573 netif_carrier_on(mlxsw_sp_port->dev); 1574 } else { 1575 netdev_info(mlxsw_sp_port->dev, "link down\n"); 1576 netif_carrier_off(mlxsw_sp_port->dev); 1577 } 1578 } 1579 1580 static struct mlxsw_event_listener mlxsw_sp_pude_event = { 1581 .func = mlxsw_sp_pude_event_func, 1582 .trap_id = MLXSW_TRAP_ID_PUDE, 1583 }; 1584 1585 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp, 1586 enum mlxsw_event_trap_id trap_id) 1587 { 1588 struct mlxsw_event_listener *el; 1589 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 1590 int err; 1591 1592 switch (trap_id) { 1593 case MLXSW_TRAP_ID_PUDE: 1594 el = &mlxsw_sp_pude_event; 1595 break; 1596 } 1597 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp); 1598 if (err) 1599 return err; 1600 1601 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); 1602 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 1603 if (err) 1604 goto err_event_trap_set; 1605 1606 return 0; 1607 1608 err_event_trap_set: 1609 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); 1610 return err; 1611 } 1612 1613 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp, 1614 enum mlxsw_event_trap_id trap_id) 1615 { 1616 struct mlxsw_event_listener *el; 1617 1618 switch (trap_id) { 1619 case MLXSW_TRAP_ID_PUDE: 1620 el = &mlxsw_sp_pude_event; 1621 break; 1622 } 1623 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); 1624 } 1625 1626 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port, 1627 void *priv) 1628 { 1629 struct mlxsw_sp *mlxsw_sp = priv; 1630 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1631 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 1632 1633 if (unlikely(!mlxsw_sp_port)) { 1634 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 1635 local_port); 1636 return; 1637 } 1638 1639 skb->dev = mlxsw_sp_port->dev; 1640 1641 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 1642 u64_stats_update_begin(&pcpu_stats->syncp); 1643 pcpu_stats->rx_packets++; 1644 pcpu_stats->rx_bytes += skb->len; 1645 u64_stats_update_end(&pcpu_stats->syncp); 1646 1647 skb->protocol = eth_type_trans(skb, skb->dev); 1648 netif_receive_skb(skb); 1649 } 1650 1651 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { 1652 { 1653 .func = mlxsw_sp_rx_listener_func, 1654 .local_port = MLXSW_PORT_DONT_CARE, 1655 .trap_id = MLXSW_TRAP_ID_FDB_MC, 1656 }, 1657 /* Traps for specific L2 packet types, not trapped as FDB MC */ 1658 { 1659 .func = mlxsw_sp_rx_listener_func, 1660 .local_port = MLXSW_PORT_DONT_CARE, 1661 .trap_id = MLXSW_TRAP_ID_STP, 1662 }, 1663 { 1664 .func = mlxsw_sp_rx_listener_func, 1665 .local_port = MLXSW_PORT_DONT_CARE, 1666 .trap_id = MLXSW_TRAP_ID_LACP, 1667 }, 1668 { 1669 .func = mlxsw_sp_rx_listener_func, 1670 .local_port = MLXSW_PORT_DONT_CARE, 1671 .trap_id = MLXSW_TRAP_ID_EAPOL, 1672 }, 1673 { 1674 .func = mlxsw_sp_rx_listener_func, 1675 .local_port = MLXSW_PORT_DONT_CARE, 1676 .trap_id = MLXSW_TRAP_ID_LLDP, 1677 }, 1678 { 1679 .func = mlxsw_sp_rx_listener_func, 1680 .local_port = MLXSW_PORT_DONT_CARE, 1681 .trap_id = MLXSW_TRAP_ID_MMRP, 1682 }, 1683 { 1684 .func = mlxsw_sp_rx_listener_func, 1685 .local_port = MLXSW_PORT_DONT_CARE, 1686 .trap_id = MLXSW_TRAP_ID_MVRP, 1687 }, 1688 { 1689 .func = mlxsw_sp_rx_listener_func, 1690 .local_port = MLXSW_PORT_DONT_CARE, 1691 .trap_id = MLXSW_TRAP_ID_RPVST, 1692 }, 1693 { 1694 .func = mlxsw_sp_rx_listener_func, 1695 .local_port = MLXSW_PORT_DONT_CARE, 1696 .trap_id = MLXSW_TRAP_ID_DHCP, 1697 }, 1698 { 1699 .func = mlxsw_sp_rx_listener_func, 1700 .local_port = MLXSW_PORT_DONT_CARE, 1701 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, 1702 }, 1703 { 1704 .func = mlxsw_sp_rx_listener_func, 1705 .local_port = MLXSW_PORT_DONT_CARE, 1706 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, 1707 }, 1708 { 1709 .func = mlxsw_sp_rx_listener_func, 1710 .local_port = MLXSW_PORT_DONT_CARE, 1711 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, 1712 }, 1713 { 1714 .func = mlxsw_sp_rx_listener_func, 1715 .local_port = MLXSW_PORT_DONT_CARE, 1716 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, 1717 }, 1718 { 1719 .func = mlxsw_sp_rx_listener_func, 1720 .local_port = MLXSW_PORT_DONT_CARE, 1721 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, 1722 }, 1723 }; 1724 1725 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 1726 { 1727 char htgt_pl[MLXSW_REG_HTGT_LEN]; 1728 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 1729 int i; 1730 int err; 1731 1732 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX); 1733 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); 1734 if (err) 1735 return err; 1736 1737 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); 1738 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); 1739 if (err) 1740 return err; 1741 1742 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { 1743 err = mlxsw_core_rx_listener_register(mlxsw_sp->core, 1744 &mlxsw_sp_rx_listener[i], 1745 mlxsw_sp); 1746 if (err) 1747 goto err_rx_listener_register; 1748 1749 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, 1750 mlxsw_sp_rx_listener[i].trap_id); 1751 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 1752 if (err) 1753 goto err_rx_trap_set; 1754 } 1755 return 0; 1756 1757 err_rx_trap_set: 1758 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 1759 &mlxsw_sp_rx_listener[i], 1760 mlxsw_sp); 1761 err_rx_listener_register: 1762 for (i--; i >= 0; i--) { 1763 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, 1764 mlxsw_sp_rx_listener[i].trap_id); 1765 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 1766 1767 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 1768 &mlxsw_sp_rx_listener[i], 1769 mlxsw_sp); 1770 } 1771 return err; 1772 } 1773 1774 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 1775 { 1776 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 1777 int i; 1778 1779 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { 1780 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, 1781 mlxsw_sp_rx_listener[i].trap_id); 1782 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 1783 1784 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 1785 &mlxsw_sp_rx_listener[i], 1786 mlxsw_sp); 1787 } 1788 } 1789 1790 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, 1791 enum mlxsw_reg_sfgc_type type, 1792 enum mlxsw_reg_sfgc_bridge_type bridge_type) 1793 { 1794 enum mlxsw_flood_table_type table_type; 1795 enum mlxsw_sp_flood_table flood_table; 1796 char sfgc_pl[MLXSW_REG_SFGC_LEN]; 1797 1798 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) 1799 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; 1800 else 1801 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; 1802 1803 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) 1804 flood_table = MLXSW_SP_FLOOD_TABLE_UC; 1805 else 1806 flood_table = MLXSW_SP_FLOOD_TABLE_BM; 1807 1808 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, 1809 flood_table); 1810 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl); 1811 } 1812 1813 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) 1814 { 1815 int type, err; 1816 1817 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { 1818 if (type == MLXSW_REG_SFGC_TYPE_RESERVED) 1819 continue; 1820 1821 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 1822 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); 1823 if (err) 1824 return err; 1825 1826 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 1827 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); 1828 if (err) 1829 return err; 1830 } 1831 1832 return 0; 1833 } 1834 1835 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 1836 { 1837 char slcr_pl[MLXSW_REG_SLCR_LEN]; 1838 1839 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 1840 MLXSW_REG_SLCR_LAG_HASH_DMAC | 1841 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 1842 MLXSW_REG_SLCR_LAG_HASH_VLANID | 1843 MLXSW_REG_SLCR_LAG_HASH_SIP | 1844 MLXSW_REG_SLCR_LAG_HASH_DIP | 1845 MLXSW_REG_SLCR_LAG_HASH_SPORT | 1846 MLXSW_REG_SLCR_LAG_HASH_DPORT | 1847 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 1848 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 1849 } 1850 1851 static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, 1852 const struct mlxsw_bus_info *mlxsw_bus_info) 1853 { 1854 struct mlxsw_sp *mlxsw_sp = priv; 1855 int err; 1856 1857 mlxsw_sp->core = mlxsw_core; 1858 mlxsw_sp->bus_info = mlxsw_bus_info; 1859 INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); 1860 INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); 1861 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); 1862 1863 err = mlxsw_sp_base_mac_get(mlxsw_sp); 1864 if (err) { 1865 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 1866 return err; 1867 } 1868 1869 err = mlxsw_sp_ports_create(mlxsw_sp); 1870 if (err) { 1871 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 1872 return err; 1873 } 1874 1875 err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 1876 if (err) { 1877 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); 1878 goto err_event_register; 1879 } 1880 1881 err = mlxsw_sp_traps_init(mlxsw_sp); 1882 if (err) { 1883 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n"); 1884 goto err_rx_listener_register; 1885 } 1886 1887 err = mlxsw_sp_flood_init(mlxsw_sp); 1888 if (err) { 1889 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n"); 1890 goto err_flood_init; 1891 } 1892 1893 err = mlxsw_sp_buffers_init(mlxsw_sp); 1894 if (err) { 1895 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 1896 goto err_buffers_init; 1897 } 1898 1899 err = mlxsw_sp_lag_init(mlxsw_sp); 1900 if (err) { 1901 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 1902 goto err_lag_init; 1903 } 1904 1905 err = mlxsw_sp_switchdev_init(mlxsw_sp); 1906 if (err) { 1907 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 1908 goto err_switchdev_init; 1909 } 1910 1911 return 0; 1912 1913 err_switchdev_init: 1914 err_lag_init: 1915 err_buffers_init: 1916 err_flood_init: 1917 mlxsw_sp_traps_fini(mlxsw_sp); 1918 err_rx_listener_register: 1919 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 1920 err_event_register: 1921 mlxsw_sp_ports_remove(mlxsw_sp); 1922 return err; 1923 } 1924 1925 static void mlxsw_sp_fini(void *priv) 1926 { 1927 struct mlxsw_sp *mlxsw_sp = priv; 1928 1929 mlxsw_sp_switchdev_fini(mlxsw_sp); 1930 mlxsw_sp_traps_fini(mlxsw_sp); 1931 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 1932 mlxsw_sp_ports_remove(mlxsw_sp); 1933 } 1934 1935 static struct mlxsw_config_profile mlxsw_sp_config_profile = { 1936 .used_max_vepa_channels = 1, 1937 .max_vepa_channels = 0, 1938 .used_max_lag = 1, 1939 .max_lag = MLXSW_SP_LAG_MAX, 1940 .used_max_port_per_lag = 1, 1941 .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX, 1942 .used_max_mid = 1, 1943 .max_mid = MLXSW_SP_MID_MAX, 1944 .used_max_pgt = 1, 1945 .max_pgt = 0, 1946 .used_max_system_port = 1, 1947 .max_system_port = 64, 1948 .used_max_vlan_groups = 1, 1949 .max_vlan_groups = 127, 1950 .used_max_regions = 1, 1951 .max_regions = 400, 1952 .used_flood_tables = 1, 1953 .used_flood_mode = 1, 1954 .flood_mode = 3, 1955 .max_fid_offset_flood_tables = 2, 1956 .fid_offset_flood_table_size = VLAN_N_VID - 1, 1957 .max_fid_flood_tables = 2, 1958 .fid_flood_table_size = MLXSW_SP_VFID_MAX, 1959 .used_max_ib_mc = 1, 1960 .max_ib_mc = 0, 1961 .used_max_pkey = 1, 1962 .max_pkey = 0, 1963 .swid_config = { 1964 { 1965 .used_type = 1, 1966 .type = MLXSW_PORT_SWID_TYPE_ETH, 1967 } 1968 }, 1969 }; 1970 1971 static struct mlxsw_driver mlxsw_sp_driver = { 1972 .kind = MLXSW_DEVICE_KIND_SPECTRUM, 1973 .owner = THIS_MODULE, 1974 .priv_size = sizeof(struct mlxsw_sp), 1975 .init = mlxsw_sp_init, 1976 .fini = mlxsw_sp_fini, 1977 .txhdr_construct = mlxsw_sp_txhdr_construct, 1978 .txhdr_len = MLXSW_TXHDR_LEN, 1979 .profile = &mlxsw_sp_config_profile, 1980 }; 1981 1982 static int 1983 mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) 1984 { 1985 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1986 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 1987 1988 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); 1989 mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); 1990 1991 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 1992 } 1993 1994 static int 1995 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, 1996 u16 fid) 1997 { 1998 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1999 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 2000 2001 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); 2002 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 2003 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, 2004 mlxsw_sp_port->local_port); 2005 2006 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2007 } 2008 2009 static int 2010 mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port) 2011 { 2012 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2013 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 2014 2015 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG); 2016 mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); 2017 2018 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2019 } 2020 2021 static int 2022 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, 2023 u16 fid) 2024 { 2025 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2026 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 2027 2028 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID); 2029 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 2030 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); 2031 2032 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2033 } 2034 2035 static int 2036 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port) 2037 { 2038 int err, last_err = 0; 2039 u16 vid; 2040 2041 for (vid = 1; vid < VLAN_N_VID - 1; vid++) { 2042 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid); 2043 if (err) 2044 last_err = err; 2045 } 2046 2047 return last_err; 2048 } 2049 2050 static int 2051 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port) 2052 { 2053 int err, last_err = 0; 2054 u16 vid; 2055 2056 for (vid = 1; vid < VLAN_N_VID - 1; vid++) { 2057 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid); 2058 if (err) 2059 last_err = err; 2060 } 2061 2062 return last_err; 2063 } 2064 2065 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) 2066 { 2067 if (!list_empty(&mlxsw_sp_port->vports_list)) 2068 if (mlxsw_sp_port->lagged) 2069 return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port); 2070 else 2071 return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port); 2072 else 2073 if (mlxsw_sp_port->lagged) 2074 return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port); 2075 else 2076 return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port); 2077 } 2078 2079 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) 2080 { 2081 u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); 2082 u16 fid = mlxsw_sp_vfid_to_fid(vfid); 2083 2084 if (mlxsw_sp_vport->lagged) 2085 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport, 2086 fid); 2087 else 2088 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid); 2089 } 2090 2091 static bool mlxsw_sp_port_dev_check(const struct net_device *dev) 2092 { 2093 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 2094 } 2095 2096 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) 2097 { 2098 struct net_device *dev = mlxsw_sp_port->dev; 2099 int err; 2100 2101 /* When port is not bridged untagged packets are tagged with 2102 * PVID=VID=1, thereby creating an implicit VLAN interface in 2103 * the device. Remove it and let bridge code take care of its 2104 * own VLANs. 2105 */ 2106 err = mlxsw_sp_port_kill_vid(dev, 0, 1); 2107 if (err) 2108 return err; 2109 2110 mlxsw_sp_port->learning = 1; 2111 mlxsw_sp_port->learning_sync = 1; 2112 mlxsw_sp_port->uc_flood = 1; 2113 mlxsw_sp_port->bridged = 1; 2114 2115 return 0; 2116 } 2117 2118 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 2119 bool flush_fdb) 2120 { 2121 struct net_device *dev = mlxsw_sp_port->dev; 2122 2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) 2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); 2125 2126 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 2127 2128 mlxsw_sp_port->learning = 0; 2129 mlxsw_sp_port->learning_sync = 0; 2130 mlxsw_sp_port->uc_flood = 0; 2131 mlxsw_sp_port->bridged = 0; 2132 2133 /* Add implicit VLAN interface in the device, so that untagged 2134 * packets will be classified to the default vFID. 2135 */ 2136 return mlxsw_sp_port_add_vid(dev, 0, 1); 2137 } 2138 2139 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, 2140 struct net_device *br_dev) 2141 { 2142 return !mlxsw_sp->master_bridge.dev || 2143 mlxsw_sp->master_bridge.dev == br_dev; 2144 } 2145 2146 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, 2147 struct net_device *br_dev) 2148 { 2149 mlxsw_sp->master_bridge.dev = br_dev; 2150 mlxsw_sp->master_bridge.ref_count++; 2151 } 2152 2153 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, 2154 struct net_device *br_dev) 2155 { 2156 if (--mlxsw_sp->master_bridge.ref_count == 0) 2157 mlxsw_sp->master_bridge.dev = NULL; 2158 } 2159 2160 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 2161 { 2162 char sldr_pl[MLXSW_REG_SLDR_LEN]; 2163 2164 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 2165 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 2166 } 2167 2168 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 2169 { 2170 char sldr_pl[MLXSW_REG_SLDR_LEN]; 2171 2172 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 2173 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 2174 } 2175 2176 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 2177 u16 lag_id, u8 port_index) 2178 { 2179 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2180 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 2181 2182 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 2183 lag_id, port_index); 2184 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 2185 } 2186 2187 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 2188 u16 lag_id) 2189 { 2190 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2191 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 2192 2193 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 2194 lag_id); 2195 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 2196 } 2197 2198 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 2199 u16 lag_id) 2200 { 2201 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2202 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 2203 2204 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 2205 lag_id); 2206 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 2207 } 2208 2209 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 2210 u16 lag_id) 2211 { 2212 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2213 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 2214 2215 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 2216 lag_id); 2217 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 2218 } 2219 2220 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 2221 struct net_device *lag_dev, 2222 u16 *p_lag_id) 2223 { 2224 struct mlxsw_sp_upper *lag; 2225 int free_lag_id = -1; 2226 int i; 2227 2228 for (i = 0; i < MLXSW_SP_LAG_MAX; i++) { 2229 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 2230 if (lag->ref_count) { 2231 if (lag->dev == lag_dev) { 2232 *p_lag_id = i; 2233 return 0; 2234 } 2235 } else if (free_lag_id < 0) { 2236 free_lag_id = i; 2237 } 2238 } 2239 if (free_lag_id < 0) 2240 return -EBUSY; 2241 *p_lag_id = free_lag_id; 2242 return 0; 2243 } 2244 2245 static bool 2246 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 2247 struct net_device *lag_dev, 2248 struct netdev_lag_upper_info *lag_upper_info) 2249 { 2250 u16 lag_id; 2251 2252 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) 2253 return false; 2254 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) 2255 return false; 2256 return true; 2257 } 2258 2259 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 2260 u16 lag_id, u8 *p_port_index) 2261 { 2262 int i; 2263 2264 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { 2265 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 2266 *p_port_index = i; 2267 return 0; 2268 } 2269 } 2270 return -EBUSY; 2271 } 2272 2273 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 2274 struct net_device *lag_dev) 2275 { 2276 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2277 struct mlxsw_sp_upper *lag; 2278 u16 lag_id; 2279 u8 port_index; 2280 int err; 2281 2282 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 2283 if (err) 2284 return err; 2285 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 2286 if (!lag->ref_count) { 2287 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 2288 if (err) 2289 return err; 2290 lag->dev = lag_dev; 2291 } 2292 2293 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 2294 if (err) 2295 return err; 2296 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 2297 if (err) 2298 goto err_col_port_add; 2299 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); 2300 if (err) 2301 goto err_col_port_enable; 2302 2303 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 2304 mlxsw_sp_port->local_port); 2305 mlxsw_sp_port->lag_id = lag_id; 2306 mlxsw_sp_port->lagged = 1; 2307 lag->ref_count++; 2308 return 0; 2309 2310 err_col_port_add: 2311 if (!lag->ref_count) 2312 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 2313 err_col_port_enable: 2314 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 2315 return err; 2316 } 2317 2318 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, 2319 struct net_device *br_dev, 2320 bool flush_fdb); 2321 2322 static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 2323 struct net_device *lag_dev) 2324 { 2325 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2326 struct mlxsw_sp_port *mlxsw_sp_vport; 2327 struct mlxsw_sp_upper *lag; 2328 u16 lag_id = mlxsw_sp_port->lag_id; 2329 int err; 2330 2331 if (!mlxsw_sp_port->lagged) 2332 return 0; 2333 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 2334 WARN_ON(lag->ref_count == 0); 2335 2336 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); 2337 if (err) 2338 return err; 2339 err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 2340 if (err) 2341 return err; 2342 2343 /* In case we leave a LAG device that has bridges built on top, 2344 * then their teardown sequence is never issued and we need to 2345 * invoke the necessary cleanup routines ourselves. 2346 */ 2347 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, 2348 vport.list) { 2349 struct net_device *br_dev; 2350 2351 if (!mlxsw_sp_vport->bridged) 2352 continue; 2353 2354 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); 2355 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false); 2356 } 2357 2358 if (mlxsw_sp_port->bridged) { 2359 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); 2360 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); 2361 2362 if (lag->ref_count == 1) 2363 mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); 2364 } 2365 2366 if (lag->ref_count == 1) { 2367 if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port)) 2368 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); 2369 err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 2370 if (err) 2371 return err; 2372 } 2373 2374 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 2375 mlxsw_sp_port->local_port); 2376 mlxsw_sp_port->lagged = 0; 2377 lag->ref_count--; 2378 return 0; 2379 } 2380 2381 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 2382 u16 lag_id) 2383 { 2384 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2385 char sldr_pl[MLXSW_REG_SLDR_LEN]; 2386 2387 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 2388 mlxsw_sp_port->local_port); 2389 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 2390 } 2391 2392 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 2393 u16 lag_id) 2394 { 2395 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2396 char sldr_pl[MLXSW_REG_SLDR_LEN]; 2397 2398 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 2399 mlxsw_sp_port->local_port); 2400 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 2401 } 2402 2403 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, 2404 bool lag_tx_enabled) 2405 { 2406 if (lag_tx_enabled) 2407 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, 2408 mlxsw_sp_port->lag_id); 2409 else 2410 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 2411 mlxsw_sp_port->lag_id); 2412 } 2413 2414 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 2415 struct netdev_lag_lower_state_info *info) 2416 { 2417 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); 2418 } 2419 2420 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, 2421 struct net_device *vlan_dev) 2422 { 2423 struct mlxsw_sp_port *mlxsw_sp_vport; 2424 u16 vid = vlan_dev_vlan_id(vlan_dev); 2425 2426 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 2427 if (!mlxsw_sp_vport) { 2428 WARN_ON(!mlxsw_sp_vport); 2429 return -EINVAL; 2430 } 2431 2432 mlxsw_sp_vport->dev = vlan_dev; 2433 2434 return 0; 2435 } 2436 2437 static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, 2438 struct net_device *vlan_dev) 2439 { 2440 struct mlxsw_sp_port *mlxsw_sp_vport; 2441 u16 vid = vlan_dev_vlan_id(vlan_dev); 2442 2443 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 2444 if (!mlxsw_sp_vport) { 2445 WARN_ON(!mlxsw_sp_vport); 2446 return -EINVAL; 2447 } 2448 2449 /* When removing a VLAN device while still bridged we should first 2450 * remove it from the bridge, as we receive the bridge's notification 2451 * when the vPort is already gone. 2452 */ 2453 if (mlxsw_sp_vport->bridged) { 2454 struct net_device *br_dev; 2455 2456 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); 2457 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true); 2458 } 2459 2460 mlxsw_sp_vport->dev = mlxsw_sp_port->dev; 2461 2462 return 0; 2463 } 2464 2465 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, 2466 unsigned long event, void *ptr) 2467 { 2468 struct netdev_notifier_changeupper_info *info; 2469 struct mlxsw_sp_port *mlxsw_sp_port; 2470 struct net_device *upper_dev; 2471 struct mlxsw_sp *mlxsw_sp; 2472 int err; 2473 2474 mlxsw_sp_port = netdev_priv(dev); 2475 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2476 info = ptr; 2477 2478 switch (event) { 2479 case NETDEV_PRECHANGEUPPER: 2480 upper_dev = info->upper_dev; 2481 if (!info->master || !info->linking) 2482 break; 2483 /* HW limitation forbids to put ports to multiple bridges. */ 2484 if (netif_is_bridge_master(upper_dev) && 2485 !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) 2486 return NOTIFY_BAD; 2487 if (netif_is_lag_master(upper_dev) && 2488 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 2489 info->upper_info)) 2490 return NOTIFY_BAD; 2491 break; 2492 case NETDEV_CHANGEUPPER: 2493 upper_dev = info->upper_dev; 2494 if (is_vlan_dev(upper_dev)) { 2495 if (info->linking) { 2496 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, 2497 upper_dev); 2498 if (err) { 2499 netdev_err(dev, "Failed to link VLAN device\n"); 2500 return NOTIFY_BAD; 2501 } 2502 } else { 2503 err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, 2504 upper_dev); 2505 if (err) { 2506 netdev_err(dev, "Failed to unlink VLAN device\n"); 2507 return NOTIFY_BAD; 2508 } 2509 } 2510 } else if (netif_is_bridge_master(upper_dev)) { 2511 if (info->linking) { 2512 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); 2513 if (err) { 2514 netdev_err(dev, "Failed to join bridge\n"); 2515 return NOTIFY_BAD; 2516 } 2517 mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); 2518 } else { 2519 err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 2520 true); 2521 mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); 2522 if (err) { 2523 netdev_err(dev, "Failed to leave bridge\n"); 2524 return NOTIFY_BAD; 2525 } 2526 } 2527 } else if (netif_is_lag_master(upper_dev)) { 2528 if (info->linking) { 2529 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 2530 upper_dev); 2531 if (err) { 2532 netdev_err(dev, "Failed to join link aggregation\n"); 2533 return NOTIFY_BAD; 2534 } 2535 } else { 2536 err = mlxsw_sp_port_lag_leave(mlxsw_sp_port, 2537 upper_dev); 2538 if (err) { 2539 netdev_err(dev, "Failed to leave link aggregation\n"); 2540 return NOTIFY_BAD; 2541 } 2542 } 2543 } 2544 break; 2545 } 2546 2547 return NOTIFY_DONE; 2548 } 2549 2550 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 2551 unsigned long event, void *ptr) 2552 { 2553 struct netdev_notifier_changelowerstate_info *info; 2554 struct mlxsw_sp_port *mlxsw_sp_port; 2555 int err; 2556 2557 mlxsw_sp_port = netdev_priv(dev); 2558 info = ptr; 2559 2560 switch (event) { 2561 case NETDEV_CHANGELOWERSTATE: 2562 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 2563 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 2564 info->lower_state_info); 2565 if (err) 2566 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 2567 } 2568 break; 2569 } 2570 2571 return NOTIFY_DONE; 2572 } 2573 2574 static int mlxsw_sp_netdevice_port_event(struct net_device *dev, 2575 unsigned long event, void *ptr) 2576 { 2577 switch (event) { 2578 case NETDEV_PRECHANGEUPPER: 2579 case NETDEV_CHANGEUPPER: 2580 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr); 2581 case NETDEV_CHANGELOWERSTATE: 2582 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); 2583 } 2584 2585 return NOTIFY_DONE; 2586 } 2587 2588 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 2589 unsigned long event, void *ptr) 2590 { 2591 struct net_device *dev; 2592 struct list_head *iter; 2593 int ret; 2594 2595 netdev_for_each_lower_dev(lag_dev, dev, iter) { 2596 if (mlxsw_sp_port_dev_check(dev)) { 2597 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); 2598 if (ret == NOTIFY_BAD) 2599 return ret; 2600 } 2601 } 2602 2603 return NOTIFY_DONE; 2604 } 2605 2606 static struct mlxsw_sp_vfid * 2607 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, 2608 const struct net_device *br_dev) 2609 { 2610 struct mlxsw_sp_vfid *vfid; 2611 2612 list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { 2613 if (vfid->br_dev == br_dev) 2614 return vfid; 2615 } 2616 2617 return NULL; 2618 } 2619 2620 static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid) 2621 { 2622 return vfid - MLXSW_SP_VFID_PORT_MAX; 2623 } 2624 2625 static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid) 2626 { 2627 return MLXSW_SP_VFID_PORT_MAX + br_vfid; 2628 } 2629 2630 static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp) 2631 { 2632 return find_first_zero_bit(mlxsw_sp->br_vfids.mapped, 2633 MLXSW_SP_VFID_BR_MAX); 2634 } 2635 2636 static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, 2637 struct net_device *br_dev) 2638 { 2639 struct device *dev = mlxsw_sp->bus_info->dev; 2640 struct mlxsw_sp_vfid *vfid; 2641 u16 n_vfid; 2642 int err; 2643 2644 n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); 2645 if (n_vfid == MLXSW_SP_VFID_MAX) { 2646 dev_err(dev, "No available vFIDs\n"); 2647 return ERR_PTR(-ERANGE); 2648 } 2649 2650 err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); 2651 if (err) { 2652 dev_err(dev, "Failed to create vFID=%d\n", n_vfid); 2653 return ERR_PTR(err); 2654 } 2655 2656 vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); 2657 if (!vfid) 2658 goto err_allocate_vfid; 2659 2660 vfid->vfid = n_vfid; 2661 vfid->br_dev = br_dev; 2662 2663 list_add(&vfid->list, &mlxsw_sp->br_vfids.list); 2664 set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped); 2665 2666 return vfid; 2667 2668 err_allocate_vfid: 2669 __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); 2670 return ERR_PTR(-ENOMEM); 2671 } 2672 2673 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, 2674 struct mlxsw_sp_vfid *vfid) 2675 { 2676 u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid); 2677 2678 clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped); 2679 list_del(&vfid->list); 2680 2681 __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); 2682 2683 kfree(vfid); 2684 } 2685 2686 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, 2687 struct net_device *br_dev, 2688 bool flush_fdb) 2689 { 2690 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 2691 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 2692 struct net_device *dev = mlxsw_sp_vport->dev; 2693 struct mlxsw_sp_vfid *vfid, *new_vfid; 2694 int err; 2695 2696 vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); 2697 if (!vfid) { 2698 WARN_ON(!vfid); 2699 return -EINVAL; 2700 } 2701 2702 /* We need a vFID to go back to after leaving the bridge's vFID. */ 2703 new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); 2704 if (!new_vfid) { 2705 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); 2706 if (IS_ERR(new_vfid)) { 2707 netdev_err(dev, "Failed to create vFID for VID=%d\n", 2708 vid); 2709 return PTR_ERR(new_vfid); 2710 } 2711 } 2712 2713 /* Invalidate existing {Port, VID} to vFID mapping and create a new 2714 * one for the new vFID. 2715 */ 2716 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 2717 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 2718 false, 2719 mlxsw_sp_vfid_to_fid(vfid->vfid), 2720 vid); 2721 if (err) { 2722 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", 2723 vfid->vfid); 2724 goto err_port_vid_to_fid_invalidate; 2725 } 2726 2727 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 2728 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 2729 true, 2730 mlxsw_sp_vfid_to_fid(new_vfid->vfid), 2731 vid); 2732 if (err) { 2733 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", 2734 new_vfid->vfid); 2735 goto err_port_vid_to_fid_validate; 2736 } 2737 2738 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); 2739 if (err) { 2740 netdev_err(dev, "Failed to disable learning\n"); 2741 goto err_port_vid_learning_set; 2742 } 2743 2744 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, 2745 false); 2746 if (err) { 2747 netdev_err(dev, "Failed clear to clear flooding\n"); 2748 goto err_vport_flood_set; 2749 } 2750 2751 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, 2752 MLXSW_REG_SPMS_STATE_FORWARDING); 2753 if (err) { 2754 netdev_err(dev, "Failed to set STP state\n"); 2755 goto err_port_stp_state_set; 2756 } 2757 2758 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) 2759 netdev_err(dev, "Failed to flush FDB\n"); 2760 2761 /* Switch between the vFIDs and destroy the old one if needed. */ 2762 new_vfid->nr_vports++; 2763 mlxsw_sp_vport->vport.vfid = new_vfid; 2764 vfid->nr_vports--; 2765 if (!vfid->nr_vports) 2766 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); 2767 2768 mlxsw_sp_vport->learning = 0; 2769 mlxsw_sp_vport->learning_sync = 0; 2770 mlxsw_sp_vport->uc_flood = 0; 2771 mlxsw_sp_vport->bridged = 0; 2772 2773 return 0; 2774 2775 err_port_stp_state_set: 2776 err_vport_flood_set: 2777 err_port_vid_learning_set: 2778 err_port_vid_to_fid_validate: 2779 err_port_vid_to_fid_invalidate: 2780 /* Rollback vFID only if new. */ 2781 if (!new_vfid->nr_vports) 2782 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid); 2783 return err; 2784 } 2785 2786 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, 2787 struct net_device *br_dev) 2788 { 2789 struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid; 2790 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 2791 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 2792 struct net_device *dev = mlxsw_sp_vport->dev; 2793 struct mlxsw_sp_vfid *vfid; 2794 int err; 2795 2796 vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); 2797 if (!vfid) { 2798 vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev); 2799 if (IS_ERR(vfid)) { 2800 netdev_err(dev, "Failed to create bridge vFID\n"); 2801 return PTR_ERR(vfid); 2802 } 2803 } 2804 2805 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false); 2806 if (err) { 2807 netdev_err(dev, "Failed to setup flooding for vFID=%d\n", 2808 vfid->vfid); 2809 goto err_port_flood_set; 2810 } 2811 2812 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); 2813 if (err) { 2814 netdev_err(dev, "Failed to enable learning\n"); 2815 goto err_port_vid_learning_set; 2816 } 2817 2818 /* We need to invalidate existing {Port, VID} to vFID mapping and 2819 * create a new one for the bridge's vFID. 2820 */ 2821 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 2822 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 2823 false, 2824 mlxsw_sp_vfid_to_fid(old_vfid->vfid), 2825 vid); 2826 if (err) { 2827 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", 2828 old_vfid->vfid); 2829 goto err_port_vid_to_fid_invalidate; 2830 } 2831 2832 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 2833 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 2834 true, 2835 mlxsw_sp_vfid_to_fid(vfid->vfid), 2836 vid); 2837 if (err) { 2838 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", 2839 vfid->vfid); 2840 goto err_port_vid_to_fid_validate; 2841 } 2842 2843 /* Switch between the vFIDs and destroy the old one if needed. */ 2844 vfid->nr_vports++; 2845 mlxsw_sp_vport->vport.vfid = vfid; 2846 old_vfid->nr_vports--; 2847 if (!old_vfid->nr_vports) 2848 mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid); 2849 2850 mlxsw_sp_vport->learning = 1; 2851 mlxsw_sp_vport->learning_sync = 1; 2852 mlxsw_sp_vport->uc_flood = 1; 2853 mlxsw_sp_vport->bridged = 1; 2854 2855 return 0; 2856 2857 err_port_vid_to_fid_validate: 2858 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 2859 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, 2860 mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid); 2861 err_port_vid_to_fid_invalidate: 2862 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); 2863 err_port_vid_learning_set: 2864 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false); 2865 err_port_flood_set: 2866 if (!vfid->nr_vports) 2867 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); 2868 return err; 2869 } 2870 2871 static bool 2872 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, 2873 const struct net_device *br_dev) 2874 { 2875 struct mlxsw_sp_port *mlxsw_sp_vport; 2876 2877 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, 2878 vport.list) { 2879 if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev) 2880 return false; 2881 } 2882 2883 return true; 2884 } 2885 2886 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, 2887 unsigned long event, void *ptr, 2888 u16 vid) 2889 { 2890 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 2891 struct netdev_notifier_changeupper_info *info = ptr; 2892 struct mlxsw_sp_port *mlxsw_sp_vport; 2893 struct net_device *upper_dev; 2894 int err; 2895 2896 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 2897 2898 switch (event) { 2899 case NETDEV_PRECHANGEUPPER: 2900 upper_dev = info->upper_dev; 2901 if (!info->master || !info->linking) 2902 break; 2903 if (!netif_is_bridge_master(upper_dev)) 2904 return NOTIFY_BAD; 2905 /* We can't have multiple VLAN interfaces configured on 2906 * the same port and being members in the same bridge. 2907 */ 2908 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, 2909 upper_dev)) 2910 return NOTIFY_BAD; 2911 break; 2912 case NETDEV_CHANGEUPPER: 2913 upper_dev = info->upper_dev; 2914 if (!info->master) 2915 break; 2916 if (info->linking) { 2917 if (!mlxsw_sp_vport) { 2918 WARN_ON(!mlxsw_sp_vport); 2919 return NOTIFY_BAD; 2920 } 2921 err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, 2922 upper_dev); 2923 if (err) { 2924 netdev_err(dev, "Failed to join bridge\n"); 2925 return NOTIFY_BAD; 2926 } 2927 } else { 2928 /* We ignore bridge's unlinking notifications if vPort 2929 * is gone, since we already left the bridge when the 2930 * VLAN device was unlinked from the real device. 2931 */ 2932 if (!mlxsw_sp_vport) 2933 return NOTIFY_DONE; 2934 err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, 2935 upper_dev, true); 2936 if (err) { 2937 netdev_err(dev, "Failed to leave bridge\n"); 2938 return NOTIFY_BAD; 2939 } 2940 } 2941 } 2942 2943 return NOTIFY_DONE; 2944 } 2945 2946 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, 2947 unsigned long event, void *ptr, 2948 u16 vid) 2949 { 2950 struct net_device *dev; 2951 struct list_head *iter; 2952 int ret; 2953 2954 netdev_for_each_lower_dev(lag_dev, dev, iter) { 2955 if (mlxsw_sp_port_dev_check(dev)) { 2956 ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr, 2957 vid); 2958 if (ret == NOTIFY_BAD) 2959 return ret; 2960 } 2961 } 2962 2963 return NOTIFY_DONE; 2964 } 2965 2966 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 2967 unsigned long event, void *ptr) 2968 { 2969 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 2970 u16 vid = vlan_dev_vlan_id(vlan_dev); 2971 2972 if (mlxsw_sp_port_dev_check(real_dev)) 2973 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr, 2974 vid); 2975 else if (netif_is_lag_master(real_dev)) 2976 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, 2977 vid); 2978 2979 return NOTIFY_DONE; 2980 } 2981 2982 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 2983 unsigned long event, void *ptr) 2984 { 2985 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 2986 2987 if (mlxsw_sp_port_dev_check(dev)) 2988 return mlxsw_sp_netdevice_port_event(dev, event, ptr); 2989 2990 if (netif_is_lag_master(dev)) 2991 return mlxsw_sp_netdevice_lag_event(dev, event, ptr); 2992 2993 if (is_vlan_dev(dev)) 2994 return mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 2995 2996 return NOTIFY_DONE; 2997 } 2998 2999 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { 3000 .notifier_call = mlxsw_sp_netdevice_event, 3001 }; 3002 3003 static int __init mlxsw_sp_module_init(void) 3004 { 3005 int err; 3006 3007 register_netdevice_notifier(&mlxsw_sp_netdevice_nb); 3008 err = mlxsw_core_driver_register(&mlxsw_sp_driver); 3009 if (err) 3010 goto err_core_driver_register; 3011 return 0; 3012 3013 err_core_driver_register: 3014 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); 3015 return err; 3016 } 3017 3018 static void __exit mlxsw_sp_module_exit(void) 3019 { 3020 mlxsw_core_driver_unregister(&mlxsw_sp_driver); 3021 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); 3022 } 3023 3024 module_init(mlxsw_sp_module_init); 3025 module_exit(mlxsw_sp_module_exit); 3026 3027 MODULE_LICENSE("Dual BSD/GPL"); 3028 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 3029 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 3030 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM); 3031