/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
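
/* Taken together, the MLXSW_ITEM32(tx, hdr, field, offset, lsb, width)
 * definitions above imply roughly the following Tx header layout (bit
 * ranges per 32-bit word; a sketch for orientation only, the item
 * definitions are authoritative):
 *
 *  0x00: version[31:28] ctl[27:26] proto[23:21] rx_is_router[19]
 *        fid_valid[16] swid[14:12] control_tclass[6] etclass[3:0]
 *  0x04: port_mid[31:16]
 *  0x08: fid[15:0]
 *  0x0C: type[3:0]
 */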

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool *p_is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
	return 0;
}
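
/* vFIDs are FIDs allocated at a fixed offset (MLXSW_SP_VFID_BASE) above
 * regular FIDs. They provide a separate forwarding domain for each 802.1Q
 * upper of a non-bridged port (see mlxsw_sp_port_add_vid() below).
 */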

static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
			    MLXSW_SP_VFID_BASE + vfid, 0);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(vfid, mlxsw_sp->active_vfids);
	return 0;
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(vfid, mlxsw_sp->active_vfids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    MLXSW_SP_VFID_BASE + vfid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
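
/* Per-{Port, VID} learning control. The SPVMLR register takes a VID range,
 * so a single VID is programmed by passing it as both ends of the range.
 */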
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool *p_usable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) != 0;
	return 0;
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
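
/* Tx path: prepend the Tx header constructed above and hand the skb to the
 * core. The header directs the packet to a specific egress port, so no
 * forwarding lookup is performed on transmitted packets.
 */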
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		/* skb_realloc_headroom() returns a copy, so the original
		 * skb must be freed here to avoid leaking it.
		 */
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *sftr_pl;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	if (!test_bit(vid, mlxsw_sp->active_vfids)) {
		err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (err) {
			netdev_err(dev, "Failed to create vFID=%d\n",
				   MLXSW_SP_VFID_BASE + vid);
			return err;
		}

		sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
		if (!sftr_pl) {
			err = -ENOMEM;
			goto err_flood_table_alloc;
		}
		mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
				    MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
				    MLXSW_PORT_CPU_PORT, true);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
		kfree(sftr_pl);
		if (err) {
			netdev_err(dev, "Failed to configure flood table\n");
			goto err_flood_table_config;
		}
	}

	/* In case we fail in the following steps, we intentionally do not
	 * destroy the associated vFID.
	 */

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (!mlxsw_sp_port->nr_vfids) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			return err;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true, MLXSW_SP_VFID_BASE + vid, vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, MLXSW_SP_VFID_BASE + vid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	mlxsw_sp_port->nr_vfids++;
	set_bit(vid, mlxsw_sp_port->active_vfids);

	return 0;

err_flood_table_config:
err_flood_table_alloc:
	mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     MLXSW_SP_VFID_BASE + vid, vid);
err_port_vid_to_fid_set:
	mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
	return err;
}

int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false, MLXSW_SP_VFID_BASE + vid,
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, MLXSW_SP_VFID_BASE + vid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (mlxsw_sp_port->nr_vfids == 1) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port->nr_vfids--;
	clear_bit(vid, mlxsw_sp_port->active_vfids);

	return 0;
}
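
/* FDB and bridge netlink operations are delegated to the switchdev layer,
 * which calls back into this driver's switchdev ops (registered by
 * mlxsw_sp_port_switchdev_init() during port creation).
 */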
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
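
/* The table above drives the ethtool stats interface: the strings are the
 * IEEE 802.3 counter names and each getter extracts one field from a
 * single PPCNT register dump.
 */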
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
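
/* Helpers translating between the PTYS register's Ethernet protocol
 * bitmask and the ethtool SUPPORTED_ / ADVERTISED_ link mode bits, using
 * the mlxsw_sp_port_link_mode table above.
 */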
static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
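
/* A new admin protocol mask only takes effect when the link comes up
 * again, so if the port is currently up it is bounced (admin down, then
 * up) to apply the new setting.
 */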
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
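
/* Create and register the netdev for a single front panel port, programming
 * its initial state (MAC address, SWID, MTU, buffers) along the way. The
 * error path unwinds in reverse order via the labels at the bottom.
 */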
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	bool usable;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->pvid = 1;

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_check;
	}

	if (!usable) {
		dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
			mlxsw_sp_port->local_port);
		goto port_not_usable;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
err_port_module_check:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}

static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 vfid;

	for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, i);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};

static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
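
/* All control packet traps share a single Rx handler, which counts the
 * packet and injects it into the stack on the netdev of the ingress port.
 */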
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
		flood_table = 0;
	} else {
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
		if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
			flood_table = MLXSW_SP_FLOOD_TABLE_UC;
		else
			flood_table = MLXSW_SP_FLOOD_TABLE_BM;
	}

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	/* For non-offloaded netdevs, flood all traffic types to CPU
	 * port.
	 */
	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;
	}

	/* For bridged ports, use one flooding table for unknown unicast
	 * traffic and a second table for unregistered multicast and
	 * broadcast.
	 */
	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}
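
/* Main init flow, invoked by the core once the device is ready: query the
 * base MAC, create the port netdevs, register for PUDE link events and set
 * up traps, flood tables, shared buffers and switchdev. mlxsw_sp_fini()
 * tears this down in reverse order.
 */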
static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
err_ports_create:
	mlxsw_sp_vfids_fini(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_vfids_fini(mlxsw_sp);
}

static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= 64,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= 16,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 1,
	.fid_flood_table_size		= VLAN_N_VID,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sp),
	.init			= mlxsw_sp_init,
	.fini			= mlxsw_sp_fini,
	.txhdr_construct	= mlxsw_sp_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sp_config_profile,
};

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to remove VID 1\n");

	return err;
}

static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to add VID 1\n");

	return err;
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	if (!mlxsw_sp_port_dev_check(dev))
		return NOTIFY_DONE;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* HW limitation forbids putting ports in multiple bridges. */
		if (info->master && info->linking &&
		    netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->master &&
		    netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err)
					netdev_err(dev, "Failed to join bridge\n");
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
				mlxsw_sp_port->bridged = 1;
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
				if (err)
					netdev_err(dev, "Failed to leave bridge\n");
				mlxsw_sp_port->bridged = 0;
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
			}
		}
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);