1 /* 2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c 3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> 5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> 6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include <linux/kernel.h> 38 #include <linux/module.h> 39 #include <linux/types.h> 40 #include <linux/netdevice.h> 41 #include <linux/etherdevice.h> 42 #include <linux/ethtool.h> 43 #include <linux/slab.h> 44 #include <linux/device.h> 45 #include <linux/skbuff.h> 46 #include <linux/if_vlan.h> 47 #include <linux/if_bridge.h> 48 #include <linux/workqueue.h> 49 #include <linux/jiffies.h> 50 #include <linux/bitops.h> 51 #include <linux/list.h> 52 #include <linux/dcbnl.h> 53 #include <net/switchdev.h> 54 #include <generated/utsrelease.h> 55 56 #include "spectrum.h" 57 #include "core.h" 58 #include "reg.h" 59 #include "port.h" 60 #include "trap.h" 61 #include "txheader.h" 62 63 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; 64 static const char mlxsw_sp_driver_version[] = "1.0"; 65 66 /* tx_hdr_version 67 * Tx header version. 68 * Must be set to 1. 69 */ 70 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); 71 72 /* tx_hdr_ctl 73 * Packet control type. 74 * 0 - Ethernet control (e.g. EMADs, LACP) 75 * 1 - Ethernet data 76 */ 77 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); 78 79 /* tx_hdr_proto 80 * Packet protocol type. Must be set to 1 (Ethernet). 81 */ 82 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); 83 84 /* tx_hdr_rx_is_router 85 * Packet is sent from the router. Valid for data packets only. 86 */ 87 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1); 88 89 /* tx_hdr_fid_valid 90 * Indicates if the 'fid' field is valid and should be used for 91 * forwarding lookup. Valid for data packets only. 92 */ 93 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1); 94 95 /* tx_hdr_swid 96 * Switch partition ID. Must be set to 0. 97 */ 98 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); 99 100 /* tx_hdr_control_tclass 101 * Indicates if the packet should use the control TClass and not one 102 * of the data TClasses. 103 */ 104 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1); 105 106 /* tx_hdr_etclass 107 * Egress TClass to be used on the egress device on the egress port. 108 */ 109 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4); 110 111 /* tx_hdr_port_mid 112 * Destination local port for unicast packets. 113 * Destination multicast ID for multicast packets. 114 * 115 * Control packets are directed to a specific egress port, while data 116 * packets are transmitted through the CPU port (0) into the switch partition, 117 * where forwarding rules are applied. 118 */ 119 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); 120 121 /* tx_hdr_fid 122 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is 123 * set, otherwise calculated based on the packet's VID using VID to FID mapping. 124 * Valid for data packets only. 125 */ 126 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); 127 128 /* tx_hdr_type 129 * 0 - Data packets 130 * 6 - Control packets 131 */ 132 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); 133 134 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, 135 const struct mlxsw_tx_info *tx_info) 136 { 137 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); 138 139 memset(txhdr, 0, MLXSW_TXHDR_LEN); 140 141 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); 142 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); 143 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); 144 mlxsw_tx_hdr_swid_set(txhdr, 0); 145 mlxsw_tx_hdr_control_tclass_set(txhdr, 1); 146 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); 147 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 148 } 149 150 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) 151 { 152 char spad_pl[MLXSW_REG_SPAD_LEN]; 153 int err; 154 155 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); 156 if (err) 157 return err; 158 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac); 159 return 0; 160 } 161 162 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, 163 bool is_up) 164 { 165 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 166 char paos_pl[MLXSW_REG_PAOS_LEN]; 167 168 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 169 is_up ? MLXSW_PORT_ADMIN_STATUS_UP : 170 MLXSW_PORT_ADMIN_STATUS_DOWN); 171 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); 172 } 173 174 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, 175 unsigned char *addr) 176 { 177 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 178 char ppad_pl[MLXSW_REG_PPAD_LEN]; 179 180 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port); 181 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr); 182 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl); 183 } 184 185 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) 186 { 187 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 188 unsigned char *addr = mlxsw_sp_port->dev->dev_addr; 189 190 ether_addr_copy(addr, mlxsw_sp->base_mac); 191 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port; 192 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 193 } 194 195 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, 196 u16 vid, enum mlxsw_reg_spms_state state) 197 { 198 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 199 char *spms_pl; 200 int err; 201 202 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 203 if (!spms_pl) 204 return -ENOMEM; 205 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); 206 mlxsw_reg_spms_vid_pack(spms_pl, vid, state); 207 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); 208 kfree(spms_pl); 209 return err; 210 } 211 212 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 213 { 214 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 215 char pmtu_pl[MLXSW_REG_PMTU_LEN]; 216 int max_mtu; 217 int err; 218 219 mtu += MLXSW_TXHDR_LEN + ETH_HLEN; 220 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); 221 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 222 if (err) 223 return err; 224 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); 225 226 if (mtu > max_mtu) 227 return -EINVAL; 228 229 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); 230 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 231 } 232 233 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port, 234 u8 swid) 235 { 236 char pspa_pl[MLXSW_REG_PSPA_LEN]; 237 238 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); 239 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 240 } 241 242 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 243 { 244 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 245 246 return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port, 247 swid); 248 } 249 250 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, 251 bool enable) 252 { 253 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 254 char svpe_pl[MLXSW_REG_SVPE_LEN]; 255 256 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable); 257 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl); 258 } 259 260 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, 261 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid, 262 u16 vid) 263 { 264 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 265 char svfa_pl[MLXSW_REG_SVFA_LEN]; 266 267 mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid, 268 fid, vid); 269 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); 270 } 271 272 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, 273 u16 vid, bool learn_enable) 274 { 275 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 276 char *spvmlr_pl; 277 int err; 278 279 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); 280 if (!spvmlr_pl) 281 return -ENOMEM; 282 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, 283 learn_enable); 284 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); 285 kfree(spvmlr_pl); 286 return err; 287 } 288 289 static int 290 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) 291 { 292 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 293 char sspr_pl[MLXSW_REG_SSPR_LEN]; 294 295 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); 296 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 297 } 298 299 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 300 u8 local_port, u8 *p_module, 301 u8 *p_width, u8 *p_lane) 302 { 303 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 304 int err; 305 306 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 307 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 308 if (err) 309 return err; 310 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); 311 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 312 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 313 return 0; 314 } 315 316 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, 317 u8 module, u8 width, u8 lane) 318 { 319 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 320 int i; 321 322 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 323 mlxsw_reg_pmlp_width_set(pmlp_pl, width); 324 for (i = 0; i < width; i++) { 325 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); 326 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ 327 } 328 329 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 330 } 331 332 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port) 333 { 334 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 335 336 mlxsw_reg_pmlp_pack(pmlp_pl, local_port); 337 mlxsw_reg_pmlp_width_set(pmlp_pl, 0); 338 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); 339 } 340 341 static int mlxsw_sp_port_open(struct net_device *dev) 342 { 343 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 344 int err; 345 346 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 347 if (err) 348 return err; 349 netif_start_queue(dev); 350 return 0; 351 } 352 353 static int mlxsw_sp_port_stop(struct net_device *dev) 354 { 355 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 356 357 netif_stop_queue(dev); 358 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 359 } 360 361 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, 362 struct net_device *dev) 363 { 364 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 365 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 366 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 367 const struct mlxsw_tx_info tx_info = { 368 .local_port = mlxsw_sp_port->local_port, 369 .is_emad = false, 370 }; 371 u64 len; 372 int err; 373 374 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) 375 return NETDEV_TX_BUSY; 376 377 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { 378 struct sk_buff *skb_orig = skb; 379 380 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); 381 if (!skb) { 382 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 383 dev_kfree_skb_any(skb_orig); 384 return NETDEV_TX_OK; 385 } 386 } 387 388 if (eth_skb_pad(skb)) { 389 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 390 return NETDEV_TX_OK; 391 } 392 393 mlxsw_sp_txhdr_construct(skb, &tx_info); 394 /* TX header is consumed by HW on the way so we shouldn't count its 395 * bytes as being sent. 396 */ 397 len = skb->len - MLXSW_TXHDR_LEN; 398 399 /* Due to a race we might fail here because of a full queue. In that 400 * unlikely case we simply drop the packet. 401 */ 402 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); 403 404 if (!err) { 405 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 406 u64_stats_update_begin(&pcpu_stats->syncp); 407 pcpu_stats->tx_packets++; 408 pcpu_stats->tx_bytes += len; 409 u64_stats_update_end(&pcpu_stats->syncp); 410 } else { 411 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); 412 dev_kfree_skb_any(skb); 413 } 414 return NETDEV_TX_OK; 415 } 416 417 static void mlxsw_sp_set_rx_mode(struct net_device *dev) 418 { 419 } 420 421 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) 422 { 423 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 424 struct sockaddr *addr = p; 425 int err; 426 427 if (!is_valid_ether_addr(addr->sa_data)) 428 return -EADDRNOTAVAIL; 429 430 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); 431 if (err) 432 return err; 433 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 434 return 0; 435 } 436 437 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu, 438 bool pause_en, bool pfc_en, u16 delay) 439 { 440 u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu); 441 442 delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) : 443 MLXSW_SP_PAUSE_DELAY; 444 445 if (pause_en || pfc_en) 446 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index, 447 pg_size + delay, pg_size); 448 else 449 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size); 450 } 451 452 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, 453 u8 *prio_tc, bool pause_en, 454 struct ieee_pfc *my_pfc) 455 { 456 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 457 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; 458 u16 delay = !!my_pfc ? my_pfc->delay : 0; 459 char pbmc_pl[MLXSW_REG_PBMC_LEN]; 460 int i, j, err; 461 462 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); 463 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 464 if (err) 465 return err; 466 467 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 468 bool configure = false; 469 bool pfc = false; 470 471 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { 472 if (prio_tc[j] == i) { 473 pfc = pfc_en & BIT(j); 474 configure = true; 475 break; 476 } 477 } 478 479 if (!configure) 480 continue; 481 mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay); 482 } 483 484 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); 485 } 486 487 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, 488 int mtu, bool pause_en) 489 { 490 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; 491 bool dcb_en = !!mlxsw_sp_port->dcb.ets; 492 struct ieee_pfc *my_pfc; 493 u8 *prio_tc; 494 495 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; 496 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL; 497 498 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, 499 pause_en, my_pfc); 500 } 501 502 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) 503 { 504 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 505 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 506 int err; 507 508 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); 509 if (err) 510 return err; 511 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 512 if (err) 513 goto err_port_mtu_set; 514 dev->mtu = mtu; 515 return 0; 516 517 err_port_mtu_set: 518 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 519 return err; 520 } 521 522 static struct rtnl_link_stats64 * 523 mlxsw_sp_port_get_stats64(struct net_device *dev, 524 struct rtnl_link_stats64 *stats) 525 { 526 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 527 struct mlxsw_sp_port_pcpu_stats *p; 528 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 529 u32 tx_dropped = 0; 530 unsigned int start; 531 int i; 532 533 for_each_possible_cpu(i) { 534 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); 535 do { 536 start = u64_stats_fetch_begin_irq(&p->syncp); 537 rx_packets = p->rx_packets; 538 rx_bytes = p->rx_bytes; 539 tx_packets = p->tx_packets; 540 tx_bytes = p->tx_bytes; 541 } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 542 543 stats->rx_packets += rx_packets; 544 stats->rx_bytes += rx_bytes; 545 stats->tx_packets += tx_packets; 546 stats->tx_bytes += tx_bytes; 547 /* tx_dropped is u32, updated without syncp protection. */ 548 tx_dropped += p->tx_dropped; 549 } 550 stats->tx_dropped = tx_dropped; 551 return stats; 552 } 553 554 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, 555 u16 vid_end, bool is_member, bool untagged) 556 { 557 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 558 char *spvm_pl; 559 int err; 560 561 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); 562 if (!spvm_pl) 563 return -ENOMEM; 564 565 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, 566 vid_end, is_member, untagged); 567 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); 568 kfree(spvm_pl); 569 return err; 570 } 571 572 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) 573 { 574 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; 575 u16 vid, last_visited_vid; 576 int err; 577 578 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { 579 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid, 580 vid); 581 if (err) { 582 last_visited_vid = vid; 583 goto err_port_vid_to_fid_set; 584 } 585 } 586 587 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 588 if (err) { 589 last_visited_vid = VLAN_N_VID; 590 goto err_port_vid_to_fid_set; 591 } 592 593 return 0; 594 595 err_port_vid_to_fid_set: 596 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) 597 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid, 598 vid); 599 return err; 600 } 601 602 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) 603 { 604 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; 605 u16 vid; 606 int err; 607 608 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); 609 if (err) 610 return err; 611 612 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { 613 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, 614 vid, vid); 615 if (err) 616 return err; 617 } 618 619 return 0; 620 } 621 622 static struct mlxsw_sp_vfid * 623 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) 624 { 625 struct mlxsw_sp_vfid *vfid; 626 627 list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { 628 if (vfid->vid == vid) 629 return vfid; 630 } 631 632 return NULL; 633 } 634 635 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) 636 { 637 return find_first_zero_bit(mlxsw_sp->port_vfids.mapped, 638 MLXSW_SP_VFID_PORT_MAX); 639 } 640 641 static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) 642 { 643 u16 fid = mlxsw_sp_vfid_to_fid(vfid); 644 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 645 646 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); 647 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 648 } 649 650 static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) 651 { 652 u16 fid = mlxsw_sp_vfid_to_fid(vfid); 653 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 654 655 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); 656 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); 657 } 658 659 static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, 660 u16 vid) 661 { 662 struct device *dev = mlxsw_sp->bus_info->dev; 663 struct mlxsw_sp_vfid *vfid; 664 u16 n_vfid; 665 int err; 666 667 n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); 668 if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { 669 dev_err(dev, "No available vFIDs\n"); 670 return ERR_PTR(-ERANGE); 671 } 672 673 err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); 674 if (err) { 675 dev_err(dev, "Failed to create vFID=%d\n", n_vfid); 676 return ERR_PTR(err); 677 } 678 679 vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); 680 if (!vfid) 681 goto err_allocate_vfid; 682 683 vfid->vfid = n_vfid; 684 vfid->vid = vid; 685 686 list_add(&vfid->list, &mlxsw_sp->port_vfids.list); 687 set_bit(n_vfid, mlxsw_sp->port_vfids.mapped); 688 689 return vfid; 690 691 err_allocate_vfid: 692 __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); 693 return ERR_PTR(-ENOMEM); 694 } 695 696 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, 697 struct mlxsw_sp_vfid *vfid) 698 { 699 clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); 700 list_del(&vfid->list); 701 702 __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); 703 704 kfree(vfid); 705 } 706 707 static struct mlxsw_sp_port * 708 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, 709 struct mlxsw_sp_vfid *vfid) 710 { 711 struct mlxsw_sp_port *mlxsw_sp_vport; 712 713 mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL); 714 if (!mlxsw_sp_vport) 715 return NULL; 716 717 /* dev will be set correctly after the VLAN device is linked 718 * with the real device. In case of bridge SELF invocation, dev 719 * will remain as is. 720 */ 721 mlxsw_sp_vport->dev = mlxsw_sp_port->dev; 722 mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 723 mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port; 724 mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; 725 mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; 726 mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; 727 mlxsw_sp_vport->vport.vfid = vfid; 728 mlxsw_sp_vport->vport.vid = vfid->vid; 729 730 list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list); 731 732 return mlxsw_sp_vport; 733 } 734 735 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) 736 { 737 list_del(&mlxsw_sp_vport->vport.list); 738 kfree(mlxsw_sp_vport); 739 } 740 741 int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, 742 u16 vid) 743 { 744 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 745 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 746 struct mlxsw_sp_port *mlxsw_sp_vport; 747 struct mlxsw_sp_vfid *vfid; 748 int err; 749 750 /* VLAN 0 is added to HW filter when device goes up, but it is 751 * reserved in our case, so simply return. 752 */ 753 if (!vid) 754 return 0; 755 756 if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { 757 netdev_warn(dev, "VID=%d already configured\n", vid); 758 return 0; 759 } 760 761 vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); 762 if (!vfid) { 763 vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); 764 if (IS_ERR(vfid)) { 765 netdev_err(dev, "Failed to create vFID for VID=%d\n", 766 vid); 767 return PTR_ERR(vfid); 768 } 769 } 770 771 mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid); 772 if (!mlxsw_sp_vport) { 773 netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); 774 err = -ENOMEM; 775 goto err_port_vport_create; 776 } 777 778 if (!vfid->nr_vports) { 779 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, 780 true, false); 781 if (err) { 782 netdev_err(dev, "Failed to setup flooding for vFID=%d\n", 783 vfid->vfid); 784 goto err_vport_flood_set; 785 } 786 } 787 788 /* When adding the first VLAN interface on a bridged port we need to 789 * transition all the active 802.1Q bridge VLANs to use explicit 790 * {Port, VID} to FID mappings and set the port's mode to Virtual mode. 791 */ 792 if (list_is_singular(&mlxsw_sp_port->vports_list)) { 793 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); 794 if (err) { 795 netdev_err(dev, "Failed to set to Virtual mode\n"); 796 goto err_port_vp_mode_trans; 797 } 798 } 799 800 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 801 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 802 true, 803 mlxsw_sp_vfid_to_fid(vfid->vfid), 804 vid); 805 if (err) { 806 netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", 807 vid, vfid->vfid); 808 goto err_port_vid_to_fid_set; 809 } 810 811 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); 812 if (err) { 813 netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); 814 goto err_port_vid_learning_set; 815 } 816 817 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false); 818 if (err) { 819 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", 820 vid); 821 goto err_port_add_vid; 822 } 823 824 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, 825 MLXSW_REG_SPMS_STATE_FORWARDING); 826 if (err) { 827 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); 828 goto err_port_stp_state_set; 829 } 830 831 vfid->nr_vports++; 832 833 return 0; 834 835 err_port_stp_state_set: 836 mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); 837 err_port_add_vid: 838 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); 839 err_port_vid_learning_set: 840 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 841 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, 842 mlxsw_sp_vfid_to_fid(vfid->vfid), vid); 843 err_port_vid_to_fid_set: 844 if (list_is_singular(&mlxsw_sp_port->vports_list)) 845 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); 846 err_port_vp_mode_trans: 847 if (!vfid->nr_vports) 848 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, 849 false); 850 err_vport_flood_set: 851 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); 852 err_port_vport_create: 853 if (!vfid->nr_vports) 854 mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); 855 return err; 856 } 857 858 int mlxsw_sp_port_kill_vid(struct net_device *dev, 859 __be16 __always_unused proto, u16 vid) 860 { 861 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 862 struct mlxsw_sp_port *mlxsw_sp_vport; 863 struct mlxsw_sp_vfid *vfid; 864 int err; 865 866 /* VLAN 0 is removed from HW filter when device goes down, but 867 * it is reserved in our case, so simply return. 868 */ 869 if (!vid) 870 return 0; 871 872 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 873 if (!mlxsw_sp_vport) { 874 netdev_warn(dev, "VID=%d does not exist\n", vid); 875 return 0; 876 } 877 878 vfid = mlxsw_sp_vport->vport.vfid; 879 880 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, 881 MLXSW_REG_SPMS_STATE_DISCARDING); 882 if (err) { 883 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); 884 return err; 885 } 886 887 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); 888 if (err) { 889 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", 890 vid); 891 return err; 892 } 893 894 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); 895 if (err) { 896 netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); 897 return err; 898 } 899 900 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 901 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 902 false, 903 mlxsw_sp_vfid_to_fid(vfid->vfid), 904 vid); 905 if (err) { 906 netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", 907 vid, vfid->vfid); 908 return err; 909 } 910 911 /* When removing the last VLAN interface on a bridged port we need to 912 * transition all active 802.1Q bridge VLANs to use VID to FID 913 * mappings and set port's mode to VLAN mode. 914 */ 915 if (list_is_singular(&mlxsw_sp_port->vports_list)) { 916 err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); 917 if (err) { 918 netdev_err(dev, "Failed to set to VLAN mode\n"); 919 return err; 920 } 921 } 922 923 vfid->nr_vports--; 924 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); 925 926 /* Destroy the vFID if no vPorts are assigned to it anymore. */ 927 if (!vfid->nr_vports) 928 mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid); 929 930 return 0; 931 } 932 933 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, 934 size_t len) 935 { 936 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 937 u8 module = mlxsw_sp_port->mapping.module; 938 u8 width = mlxsw_sp_port->mapping.width; 939 u8 lane = mlxsw_sp_port->mapping.lane; 940 int err; 941 942 if (!mlxsw_sp_port->split) 943 err = snprintf(name, len, "p%d", module + 1); 944 else 945 err = snprintf(name, len, "p%ds%d", module + 1, 946 lane / width); 947 948 if (err >= len) 949 return -EINVAL; 950 951 return 0; 952 } 953 954 static const struct net_device_ops mlxsw_sp_port_netdev_ops = { 955 .ndo_open = mlxsw_sp_port_open, 956 .ndo_stop = mlxsw_sp_port_stop, 957 .ndo_start_xmit = mlxsw_sp_port_xmit, 958 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, 959 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, 960 .ndo_change_mtu = mlxsw_sp_port_change_mtu, 961 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 962 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 963 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 964 .ndo_fdb_add = switchdev_port_fdb_add, 965 .ndo_fdb_del = switchdev_port_fdb_del, 966 .ndo_fdb_dump = switchdev_port_fdb_dump, 967 .ndo_bridge_setlink = switchdev_port_bridge_setlink, 968 .ndo_bridge_getlink = switchdev_port_bridge_getlink, 969 .ndo_bridge_dellink = switchdev_port_bridge_dellink, 970 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, 971 }; 972 973 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, 974 struct ethtool_drvinfo *drvinfo) 975 { 976 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 977 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 978 979 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); 980 strlcpy(drvinfo->version, mlxsw_sp_driver_version, 981 sizeof(drvinfo->version)); 982 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 983 "%d.%d.%d", 984 mlxsw_sp->bus_info->fw_rev.major, 985 mlxsw_sp->bus_info->fw_rev.minor, 986 mlxsw_sp->bus_info->fw_rev.subminor); 987 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, 988 sizeof(drvinfo->bus_info)); 989 } 990 991 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, 992 struct ethtool_pauseparam *pause) 993 { 994 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 995 996 pause->rx_pause = mlxsw_sp_port->link.rx_pause; 997 pause->tx_pause = mlxsw_sp_port->link.tx_pause; 998 } 999 1000 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, 1001 struct ethtool_pauseparam *pause) 1002 { 1003 char pfcc_pl[MLXSW_REG_PFCC_LEN]; 1004 1005 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); 1006 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); 1007 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); 1008 1009 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), 1010 pfcc_pl); 1011 } 1012 1013 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, 1014 struct ethtool_pauseparam *pause) 1015 { 1016 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1017 bool pause_en = pause->tx_pause || pause->rx_pause; 1018 int err; 1019 1020 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { 1021 netdev_err(dev, "PFC already enabled on port\n"); 1022 return -EINVAL; 1023 } 1024 1025 if (pause->autoneg) { 1026 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); 1027 return -EINVAL; 1028 } 1029 1030 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1031 if (err) { 1032 netdev_err(dev, "Failed to configure port's headroom\n"); 1033 return err; 1034 } 1035 1036 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); 1037 if (err) { 1038 netdev_err(dev, "Failed to set PAUSE parameters\n"); 1039 goto err_port_pause_configure; 1040 } 1041 1042 mlxsw_sp_port->link.rx_pause = pause->rx_pause; 1043 mlxsw_sp_port->link.tx_pause = pause->tx_pause; 1044 1045 return 0; 1046 1047 err_port_pause_configure: 1048 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); 1049 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); 1050 return err; 1051 } 1052 1053 struct mlxsw_sp_port_hw_stats { 1054 char str[ETH_GSTRING_LEN]; 1055 u64 (*getter)(char *payload); 1056 }; 1057 1058 static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { 1059 { 1060 .str = "a_frames_transmitted_ok", 1061 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, 1062 }, 1063 { 1064 .str = "a_frames_received_ok", 1065 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, 1066 }, 1067 { 1068 .str = "a_frame_check_sequence_errors", 1069 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, 1070 }, 1071 { 1072 .str = "a_alignment_errors", 1073 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, 1074 }, 1075 { 1076 .str = "a_octets_transmitted_ok", 1077 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, 1078 }, 1079 { 1080 .str = "a_octets_received_ok", 1081 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, 1082 }, 1083 { 1084 .str = "a_multicast_frames_xmitted_ok", 1085 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, 1086 }, 1087 { 1088 .str = "a_broadcast_frames_xmitted_ok", 1089 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, 1090 }, 1091 { 1092 .str = "a_multicast_frames_received_ok", 1093 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, 1094 }, 1095 { 1096 .str = "a_broadcast_frames_received_ok", 1097 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, 1098 }, 1099 { 1100 .str = "a_in_range_length_errors", 1101 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, 1102 }, 1103 { 1104 .str = "a_out_of_range_length_field", 1105 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, 1106 }, 1107 { 1108 .str = "a_frame_too_long_errors", 1109 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, 1110 }, 1111 { 1112 .str = "a_symbol_error_during_carrier", 1113 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, 1114 }, 1115 { 1116 .str = "a_mac_control_frames_transmitted", 1117 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, 1118 }, 1119 { 1120 .str = "a_mac_control_frames_received", 1121 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, 1122 }, 1123 { 1124 .str = "a_unsupported_opcodes_received", 1125 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, 1126 }, 1127 { 1128 .str = "a_pause_mac_ctrl_frames_received", 1129 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, 1130 }, 1131 { 1132 .str = "a_pause_mac_ctrl_frames_xmitted", 1133 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, 1134 }, 1135 }; 1136 1137 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) 1138 1139 static void mlxsw_sp_port_get_strings(struct net_device *dev, 1140 u32 stringset, u8 *data) 1141 { 1142 u8 *p = data; 1143 int i; 1144 1145 switch (stringset) { 1146 case ETH_SS_STATS: 1147 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { 1148 memcpy(p, mlxsw_sp_port_hw_stats[i].str, 1149 ETH_GSTRING_LEN); 1150 p += ETH_GSTRING_LEN; 1151 } 1152 break; 1153 } 1154 } 1155 1156 static int mlxsw_sp_port_set_phys_id(struct net_device *dev, 1157 enum ethtool_phys_id_state state) 1158 { 1159 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1160 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1161 char mlcr_pl[MLXSW_REG_MLCR_LEN]; 1162 bool active; 1163 1164 switch (state) { 1165 case ETHTOOL_ID_ACTIVE: 1166 active = true; 1167 break; 1168 case ETHTOOL_ID_INACTIVE: 1169 active = false; 1170 break; 1171 default: 1172 return -EOPNOTSUPP; 1173 } 1174 1175 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); 1176 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); 1177 } 1178 1179 static void mlxsw_sp_port_get_stats(struct net_device *dev, 1180 struct ethtool_stats *stats, u64 *data) 1181 { 1182 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1183 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1184 char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; 1185 int i; 1186 int err; 1187 1188 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, 1189 MLXSW_REG_PPCNT_IEEE_8023_CNT, 0); 1190 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); 1191 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) 1192 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; 1193 } 1194 1195 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) 1196 { 1197 switch (sset) { 1198 case ETH_SS_STATS: 1199 return MLXSW_SP_PORT_HW_STATS_LEN; 1200 default: 1201 return -EOPNOTSUPP; 1202 } 1203 } 1204 1205 struct mlxsw_sp_port_link_mode { 1206 u32 mask; 1207 u32 supported; 1208 u32 advertised; 1209 u32 speed; 1210 }; 1211 1212 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { 1213 { 1214 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, 1215 .supported = SUPPORTED_100baseT_Full, 1216 .advertised = ADVERTISED_100baseT_Full, 1217 .speed = 100, 1218 }, 1219 { 1220 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX, 1221 .speed = 100, 1222 }, 1223 { 1224 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | 1225 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, 1226 .supported = SUPPORTED_1000baseKX_Full, 1227 .advertised = ADVERTISED_1000baseKX_Full, 1228 .speed = 1000, 1229 }, 1230 { 1231 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, 1232 .supported = SUPPORTED_10000baseT_Full, 1233 .advertised = ADVERTISED_10000baseT_Full, 1234 .speed = 10000, 1235 }, 1236 { 1237 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | 1238 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, 1239 .supported = SUPPORTED_10000baseKX4_Full, 1240 .advertised = ADVERTISED_10000baseKX4_Full, 1241 .speed = 10000, 1242 }, 1243 { 1244 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1245 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 1246 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 1247 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, 1248 .supported = SUPPORTED_10000baseKR_Full, 1249 .advertised = ADVERTISED_10000baseKR_Full, 1250 .speed = 10000, 1251 }, 1252 { 1253 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, 1254 .supported = SUPPORTED_20000baseKR2_Full, 1255 .advertised = ADVERTISED_20000baseKR2_Full, 1256 .speed = 20000, 1257 }, 1258 { 1259 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, 1260 .supported = SUPPORTED_40000baseCR4_Full, 1261 .advertised = ADVERTISED_40000baseCR4_Full, 1262 .speed = 40000, 1263 }, 1264 { 1265 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, 1266 .supported = SUPPORTED_40000baseKR4_Full, 1267 .advertised = ADVERTISED_40000baseKR4_Full, 1268 .speed = 40000, 1269 }, 1270 { 1271 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, 1272 .supported = SUPPORTED_40000baseSR4_Full, 1273 .advertised = ADVERTISED_40000baseSR4_Full, 1274 .speed = 40000, 1275 }, 1276 { 1277 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, 1278 .supported = SUPPORTED_40000baseLR4_Full, 1279 .advertised = ADVERTISED_40000baseLR4_Full, 1280 .speed = 40000, 1281 }, 1282 { 1283 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR | 1284 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR | 1285 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, 1286 .speed = 25000, 1287 }, 1288 { 1289 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 | 1290 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 | 1291 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, 1292 .speed = 50000, 1293 }, 1294 { 1295 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, 1296 .supported = SUPPORTED_56000baseKR4_Full, 1297 .advertised = ADVERTISED_56000baseKR4_Full, 1298 .speed = 56000, 1299 }, 1300 { 1301 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 | 1302 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 1303 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 1304 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, 1305 .speed = 100000, 1306 }, 1307 }; 1308 1309 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) 1310 1311 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto) 1312 { 1313 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 1314 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 1315 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 1316 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 1317 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 1318 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 1319 return SUPPORTED_FIBRE; 1320 1321 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1322 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 1323 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 1324 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | 1325 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) 1326 return SUPPORTED_Backplane; 1327 return 0; 1328 } 1329 1330 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto) 1331 { 1332 u32 modes = 0; 1333 int i; 1334 1335 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1336 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 1337 modes |= mlxsw_sp_port_link_mode[i].supported; 1338 } 1339 return modes; 1340 } 1341 1342 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto) 1343 { 1344 u32 modes = 0; 1345 int i; 1346 1347 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1348 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) 1349 modes |= mlxsw_sp_port_link_mode[i].advertised; 1350 } 1351 return modes; 1352 } 1353 1354 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, 1355 struct ethtool_cmd *cmd) 1356 { 1357 u32 speed = SPEED_UNKNOWN; 1358 u8 duplex = DUPLEX_UNKNOWN; 1359 int i; 1360 1361 if (!carrier_ok) 1362 goto out; 1363 1364 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1365 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { 1366 speed = mlxsw_sp_port_link_mode[i].speed; 1367 duplex = DUPLEX_FULL; 1368 break; 1369 } 1370 } 1371 out: 1372 ethtool_cmd_speed_set(cmd, speed); 1373 cmd->duplex = duplex; 1374 } 1375 1376 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) 1377 { 1378 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | 1379 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | 1380 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | 1381 MLXSW_REG_PTYS_ETH_SPEED_SGMII)) 1382 return PORT_FIBRE; 1383 1384 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | 1385 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | 1386 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) 1387 return PORT_DA; 1388 1389 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | 1390 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | 1391 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | 1392 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) 1393 return PORT_NONE; 1394 1395 return PORT_OTHER; 1396 } 1397 1398 static int mlxsw_sp_port_get_settings(struct net_device *dev, 1399 struct ethtool_cmd *cmd) 1400 { 1401 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1402 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1403 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1404 u32 eth_proto_cap; 1405 u32 eth_proto_admin; 1406 u32 eth_proto_oper; 1407 int err; 1408 1409 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 1410 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1411 if (err) { 1412 netdev_err(dev, "Failed to get proto"); 1413 return err; 1414 } 1415 mlxsw_reg_ptys_unpack(ptys_pl, ð_proto_cap, 1416 ð_proto_admin, ð_proto_oper); 1417 1418 cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) | 1419 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) | 1420 SUPPORTED_Pause | SUPPORTED_Asym_Pause | 1421 SUPPORTED_Autoneg; 1422 cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin); 1423 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), 1424 eth_proto_oper, cmd); 1425 1426 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 1427 cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper); 1428 cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper); 1429 1430 cmd->transceiver = XCVR_INTERNAL; 1431 return 0; 1432 } 1433 1434 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising) 1435 { 1436 u32 ptys_proto = 0; 1437 int i; 1438 1439 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1440 if (advertising & mlxsw_sp_port_link_mode[i].advertised) 1441 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 1442 } 1443 return ptys_proto; 1444 } 1445 1446 static u32 mlxsw_sp_to_ptys_speed(u32 speed) 1447 { 1448 u32 ptys_proto = 0; 1449 int i; 1450 1451 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1452 if (speed == mlxsw_sp_port_link_mode[i].speed) 1453 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 1454 } 1455 return ptys_proto; 1456 } 1457 1458 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) 1459 { 1460 u32 ptys_proto = 0; 1461 int i; 1462 1463 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { 1464 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) 1465 ptys_proto |= mlxsw_sp_port_link_mode[i].mask; 1466 } 1467 return ptys_proto; 1468 } 1469 1470 static int mlxsw_sp_port_set_settings(struct net_device *dev, 1471 struct ethtool_cmd *cmd) 1472 { 1473 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1474 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1475 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1476 u32 speed; 1477 u32 eth_proto_new; 1478 u32 eth_proto_cap; 1479 u32 eth_proto_admin; 1480 int err; 1481 1482 speed = ethtool_cmd_speed(cmd); 1483 1484 eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ? 1485 mlxsw_sp_to_ptys_advert_link(cmd->advertising) : 1486 mlxsw_sp_to_ptys_speed(speed); 1487 1488 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); 1489 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1490 if (err) { 1491 netdev_err(dev, "Failed to get proto"); 1492 return err; 1493 } 1494 mlxsw_reg_ptys_unpack(ptys_pl, ð_proto_cap, ð_proto_admin, NULL); 1495 1496 eth_proto_new = eth_proto_new & eth_proto_cap; 1497 if (!eth_proto_new) { 1498 netdev_err(dev, "Not supported proto admin requested"); 1499 return -EINVAL; 1500 } 1501 if (eth_proto_new == eth_proto_admin) 1502 return 0; 1503 1504 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new); 1505 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1506 if (err) { 1507 netdev_err(dev, "Failed to set proto admin"); 1508 return err; 1509 } 1510 1511 if (!netif_running(dev)) 1512 return 0; 1513 1514 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1515 if (err) { 1516 netdev_err(dev, "Failed to set admin status"); 1517 return err; 1518 } 1519 1520 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); 1521 if (err) { 1522 netdev_err(dev, "Failed to set admin status"); 1523 return err; 1524 } 1525 1526 return 0; 1527 } 1528 1529 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 1530 .get_drvinfo = mlxsw_sp_port_get_drvinfo, 1531 .get_link = ethtool_op_get_link, 1532 .get_pauseparam = mlxsw_sp_port_get_pauseparam, 1533 .set_pauseparam = mlxsw_sp_port_set_pauseparam, 1534 .get_strings = mlxsw_sp_port_get_strings, 1535 .set_phys_id = mlxsw_sp_port_set_phys_id, 1536 .get_ethtool_stats = mlxsw_sp_port_get_stats, 1537 .get_sset_count = mlxsw_sp_port_get_sset_count, 1538 .get_settings = mlxsw_sp_port_get_settings, 1539 .set_settings = mlxsw_sp_port_set_settings, 1540 }; 1541 1542 static int 1543 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) 1544 { 1545 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1546 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; 1547 char ptys_pl[MLXSW_REG_PTYS_LEN]; 1548 u32 eth_proto_admin; 1549 1550 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); 1551 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 1552 eth_proto_admin); 1553 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); 1554 } 1555 1556 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 1557 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 1558 bool dwrr, u8 dwrr_weight) 1559 { 1560 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1561 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1562 1563 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1564 next_index); 1565 mlxsw_reg_qeec_de_set(qeec_pl, true); 1566 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); 1567 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); 1568 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1569 } 1570 1571 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 1572 enum mlxsw_reg_qeec_hr hr, u8 index, 1573 u8 next_index, u32 maxrate) 1574 { 1575 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1576 char qeec_pl[MLXSW_REG_QEEC_LEN]; 1577 1578 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, 1579 next_index); 1580 mlxsw_reg_qeec_mase_set(qeec_pl, true); 1581 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); 1582 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); 1583 } 1584 1585 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, 1586 u8 switch_prio, u8 tclass) 1587 { 1588 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1589 char qtct_pl[MLXSW_REG_QTCT_LEN]; 1590 1591 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, 1592 tclass); 1593 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); 1594 } 1595 1596 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) 1597 { 1598 int err, i; 1599 1600 /* Setup the elements hierarcy, so that each TC is linked to 1601 * one subgroup, which are all member in the same group. 1602 */ 1603 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1604 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, 1605 0); 1606 if (err) 1607 return err; 1608 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1609 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1610 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, 1611 0, false, 0); 1612 if (err) 1613 return err; 1614 } 1615 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1616 err = mlxsw_sp_port_ets_set(mlxsw_sp_port, 1617 MLXSW_REG_QEEC_HIERARCY_TC, i, i, 1618 false, 0); 1619 if (err) 1620 return err; 1621 } 1622 1623 /* Make sure the max shaper is disabled in all hierarcies that 1624 * support it. 1625 */ 1626 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1627 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, 1628 MLXSW_REG_QEEC_MAS_DIS); 1629 if (err) 1630 return err; 1631 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1632 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1633 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, 1634 i, 0, 1635 MLXSW_REG_QEEC_MAS_DIS); 1636 if (err) 1637 return err; 1638 } 1639 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1640 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, 1641 MLXSW_REG_QEEC_HIERARCY_TC, 1642 i, i, 1643 MLXSW_REG_QEEC_MAS_DIS); 1644 if (err) 1645 return err; 1646 } 1647 1648 /* Map all priorities to traffic class 0. */ 1649 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 1650 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); 1651 if (err) 1652 return err; 1653 } 1654 1655 return 0; 1656 } 1657 1658 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1659 bool split, u8 module, u8 width, u8 lane) 1660 { 1661 struct mlxsw_sp_port *mlxsw_sp_port; 1662 struct net_device *dev; 1663 size_t bytes; 1664 int err; 1665 1666 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); 1667 if (!dev) 1668 return -ENOMEM; 1669 mlxsw_sp_port = netdev_priv(dev); 1670 mlxsw_sp_port->dev = dev; 1671 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1672 mlxsw_sp_port->local_port = local_port; 1673 mlxsw_sp_port->split = split; 1674 mlxsw_sp_port->mapping.module = module; 1675 mlxsw_sp_port->mapping.width = width; 1676 mlxsw_sp_port->mapping.lane = lane; 1677 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); 1678 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); 1679 if (!mlxsw_sp_port->active_vlans) { 1680 err = -ENOMEM; 1681 goto err_port_active_vlans_alloc; 1682 } 1683 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL); 1684 if (!mlxsw_sp_port->untagged_vlans) { 1685 err = -ENOMEM; 1686 goto err_port_untagged_vlans_alloc; 1687 } 1688 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); 1689 1690 mlxsw_sp_port->pcpu_stats = 1691 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); 1692 if (!mlxsw_sp_port->pcpu_stats) { 1693 err = -ENOMEM; 1694 goto err_alloc_stats; 1695 } 1696 1697 dev->netdev_ops = &mlxsw_sp_port_netdev_ops; 1698 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; 1699 1700 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); 1701 if (err) { 1702 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", 1703 mlxsw_sp_port->local_port); 1704 goto err_dev_addr_init; 1705 } 1706 1707 netif_carrier_off(dev); 1708 1709 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | 1710 NETIF_F_HW_VLAN_CTAG_FILTER; 1711 1712 /* Each packet needs to have a Tx header (metadata) on top all other 1713 * headers. 1714 */ 1715 dev->hard_header_len += MLXSW_TXHDR_LEN; 1716 1717 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); 1718 if (err) { 1719 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", 1720 mlxsw_sp_port->local_port); 1721 goto err_port_system_port_mapping_set; 1722 } 1723 1724 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); 1725 if (err) { 1726 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", 1727 mlxsw_sp_port->local_port); 1728 goto err_port_swid_set; 1729 } 1730 1731 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); 1732 if (err) { 1733 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", 1734 mlxsw_sp_port->local_port); 1735 goto err_port_speed_by_width_set; 1736 } 1737 1738 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); 1739 if (err) { 1740 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", 1741 mlxsw_sp_port->local_port); 1742 goto err_port_mtu_set; 1743 } 1744 1745 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); 1746 if (err) 1747 goto err_port_admin_status_set; 1748 1749 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port); 1750 if (err) { 1751 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n", 1752 mlxsw_sp_port->local_port); 1753 goto err_port_buffers_init; 1754 } 1755 1756 err = mlxsw_sp_port_ets_init(mlxsw_sp_port); 1757 if (err) { 1758 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", 1759 mlxsw_sp_port->local_port); 1760 goto err_port_ets_init; 1761 } 1762 1763 /* ETS and buffers must be initialized before DCB. */ 1764 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); 1765 if (err) { 1766 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", 1767 mlxsw_sp_port->local_port); 1768 goto err_port_dcb_init; 1769 } 1770 1771 mlxsw_sp_port_switchdev_init(mlxsw_sp_port); 1772 err = register_netdev(dev); 1773 if (err) { 1774 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", 1775 mlxsw_sp_port->local_port); 1776 goto err_register_netdev; 1777 } 1778 1779 err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port, 1780 mlxsw_sp_port->local_port, dev, 1781 mlxsw_sp_port->split, module); 1782 if (err) { 1783 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", 1784 mlxsw_sp_port->local_port); 1785 goto err_core_port_init; 1786 } 1787 1788 err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); 1789 if (err) 1790 goto err_port_vlan_init; 1791 1792 mlxsw_sp->ports[local_port] = mlxsw_sp_port; 1793 return 0; 1794 1795 err_port_vlan_init: 1796 mlxsw_core_port_fini(&mlxsw_sp_port->core_port); 1797 err_core_port_init: 1798 unregister_netdev(dev); 1799 err_register_netdev: 1800 err_port_dcb_init: 1801 err_port_ets_init: 1802 err_port_buffers_init: 1803 err_port_admin_status_set: 1804 err_port_mtu_set: 1805 err_port_speed_by_width_set: 1806 err_port_swid_set: 1807 err_port_system_port_mapping_set: 1808 err_dev_addr_init: 1809 free_percpu(mlxsw_sp_port->pcpu_stats); 1810 err_alloc_stats: 1811 kfree(mlxsw_sp_port->untagged_vlans); 1812 err_port_untagged_vlans_alloc: 1813 kfree(mlxsw_sp_port->active_vlans); 1814 err_port_active_vlans_alloc: 1815 free_netdev(dev); 1816 return err; 1817 } 1818 1819 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) 1820 { 1821 struct net_device *dev = mlxsw_sp_port->dev; 1822 struct mlxsw_sp_port *mlxsw_sp_vport, *tmp; 1823 1824 list_for_each_entry_safe(mlxsw_sp_vport, tmp, 1825 &mlxsw_sp_port->vports_list, vport.list) { 1826 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 1827 1828 /* vPorts created for VLAN devices should already be gone 1829 * by now, since we unregistered the port netdev. 1830 */ 1831 WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev)); 1832 mlxsw_sp_port_kill_vid(dev, 0, vid); 1833 } 1834 } 1835 1836 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 1837 { 1838 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1839 1840 if (!mlxsw_sp_port) 1841 return; 1842 mlxsw_sp->ports[local_port] = NULL; 1843 mlxsw_core_port_fini(&mlxsw_sp_port->core_port); 1844 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1845 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1846 mlxsw_sp_port_vports_fini(mlxsw_sp_port); 1847 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 1848 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 1849 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); 1850 free_percpu(mlxsw_sp_port->pcpu_stats); 1851 kfree(mlxsw_sp_port->untagged_vlans); 1852 kfree(mlxsw_sp_port->active_vlans); 1853 free_netdev(mlxsw_sp_port->dev); 1854 } 1855 1856 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) 1857 { 1858 int i; 1859 1860 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) 1861 mlxsw_sp_port_remove(mlxsw_sp, i); 1862 kfree(mlxsw_sp->ports); 1863 } 1864 1865 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 1866 { 1867 u8 module, width, lane; 1868 size_t alloc_size; 1869 int i; 1870 int err; 1871 1872 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS; 1873 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); 1874 if (!mlxsw_sp->ports) 1875 return -ENOMEM; 1876 1877 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { 1878 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 1879 &width, &lane); 1880 if (err) 1881 goto err_port_module_info_get; 1882 if (!width) 1883 continue; 1884 mlxsw_sp->port_to_module[i] = module; 1885 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width, 1886 lane); 1887 if (err) 1888 goto err_port_create; 1889 } 1890 return 0; 1891 1892 err_port_create: 1893 err_port_module_info_get: 1894 for (i--; i >= 1; i--) 1895 mlxsw_sp_port_remove(mlxsw_sp, i); 1896 kfree(mlxsw_sp->ports); 1897 return err; 1898 } 1899 1900 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) 1901 { 1902 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; 1903 1904 return local_port - offset; 1905 } 1906 1907 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 1908 u8 module, unsigned int count) 1909 { 1910 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 1911 int err, i; 1912 1913 for (i = 0; i < count; i++) { 1914 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module, 1915 width, i * width); 1916 if (err) 1917 goto err_port_module_map; 1918 } 1919 1920 for (i = 0; i < count; i++) { 1921 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0); 1922 if (err) 1923 goto err_port_swid_set; 1924 } 1925 1926 for (i = 0; i < count; i++) { 1927 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 1928 module, width, i * width); 1929 if (err) 1930 goto err_port_create; 1931 } 1932 1933 return 0; 1934 1935 err_port_create: 1936 for (i--; i >= 0; i--) 1937 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 1938 i = count; 1939 err_port_swid_set: 1940 for (i--; i >= 0; i--) 1941 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 1942 MLXSW_PORT_SWID_DISABLED_PORT); 1943 i = count; 1944 err_port_module_map: 1945 for (i--; i >= 0; i--) 1946 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i); 1947 return err; 1948 } 1949 1950 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 1951 u8 base_port, unsigned int count) 1952 { 1953 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 1954 int i; 1955 1956 /* Split by four means we need to re-create two ports, otherwise 1957 * only one. 1958 */ 1959 count = count / 2; 1960 1961 for (i = 0; i < count; i++) { 1962 local_port = base_port + i * 2; 1963 module = mlxsw_sp->port_to_module[local_port]; 1964 1965 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, 1966 0); 1967 } 1968 1969 for (i = 0; i < count; i++) 1970 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0); 1971 1972 for (i = 0; i < count; i++) { 1973 local_port = base_port + i * 2; 1974 module = mlxsw_sp->port_to_module[local_port]; 1975 1976 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 1977 width, 0); 1978 } 1979 } 1980 1981 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 1982 unsigned int count) 1983 { 1984 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 1985 struct mlxsw_sp_port *mlxsw_sp_port; 1986 u8 module, cur_width, base_port; 1987 int i; 1988 int err; 1989 1990 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1991 if (!mlxsw_sp_port) { 1992 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 1993 local_port); 1994 return -EINVAL; 1995 } 1996 1997 module = mlxsw_sp_port->mapping.module; 1998 cur_width = mlxsw_sp_port->mapping.width; 1999 2000 if (count != 2 && count != 4) { 2001 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 2002 return -EINVAL; 2003 } 2004 2005 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { 2006 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); 2007 return -EINVAL; 2008 } 2009 2010 /* Make sure we have enough slave (even) ports for the split. */ 2011 if (count == 2) { 2012 base_port = local_port; 2013 if (mlxsw_sp->ports[base_port + 1]) { 2014 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2015 return -EINVAL; 2016 } 2017 } else { 2018 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2019 if (mlxsw_sp->ports[base_port + 1] || 2020 mlxsw_sp->ports[base_port + 3]) { 2021 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); 2022 return -EINVAL; 2023 } 2024 } 2025 2026 for (i = 0; i < count; i++) 2027 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2028 2029 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 2030 if (err) { 2031 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2032 goto err_port_split_create; 2033 } 2034 2035 return 0; 2036 2037 err_port_split_create: 2038 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2039 return err; 2040 } 2041 2042 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) 2043 { 2044 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2045 struct mlxsw_sp_port *mlxsw_sp_port; 2046 u8 cur_width, base_port; 2047 unsigned int count; 2048 int i; 2049 2050 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2051 if (!mlxsw_sp_port) { 2052 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 2053 local_port); 2054 return -EINVAL; 2055 } 2056 2057 if (!mlxsw_sp_port->split) { 2058 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); 2059 return -EINVAL; 2060 } 2061 2062 cur_width = mlxsw_sp_port->mapping.width; 2063 count = cur_width == 1 ? 4 : 2; 2064 2065 base_port = mlxsw_sp_cluster_base_port_get(local_port); 2066 2067 /* Determine which ports to remove. */ 2068 if (count == 2 && local_port >= base_port + 2) 2069 base_port = base_port + 2; 2070 2071 for (i = 0; i < count; i++) 2072 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2073 2074 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2075 2076 return 0; 2077 } 2078 2079 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, 2080 char *pude_pl, void *priv) 2081 { 2082 struct mlxsw_sp *mlxsw_sp = priv; 2083 struct mlxsw_sp_port *mlxsw_sp_port; 2084 enum mlxsw_reg_pude_oper_status status; 2085 u8 local_port; 2086 2087 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 2088 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2089 if (!mlxsw_sp_port) { 2090 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n", 2091 local_port); 2092 return; 2093 } 2094 2095 status = mlxsw_reg_pude_oper_status_get(pude_pl); 2096 if (status == MLXSW_PORT_OPER_STATUS_UP) { 2097 netdev_info(mlxsw_sp_port->dev, "link up\n"); 2098 netif_carrier_on(mlxsw_sp_port->dev); 2099 } else { 2100 netdev_info(mlxsw_sp_port->dev, "link down\n"); 2101 netif_carrier_off(mlxsw_sp_port->dev); 2102 } 2103 } 2104 2105 static struct mlxsw_event_listener mlxsw_sp_pude_event = { 2106 .func = mlxsw_sp_pude_event_func, 2107 .trap_id = MLXSW_TRAP_ID_PUDE, 2108 }; 2109 2110 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp, 2111 enum mlxsw_event_trap_id trap_id) 2112 { 2113 struct mlxsw_event_listener *el; 2114 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2115 int err; 2116 2117 switch (trap_id) { 2118 case MLXSW_TRAP_ID_PUDE: 2119 el = &mlxsw_sp_pude_event; 2120 break; 2121 } 2122 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp); 2123 if (err) 2124 return err; 2125 2126 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); 2127 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2128 if (err) 2129 goto err_event_trap_set; 2130 2131 return 0; 2132 2133 err_event_trap_set: 2134 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); 2135 return err; 2136 } 2137 2138 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp, 2139 enum mlxsw_event_trap_id trap_id) 2140 { 2141 struct mlxsw_event_listener *el; 2142 2143 switch (trap_id) { 2144 case MLXSW_TRAP_ID_PUDE: 2145 el = &mlxsw_sp_pude_event; 2146 break; 2147 } 2148 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); 2149 } 2150 2151 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port, 2152 void *priv) 2153 { 2154 struct mlxsw_sp *mlxsw_sp = priv; 2155 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2156 struct mlxsw_sp_port_pcpu_stats *pcpu_stats; 2157 2158 if (unlikely(!mlxsw_sp_port)) { 2159 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", 2160 local_port); 2161 return; 2162 } 2163 2164 skb->dev = mlxsw_sp_port->dev; 2165 2166 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); 2167 u64_stats_update_begin(&pcpu_stats->syncp); 2168 pcpu_stats->rx_packets++; 2169 pcpu_stats->rx_bytes += skb->len; 2170 u64_stats_update_end(&pcpu_stats->syncp); 2171 2172 skb->protocol = eth_type_trans(skb, skb->dev); 2173 netif_receive_skb(skb); 2174 } 2175 2176 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { 2177 { 2178 .func = mlxsw_sp_rx_listener_func, 2179 .local_port = MLXSW_PORT_DONT_CARE, 2180 .trap_id = MLXSW_TRAP_ID_FDB_MC, 2181 }, 2182 /* Traps for specific L2 packet types, not trapped as FDB MC */ 2183 { 2184 .func = mlxsw_sp_rx_listener_func, 2185 .local_port = MLXSW_PORT_DONT_CARE, 2186 .trap_id = MLXSW_TRAP_ID_STP, 2187 }, 2188 { 2189 .func = mlxsw_sp_rx_listener_func, 2190 .local_port = MLXSW_PORT_DONT_CARE, 2191 .trap_id = MLXSW_TRAP_ID_LACP, 2192 }, 2193 { 2194 .func = mlxsw_sp_rx_listener_func, 2195 .local_port = MLXSW_PORT_DONT_CARE, 2196 .trap_id = MLXSW_TRAP_ID_EAPOL, 2197 }, 2198 { 2199 .func = mlxsw_sp_rx_listener_func, 2200 .local_port = MLXSW_PORT_DONT_CARE, 2201 .trap_id = MLXSW_TRAP_ID_LLDP, 2202 }, 2203 { 2204 .func = mlxsw_sp_rx_listener_func, 2205 .local_port = MLXSW_PORT_DONT_CARE, 2206 .trap_id = MLXSW_TRAP_ID_MMRP, 2207 }, 2208 { 2209 .func = mlxsw_sp_rx_listener_func, 2210 .local_port = MLXSW_PORT_DONT_CARE, 2211 .trap_id = MLXSW_TRAP_ID_MVRP, 2212 }, 2213 { 2214 .func = mlxsw_sp_rx_listener_func, 2215 .local_port = MLXSW_PORT_DONT_CARE, 2216 .trap_id = MLXSW_TRAP_ID_RPVST, 2217 }, 2218 { 2219 .func = mlxsw_sp_rx_listener_func, 2220 .local_port = MLXSW_PORT_DONT_CARE, 2221 .trap_id = MLXSW_TRAP_ID_DHCP, 2222 }, 2223 { 2224 .func = mlxsw_sp_rx_listener_func, 2225 .local_port = MLXSW_PORT_DONT_CARE, 2226 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, 2227 }, 2228 { 2229 .func = mlxsw_sp_rx_listener_func, 2230 .local_port = MLXSW_PORT_DONT_CARE, 2231 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, 2232 }, 2233 { 2234 .func = mlxsw_sp_rx_listener_func, 2235 .local_port = MLXSW_PORT_DONT_CARE, 2236 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, 2237 }, 2238 { 2239 .func = mlxsw_sp_rx_listener_func, 2240 .local_port = MLXSW_PORT_DONT_CARE, 2241 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, 2242 }, 2243 { 2244 .func = mlxsw_sp_rx_listener_func, 2245 .local_port = MLXSW_PORT_DONT_CARE, 2246 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, 2247 }, 2248 }; 2249 2250 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2251 { 2252 char htgt_pl[MLXSW_REG_HTGT_LEN]; 2253 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2254 int i; 2255 int err; 2256 2257 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX); 2258 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); 2259 if (err) 2260 return err; 2261 2262 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); 2263 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); 2264 if (err) 2265 return err; 2266 2267 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { 2268 err = mlxsw_core_rx_listener_register(mlxsw_sp->core, 2269 &mlxsw_sp_rx_listener[i], 2270 mlxsw_sp); 2271 if (err) 2272 goto err_rx_listener_register; 2273 2274 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, 2275 mlxsw_sp_rx_listener[i].trap_id); 2276 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2277 if (err) 2278 goto err_rx_trap_set; 2279 } 2280 return 0; 2281 2282 err_rx_trap_set: 2283 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 2284 &mlxsw_sp_rx_listener[i], 2285 mlxsw_sp); 2286 err_rx_listener_register: 2287 for (i--; i >= 0; i--) { 2288 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, 2289 mlxsw_sp_rx_listener[i].trap_id); 2290 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2291 2292 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 2293 &mlxsw_sp_rx_listener[i], 2294 mlxsw_sp); 2295 } 2296 return err; 2297 } 2298 2299 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) 2300 { 2301 char hpkt_pl[MLXSW_REG_HPKT_LEN]; 2302 int i; 2303 2304 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { 2305 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, 2306 mlxsw_sp_rx_listener[i].trap_id); 2307 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2308 2309 mlxsw_core_rx_listener_unregister(mlxsw_sp->core, 2310 &mlxsw_sp_rx_listener[i], 2311 mlxsw_sp); 2312 } 2313 } 2314 2315 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, 2316 enum mlxsw_reg_sfgc_type type, 2317 enum mlxsw_reg_sfgc_bridge_type bridge_type) 2318 { 2319 enum mlxsw_flood_table_type table_type; 2320 enum mlxsw_sp_flood_table flood_table; 2321 char sfgc_pl[MLXSW_REG_SFGC_LEN]; 2322 2323 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) 2324 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; 2325 else 2326 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; 2327 2328 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) 2329 flood_table = MLXSW_SP_FLOOD_TABLE_UC; 2330 else 2331 flood_table = MLXSW_SP_FLOOD_TABLE_BM; 2332 2333 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, 2334 flood_table); 2335 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl); 2336 } 2337 2338 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) 2339 { 2340 int type, err; 2341 2342 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { 2343 if (type == MLXSW_REG_SFGC_TYPE_RESERVED) 2344 continue; 2345 2346 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 2347 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); 2348 if (err) 2349 return err; 2350 2351 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, 2352 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); 2353 if (err) 2354 return err; 2355 } 2356 2357 return 0; 2358 } 2359 2360 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2361 { 2362 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2363 2364 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | 2365 MLXSW_REG_SLCR_LAG_HASH_DMAC | 2366 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | 2367 MLXSW_REG_SLCR_LAG_HASH_VLANID | 2368 MLXSW_REG_SLCR_LAG_HASH_SIP | 2369 MLXSW_REG_SLCR_LAG_HASH_DIP | 2370 MLXSW_REG_SLCR_LAG_HASH_SPORT | 2371 MLXSW_REG_SLCR_LAG_HASH_DPORT | 2372 MLXSW_REG_SLCR_LAG_HASH_IPPROTO); 2373 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); 2374 } 2375 2376 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 2377 const struct mlxsw_bus_info *mlxsw_bus_info) 2378 { 2379 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2380 int err; 2381 2382 mlxsw_sp->core = mlxsw_core; 2383 mlxsw_sp->bus_info = mlxsw_bus_info; 2384 INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); 2385 INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); 2386 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); 2387 2388 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2389 if (err) { 2390 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); 2391 return err; 2392 } 2393 2394 err = mlxsw_sp_ports_create(mlxsw_sp); 2395 if (err) { 2396 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); 2397 return err; 2398 } 2399 2400 err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2401 if (err) { 2402 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); 2403 goto err_event_register; 2404 } 2405 2406 err = mlxsw_sp_traps_init(mlxsw_sp); 2407 if (err) { 2408 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n"); 2409 goto err_rx_listener_register; 2410 } 2411 2412 err = mlxsw_sp_flood_init(mlxsw_sp); 2413 if (err) { 2414 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n"); 2415 goto err_flood_init; 2416 } 2417 2418 err = mlxsw_sp_buffers_init(mlxsw_sp); 2419 if (err) { 2420 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); 2421 goto err_buffers_init; 2422 } 2423 2424 err = mlxsw_sp_lag_init(mlxsw_sp); 2425 if (err) { 2426 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); 2427 goto err_lag_init; 2428 } 2429 2430 err = mlxsw_sp_switchdev_init(mlxsw_sp); 2431 if (err) { 2432 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 2433 goto err_switchdev_init; 2434 } 2435 2436 return 0; 2437 2438 err_switchdev_init: 2439 err_lag_init: 2440 mlxsw_sp_buffers_fini(mlxsw_sp); 2441 err_buffers_init: 2442 err_flood_init: 2443 mlxsw_sp_traps_fini(mlxsw_sp); 2444 err_rx_listener_register: 2445 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2446 err_event_register: 2447 mlxsw_sp_ports_remove(mlxsw_sp); 2448 return err; 2449 } 2450 2451 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 2452 { 2453 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2454 2455 mlxsw_sp_switchdev_fini(mlxsw_sp); 2456 mlxsw_sp_buffers_fini(mlxsw_sp); 2457 mlxsw_sp_traps_fini(mlxsw_sp); 2458 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2459 mlxsw_sp_ports_remove(mlxsw_sp); 2460 } 2461 2462 static struct mlxsw_config_profile mlxsw_sp_config_profile = { 2463 .used_max_vepa_channels = 1, 2464 .max_vepa_channels = 0, 2465 .used_max_lag = 1, 2466 .max_lag = MLXSW_SP_LAG_MAX, 2467 .used_max_port_per_lag = 1, 2468 .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX, 2469 .used_max_mid = 1, 2470 .max_mid = MLXSW_SP_MID_MAX, 2471 .used_max_pgt = 1, 2472 .max_pgt = 0, 2473 .used_max_system_port = 1, 2474 .max_system_port = 64, 2475 .used_max_vlan_groups = 1, 2476 .max_vlan_groups = 127, 2477 .used_max_regions = 1, 2478 .max_regions = 400, 2479 .used_flood_tables = 1, 2480 .used_flood_mode = 1, 2481 .flood_mode = 3, 2482 .max_fid_offset_flood_tables = 2, 2483 .fid_offset_flood_table_size = VLAN_N_VID - 1, 2484 .max_fid_flood_tables = 2, 2485 .fid_flood_table_size = MLXSW_SP_VFID_MAX, 2486 .used_max_ib_mc = 1, 2487 .max_ib_mc = 0, 2488 .used_max_pkey = 1, 2489 .max_pkey = 0, 2490 .swid_config = { 2491 { 2492 .used_type = 1, 2493 .type = MLXSW_PORT_SWID_TYPE_ETH, 2494 } 2495 }, 2496 }; 2497 2498 static struct mlxsw_driver mlxsw_sp_driver = { 2499 .kind = MLXSW_DEVICE_KIND_SPECTRUM, 2500 .owner = THIS_MODULE, 2501 .priv_size = sizeof(struct mlxsw_sp), 2502 .init = mlxsw_sp_init, 2503 .fini = mlxsw_sp_fini, 2504 .port_split = mlxsw_sp_port_split, 2505 .port_unsplit = mlxsw_sp_port_unsplit, 2506 .sb_pool_get = mlxsw_sp_sb_pool_get, 2507 .sb_pool_set = mlxsw_sp_sb_pool_set, 2508 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, 2509 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, 2510 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, 2511 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, 2512 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, 2513 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, 2514 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, 2515 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, 2516 .txhdr_construct = mlxsw_sp_txhdr_construct, 2517 .txhdr_len = MLXSW_TXHDR_LEN, 2518 .profile = &mlxsw_sp_config_profile, 2519 }; 2520 2521 static int 2522 mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) 2523 { 2524 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2525 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 2526 2527 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); 2528 mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); 2529 2530 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2531 } 2532 2533 static int 2534 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, 2535 u16 fid) 2536 { 2537 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2538 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 2539 2540 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); 2541 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 2542 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, 2543 mlxsw_sp_port->local_port); 2544 2545 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2546 } 2547 2548 static int 2549 mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port) 2550 { 2551 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2552 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 2553 2554 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG); 2555 mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); 2556 2557 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2558 } 2559 2560 static int 2561 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, 2562 u16 fid) 2563 { 2564 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2565 char sfdf_pl[MLXSW_REG_SFDF_LEN]; 2566 2567 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID); 2568 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); 2569 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); 2570 2571 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); 2572 } 2573 2574 static int 2575 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port) 2576 { 2577 int err, last_err = 0; 2578 u16 vid; 2579 2580 for (vid = 1; vid < VLAN_N_VID - 1; vid++) { 2581 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid); 2582 if (err) 2583 last_err = err; 2584 } 2585 2586 return last_err; 2587 } 2588 2589 static int 2590 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port) 2591 { 2592 int err, last_err = 0; 2593 u16 vid; 2594 2595 for (vid = 1; vid < VLAN_N_VID - 1; vid++) { 2596 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid); 2597 if (err) 2598 last_err = err; 2599 } 2600 2601 return last_err; 2602 } 2603 2604 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) 2605 { 2606 if (!list_empty(&mlxsw_sp_port->vports_list)) 2607 if (mlxsw_sp_port->lagged) 2608 return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port); 2609 else 2610 return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port); 2611 else 2612 if (mlxsw_sp_port->lagged) 2613 return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port); 2614 else 2615 return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port); 2616 } 2617 2618 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) 2619 { 2620 u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); 2621 u16 fid = mlxsw_sp_vfid_to_fid(vfid); 2622 2623 if (mlxsw_sp_vport->lagged) 2624 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport, 2625 fid); 2626 else 2627 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid); 2628 } 2629 2630 static bool mlxsw_sp_port_dev_check(const struct net_device *dev) 2631 { 2632 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 2633 } 2634 2635 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) 2636 { 2637 struct net_device *dev = mlxsw_sp_port->dev; 2638 int err; 2639 2640 /* When port is not bridged untagged packets are tagged with 2641 * PVID=VID=1, thereby creating an implicit VLAN interface in 2642 * the device. Remove it and let bridge code take care of its 2643 * own VLANs. 2644 */ 2645 err = mlxsw_sp_port_kill_vid(dev, 0, 1); 2646 if (err) 2647 return err; 2648 2649 mlxsw_sp_port->learning = 1; 2650 mlxsw_sp_port->learning_sync = 1; 2651 mlxsw_sp_port->uc_flood = 1; 2652 mlxsw_sp_port->bridged = 1; 2653 2654 return 0; 2655 } 2656 2657 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, 2658 bool flush_fdb) 2659 { 2660 struct net_device *dev = mlxsw_sp_port->dev; 2661 2662 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) 2663 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); 2664 2665 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); 2666 2667 mlxsw_sp_port->learning = 0; 2668 mlxsw_sp_port->learning_sync = 0; 2669 mlxsw_sp_port->uc_flood = 0; 2670 mlxsw_sp_port->bridged = 0; 2671 2672 /* Add implicit VLAN interface in the device, so that untagged 2673 * packets will be classified to the default vFID. 2674 */ 2675 return mlxsw_sp_port_add_vid(dev, 0, 1); 2676 } 2677 2678 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, 2679 struct net_device *br_dev) 2680 { 2681 return !mlxsw_sp->master_bridge.dev || 2682 mlxsw_sp->master_bridge.dev == br_dev; 2683 } 2684 2685 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, 2686 struct net_device *br_dev) 2687 { 2688 mlxsw_sp->master_bridge.dev = br_dev; 2689 mlxsw_sp->master_bridge.ref_count++; 2690 } 2691 2692 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, 2693 struct net_device *br_dev) 2694 { 2695 if (--mlxsw_sp->master_bridge.ref_count == 0) 2696 mlxsw_sp->master_bridge.dev = NULL; 2697 } 2698 2699 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 2700 { 2701 char sldr_pl[MLXSW_REG_SLDR_LEN]; 2702 2703 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 2704 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 2705 } 2706 2707 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 2708 { 2709 char sldr_pl[MLXSW_REG_SLDR_LEN]; 2710 2711 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 2712 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 2713 } 2714 2715 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 2716 u16 lag_id, u8 port_index) 2717 { 2718 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2719 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 2720 2721 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, 2722 lag_id, port_index); 2723 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 2724 } 2725 2726 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 2727 u16 lag_id) 2728 { 2729 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2730 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 2731 2732 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, 2733 lag_id); 2734 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 2735 } 2736 2737 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, 2738 u16 lag_id) 2739 { 2740 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2741 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 2742 2743 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, 2744 lag_id); 2745 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 2746 } 2747 2748 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, 2749 u16 lag_id) 2750 { 2751 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2752 char slcor_pl[MLXSW_REG_SLCOR_LEN]; 2753 2754 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, 2755 lag_id); 2756 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 2757 } 2758 2759 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 2760 struct net_device *lag_dev, 2761 u16 *p_lag_id) 2762 { 2763 struct mlxsw_sp_upper *lag; 2764 int free_lag_id = -1; 2765 int i; 2766 2767 for (i = 0; i < MLXSW_SP_LAG_MAX; i++) { 2768 lag = mlxsw_sp_lag_get(mlxsw_sp, i); 2769 if (lag->ref_count) { 2770 if (lag->dev == lag_dev) { 2771 *p_lag_id = i; 2772 return 0; 2773 } 2774 } else if (free_lag_id < 0) { 2775 free_lag_id = i; 2776 } 2777 } 2778 if (free_lag_id < 0) 2779 return -EBUSY; 2780 *p_lag_id = free_lag_id; 2781 return 0; 2782 } 2783 2784 static bool 2785 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, 2786 struct net_device *lag_dev, 2787 struct netdev_lag_upper_info *lag_upper_info) 2788 { 2789 u16 lag_id; 2790 2791 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) 2792 return false; 2793 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) 2794 return false; 2795 return true; 2796 } 2797 2798 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, 2799 u16 lag_id, u8 *p_port_index) 2800 { 2801 int i; 2802 2803 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { 2804 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { 2805 *p_port_index = i; 2806 return 0; 2807 } 2808 } 2809 return -EBUSY; 2810 } 2811 2812 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 2813 struct net_device *lag_dev) 2814 { 2815 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2816 struct mlxsw_sp_upper *lag; 2817 u16 lag_id; 2818 u8 port_index; 2819 int err; 2820 2821 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 2822 if (err) 2823 return err; 2824 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 2825 if (!lag->ref_count) { 2826 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 2827 if (err) 2828 return err; 2829 lag->dev = lag_dev; 2830 } 2831 2832 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 2833 if (err) 2834 return err; 2835 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); 2836 if (err) 2837 goto err_col_port_add; 2838 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); 2839 if (err) 2840 goto err_col_port_enable; 2841 2842 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, 2843 mlxsw_sp_port->local_port); 2844 mlxsw_sp_port->lag_id = lag_id; 2845 mlxsw_sp_port->lagged = 1; 2846 lag->ref_count++; 2847 return 0; 2848 2849 err_col_port_enable: 2850 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 2851 err_col_port_add: 2852 if (!lag->ref_count) 2853 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 2854 return err; 2855 } 2856 2857 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, 2858 struct net_device *br_dev, 2859 bool flush_fdb); 2860 2861 static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, 2862 struct net_device *lag_dev) 2863 { 2864 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2865 struct mlxsw_sp_port *mlxsw_sp_vport; 2866 struct mlxsw_sp_upper *lag; 2867 u16 lag_id = mlxsw_sp_port->lag_id; 2868 int err; 2869 2870 if (!mlxsw_sp_port->lagged) 2871 return 0; 2872 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 2873 WARN_ON(lag->ref_count == 0); 2874 2875 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); 2876 if (err) 2877 return err; 2878 err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 2879 if (err) 2880 return err; 2881 2882 /* In case we leave a LAG device that has bridges built on top, 2883 * then their teardown sequence is never issued and we need to 2884 * invoke the necessary cleanup routines ourselves. 2885 */ 2886 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, 2887 vport.list) { 2888 struct net_device *br_dev; 2889 2890 if (!mlxsw_sp_vport->bridged) 2891 continue; 2892 2893 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); 2894 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false); 2895 } 2896 2897 if (mlxsw_sp_port->bridged) { 2898 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); 2899 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); 2900 mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); 2901 } 2902 2903 if (lag->ref_count == 1) { 2904 if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port)) 2905 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); 2906 err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 2907 if (err) 2908 return err; 2909 } 2910 2911 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 2912 mlxsw_sp_port->local_port); 2913 mlxsw_sp_port->lagged = 0; 2914 lag->ref_count--; 2915 return 0; 2916 } 2917 2918 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 2919 u16 lag_id) 2920 { 2921 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2922 char sldr_pl[MLXSW_REG_SLDR_LEN]; 2923 2924 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, 2925 mlxsw_sp_port->local_port); 2926 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 2927 } 2928 2929 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, 2930 u16 lag_id) 2931 { 2932 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2933 char sldr_pl[MLXSW_REG_SLDR_LEN]; 2934 2935 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id, 2936 mlxsw_sp_port->local_port); 2937 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 2938 } 2939 2940 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, 2941 bool lag_tx_enabled) 2942 { 2943 if (lag_tx_enabled) 2944 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, 2945 mlxsw_sp_port->lag_id); 2946 else 2947 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, 2948 mlxsw_sp_port->lag_id); 2949 } 2950 2951 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, 2952 struct netdev_lag_lower_state_info *info) 2953 { 2954 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); 2955 } 2956 2957 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, 2958 struct net_device *vlan_dev) 2959 { 2960 struct mlxsw_sp_port *mlxsw_sp_vport; 2961 u16 vid = vlan_dev_vlan_id(vlan_dev); 2962 2963 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 2964 if (!mlxsw_sp_vport) { 2965 WARN_ON(!mlxsw_sp_vport); 2966 return -EINVAL; 2967 } 2968 2969 mlxsw_sp_vport->dev = vlan_dev; 2970 2971 return 0; 2972 } 2973 2974 static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, 2975 struct net_device *vlan_dev) 2976 { 2977 struct mlxsw_sp_port *mlxsw_sp_vport; 2978 u16 vid = vlan_dev_vlan_id(vlan_dev); 2979 2980 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 2981 if (!mlxsw_sp_vport) { 2982 WARN_ON(!mlxsw_sp_vport); 2983 return -EINVAL; 2984 } 2985 2986 /* When removing a VLAN device while still bridged we should first 2987 * remove it from the bridge, as we receive the bridge's notification 2988 * when the vPort is already gone. 2989 */ 2990 if (mlxsw_sp_vport->bridged) { 2991 struct net_device *br_dev; 2992 2993 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); 2994 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true); 2995 } 2996 2997 mlxsw_sp_vport->dev = mlxsw_sp_port->dev; 2998 2999 return 0; 3000 } 3001 3002 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, 3003 unsigned long event, void *ptr) 3004 { 3005 struct netdev_notifier_changeupper_info *info; 3006 struct mlxsw_sp_port *mlxsw_sp_port; 3007 struct net_device *upper_dev; 3008 struct mlxsw_sp *mlxsw_sp; 3009 int err; 3010 3011 mlxsw_sp_port = netdev_priv(dev); 3012 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 3013 info = ptr; 3014 3015 switch (event) { 3016 case NETDEV_PRECHANGEUPPER: 3017 upper_dev = info->upper_dev; 3018 if (!info->master || !info->linking) 3019 break; 3020 /* HW limitation forbids to put ports to multiple bridges. */ 3021 if (netif_is_bridge_master(upper_dev) && 3022 !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) 3023 return NOTIFY_BAD; 3024 if (netif_is_lag_master(upper_dev) && 3025 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, 3026 info->upper_info)) 3027 return NOTIFY_BAD; 3028 break; 3029 case NETDEV_CHANGEUPPER: 3030 upper_dev = info->upper_dev; 3031 if (is_vlan_dev(upper_dev)) { 3032 if (info->linking) { 3033 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, 3034 upper_dev); 3035 if (err) { 3036 netdev_err(dev, "Failed to link VLAN device\n"); 3037 return NOTIFY_BAD; 3038 } 3039 } else { 3040 err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, 3041 upper_dev); 3042 if (err) { 3043 netdev_err(dev, "Failed to unlink VLAN device\n"); 3044 return NOTIFY_BAD; 3045 } 3046 } 3047 } else if (netif_is_bridge_master(upper_dev)) { 3048 if (info->linking) { 3049 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); 3050 if (err) { 3051 netdev_err(dev, "Failed to join bridge\n"); 3052 return NOTIFY_BAD; 3053 } 3054 mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); 3055 } else { 3056 err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port, 3057 true); 3058 mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); 3059 if (err) { 3060 netdev_err(dev, "Failed to leave bridge\n"); 3061 return NOTIFY_BAD; 3062 } 3063 } 3064 } else if (netif_is_lag_master(upper_dev)) { 3065 if (info->linking) { 3066 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 3067 upper_dev); 3068 if (err) { 3069 netdev_err(dev, "Failed to join link aggregation\n"); 3070 return NOTIFY_BAD; 3071 } 3072 } else { 3073 err = mlxsw_sp_port_lag_leave(mlxsw_sp_port, 3074 upper_dev); 3075 if (err) { 3076 netdev_err(dev, "Failed to leave link aggregation\n"); 3077 return NOTIFY_BAD; 3078 } 3079 } 3080 } 3081 break; 3082 } 3083 3084 return NOTIFY_DONE; 3085 } 3086 3087 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, 3088 unsigned long event, void *ptr) 3089 { 3090 struct netdev_notifier_changelowerstate_info *info; 3091 struct mlxsw_sp_port *mlxsw_sp_port; 3092 int err; 3093 3094 mlxsw_sp_port = netdev_priv(dev); 3095 info = ptr; 3096 3097 switch (event) { 3098 case NETDEV_CHANGELOWERSTATE: 3099 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { 3100 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, 3101 info->lower_state_info); 3102 if (err) 3103 netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); 3104 } 3105 break; 3106 } 3107 3108 return NOTIFY_DONE; 3109 } 3110 3111 static int mlxsw_sp_netdevice_port_event(struct net_device *dev, 3112 unsigned long event, void *ptr) 3113 { 3114 switch (event) { 3115 case NETDEV_PRECHANGEUPPER: 3116 case NETDEV_CHANGEUPPER: 3117 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr); 3118 case NETDEV_CHANGELOWERSTATE: 3119 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); 3120 } 3121 3122 return NOTIFY_DONE; 3123 } 3124 3125 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, 3126 unsigned long event, void *ptr) 3127 { 3128 struct net_device *dev; 3129 struct list_head *iter; 3130 int ret; 3131 3132 netdev_for_each_lower_dev(lag_dev, dev, iter) { 3133 if (mlxsw_sp_port_dev_check(dev)) { 3134 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); 3135 if (ret == NOTIFY_BAD) 3136 return ret; 3137 } 3138 } 3139 3140 return NOTIFY_DONE; 3141 } 3142 3143 static struct mlxsw_sp_vfid * 3144 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, 3145 const struct net_device *br_dev) 3146 { 3147 struct mlxsw_sp_vfid *vfid; 3148 3149 list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { 3150 if (vfid->br_dev == br_dev) 3151 return vfid; 3152 } 3153 3154 return NULL; 3155 } 3156 3157 static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid) 3158 { 3159 return vfid - MLXSW_SP_VFID_PORT_MAX; 3160 } 3161 3162 static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid) 3163 { 3164 return MLXSW_SP_VFID_PORT_MAX + br_vfid; 3165 } 3166 3167 static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp) 3168 { 3169 return find_first_zero_bit(mlxsw_sp->br_vfids.mapped, 3170 MLXSW_SP_VFID_BR_MAX); 3171 } 3172 3173 static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, 3174 struct net_device *br_dev) 3175 { 3176 struct device *dev = mlxsw_sp->bus_info->dev; 3177 struct mlxsw_sp_vfid *vfid; 3178 u16 n_vfid; 3179 int err; 3180 3181 n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); 3182 if (n_vfid == MLXSW_SP_VFID_MAX) { 3183 dev_err(dev, "No available vFIDs\n"); 3184 return ERR_PTR(-ERANGE); 3185 } 3186 3187 err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); 3188 if (err) { 3189 dev_err(dev, "Failed to create vFID=%d\n", n_vfid); 3190 return ERR_PTR(err); 3191 } 3192 3193 vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); 3194 if (!vfid) 3195 goto err_allocate_vfid; 3196 3197 vfid->vfid = n_vfid; 3198 vfid->br_dev = br_dev; 3199 3200 list_add(&vfid->list, &mlxsw_sp->br_vfids.list); 3201 set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped); 3202 3203 return vfid; 3204 3205 err_allocate_vfid: 3206 __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); 3207 return ERR_PTR(-ENOMEM); 3208 } 3209 3210 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, 3211 struct mlxsw_sp_vfid *vfid) 3212 { 3213 u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid); 3214 3215 clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped); 3216 list_del(&vfid->list); 3217 3218 __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); 3219 3220 kfree(vfid); 3221 } 3222 3223 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, 3224 struct net_device *br_dev, 3225 bool flush_fdb) 3226 { 3227 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3228 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 3229 struct net_device *dev = mlxsw_sp_vport->dev; 3230 struct mlxsw_sp_vfid *vfid, *new_vfid; 3231 int err; 3232 3233 vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); 3234 if (!vfid) { 3235 WARN_ON(!vfid); 3236 return -EINVAL; 3237 } 3238 3239 /* We need a vFID to go back to after leaving the bridge's vFID. */ 3240 new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); 3241 if (!new_vfid) { 3242 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); 3243 if (IS_ERR(new_vfid)) { 3244 netdev_err(dev, "Failed to create vFID for VID=%d\n", 3245 vid); 3246 return PTR_ERR(new_vfid); 3247 } 3248 } 3249 3250 /* Invalidate existing {Port, VID} to vFID mapping and create a new 3251 * one for the new vFID. 3252 */ 3253 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 3254 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 3255 false, 3256 mlxsw_sp_vfid_to_fid(vfid->vfid), 3257 vid); 3258 if (err) { 3259 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", 3260 vfid->vfid); 3261 goto err_port_vid_to_fid_invalidate; 3262 } 3263 3264 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 3265 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 3266 true, 3267 mlxsw_sp_vfid_to_fid(new_vfid->vfid), 3268 vid); 3269 if (err) { 3270 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", 3271 new_vfid->vfid); 3272 goto err_port_vid_to_fid_validate; 3273 } 3274 3275 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); 3276 if (err) { 3277 netdev_err(dev, "Failed to disable learning\n"); 3278 goto err_port_vid_learning_set; 3279 } 3280 3281 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, 3282 false); 3283 if (err) { 3284 netdev_err(dev, "Failed clear to clear flooding\n"); 3285 goto err_vport_flood_set; 3286 } 3287 3288 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, 3289 MLXSW_REG_SPMS_STATE_FORWARDING); 3290 if (err) { 3291 netdev_err(dev, "Failed to set STP state\n"); 3292 goto err_port_stp_state_set; 3293 } 3294 3295 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) 3296 netdev_err(dev, "Failed to flush FDB\n"); 3297 3298 /* Switch between the vFIDs and destroy the old one if needed. */ 3299 new_vfid->nr_vports++; 3300 mlxsw_sp_vport->vport.vfid = new_vfid; 3301 vfid->nr_vports--; 3302 if (!vfid->nr_vports) 3303 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); 3304 3305 mlxsw_sp_vport->learning = 0; 3306 mlxsw_sp_vport->learning_sync = 0; 3307 mlxsw_sp_vport->uc_flood = 0; 3308 mlxsw_sp_vport->bridged = 0; 3309 3310 return 0; 3311 3312 err_port_stp_state_set: 3313 err_vport_flood_set: 3314 err_port_vid_learning_set: 3315 err_port_vid_to_fid_validate: 3316 err_port_vid_to_fid_invalidate: 3317 /* Rollback vFID only if new. */ 3318 if (!new_vfid->nr_vports) 3319 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid); 3320 return err; 3321 } 3322 3323 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, 3324 struct net_device *br_dev) 3325 { 3326 struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid; 3327 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; 3328 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 3329 struct net_device *dev = mlxsw_sp_vport->dev; 3330 struct mlxsw_sp_vfid *vfid; 3331 int err; 3332 3333 vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); 3334 if (!vfid) { 3335 vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev); 3336 if (IS_ERR(vfid)) { 3337 netdev_err(dev, "Failed to create bridge vFID\n"); 3338 return PTR_ERR(vfid); 3339 } 3340 } 3341 3342 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false); 3343 if (err) { 3344 netdev_err(dev, "Failed to setup flooding for vFID=%d\n", 3345 vfid->vfid); 3346 goto err_port_flood_set; 3347 } 3348 3349 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); 3350 if (err) { 3351 netdev_err(dev, "Failed to enable learning\n"); 3352 goto err_port_vid_learning_set; 3353 } 3354 3355 /* We need to invalidate existing {Port, VID} to vFID mapping and 3356 * create a new one for the bridge's vFID. 3357 */ 3358 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 3359 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 3360 false, 3361 mlxsw_sp_vfid_to_fid(old_vfid->vfid), 3362 vid); 3363 if (err) { 3364 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", 3365 old_vfid->vfid); 3366 goto err_port_vid_to_fid_invalidate; 3367 } 3368 3369 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 3370 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, 3371 true, 3372 mlxsw_sp_vfid_to_fid(vfid->vfid), 3373 vid); 3374 if (err) { 3375 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", 3376 vfid->vfid); 3377 goto err_port_vid_to_fid_validate; 3378 } 3379 3380 /* Switch between the vFIDs and destroy the old one if needed. */ 3381 vfid->nr_vports++; 3382 mlxsw_sp_vport->vport.vfid = vfid; 3383 old_vfid->nr_vports--; 3384 if (!old_vfid->nr_vports) 3385 mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid); 3386 3387 mlxsw_sp_vport->learning = 1; 3388 mlxsw_sp_vport->learning_sync = 1; 3389 mlxsw_sp_vport->uc_flood = 1; 3390 mlxsw_sp_vport->bridged = 1; 3391 3392 return 0; 3393 3394 err_port_vid_to_fid_validate: 3395 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, 3396 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, 3397 mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid); 3398 err_port_vid_to_fid_invalidate: 3399 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); 3400 err_port_vid_learning_set: 3401 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false); 3402 err_port_flood_set: 3403 if (!vfid->nr_vports) 3404 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); 3405 return err; 3406 } 3407 3408 static bool 3409 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, 3410 const struct net_device *br_dev) 3411 { 3412 struct mlxsw_sp_port *mlxsw_sp_vport; 3413 3414 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, 3415 vport.list) { 3416 if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev) 3417 return false; 3418 } 3419 3420 return true; 3421 } 3422 3423 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, 3424 unsigned long event, void *ptr, 3425 u16 vid) 3426 { 3427 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 3428 struct netdev_notifier_changeupper_info *info = ptr; 3429 struct mlxsw_sp_port *mlxsw_sp_vport; 3430 struct net_device *upper_dev; 3431 int err; 3432 3433 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); 3434 3435 switch (event) { 3436 case NETDEV_PRECHANGEUPPER: 3437 upper_dev = info->upper_dev; 3438 if (!info->master || !info->linking) 3439 break; 3440 if (!netif_is_bridge_master(upper_dev)) 3441 return NOTIFY_BAD; 3442 /* We can't have multiple VLAN interfaces configured on 3443 * the same port and being members in the same bridge. 3444 */ 3445 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, 3446 upper_dev)) 3447 return NOTIFY_BAD; 3448 break; 3449 case NETDEV_CHANGEUPPER: 3450 upper_dev = info->upper_dev; 3451 if (!info->master) 3452 break; 3453 if (info->linking) { 3454 if (!mlxsw_sp_vport) { 3455 WARN_ON(!mlxsw_sp_vport); 3456 return NOTIFY_BAD; 3457 } 3458 err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, 3459 upper_dev); 3460 if (err) { 3461 netdev_err(dev, "Failed to join bridge\n"); 3462 return NOTIFY_BAD; 3463 } 3464 } else { 3465 /* We ignore bridge's unlinking notifications if vPort 3466 * is gone, since we already left the bridge when the 3467 * VLAN device was unlinked from the real device. 3468 */ 3469 if (!mlxsw_sp_vport) 3470 return NOTIFY_DONE; 3471 err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, 3472 upper_dev, true); 3473 if (err) { 3474 netdev_err(dev, "Failed to leave bridge\n"); 3475 return NOTIFY_BAD; 3476 } 3477 } 3478 } 3479 3480 return NOTIFY_DONE; 3481 } 3482 3483 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, 3484 unsigned long event, void *ptr, 3485 u16 vid) 3486 { 3487 struct net_device *dev; 3488 struct list_head *iter; 3489 int ret; 3490 3491 netdev_for_each_lower_dev(lag_dev, dev, iter) { 3492 if (mlxsw_sp_port_dev_check(dev)) { 3493 ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr, 3494 vid); 3495 if (ret == NOTIFY_BAD) 3496 return ret; 3497 } 3498 } 3499 3500 return NOTIFY_DONE; 3501 } 3502 3503 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, 3504 unsigned long event, void *ptr) 3505 { 3506 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 3507 u16 vid = vlan_dev_vlan_id(vlan_dev); 3508 3509 if (mlxsw_sp_port_dev_check(real_dev)) 3510 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr, 3511 vid); 3512 else if (netif_is_lag_master(real_dev)) 3513 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, 3514 vid); 3515 3516 return NOTIFY_DONE; 3517 } 3518 3519 static int mlxsw_sp_netdevice_event(struct notifier_block *unused, 3520 unsigned long event, void *ptr) 3521 { 3522 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3523 3524 if (mlxsw_sp_port_dev_check(dev)) 3525 return mlxsw_sp_netdevice_port_event(dev, event, ptr); 3526 3527 if (netif_is_lag_master(dev)) 3528 return mlxsw_sp_netdevice_lag_event(dev, event, ptr); 3529 3530 if (is_vlan_dev(dev)) 3531 return mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 3532 3533 return NOTIFY_DONE; 3534 } 3535 3536 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { 3537 .notifier_call = mlxsw_sp_netdevice_event, 3538 }; 3539 3540 static int __init mlxsw_sp_module_init(void) 3541 { 3542 int err; 3543 3544 register_netdevice_notifier(&mlxsw_sp_netdevice_nb); 3545 err = mlxsw_core_driver_register(&mlxsw_sp_driver); 3546 if (err) 3547 goto err_core_driver_register; 3548 return 0; 3549 3550 err_core_driver_register: 3551 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); 3552 return err; 3553 } 3554 3555 static void __exit mlxsw_sp_module_exit(void) 3556 { 3557 mlxsw_core_driver_unregister(&mlxsw_sp_driver); 3558 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); 3559 } 3560 3561 module_init(mlxsw_sp_module_init); 3562 module_exit(mlxsw_sp_module_exit); 3563 3564 MODULE_LICENSE("Dual BSD/GPL"); 3565 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); 3566 MODULE_DESCRIPTION("Mellanox Spectrum driver"); 3567 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM); 3568