// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		INIT_LIST_HEAD(&curr->bound_ports_list);
		curr->id = i;
	}

	return 0;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}
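
/* A mirror to a physical (front-panel) port needs no encapsulation: the
 * parms callback above only records the target port. The configure
 * callback below then programs an MPAT (Monitoring Port Analyzer Table)
 * entry of the LOCAL_ETH SPAN type for it.
 */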

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}
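
/* Note: when the neighbour is not yet valid, mlxsw_sp_span_dmac() kicks
 * resolution via neigh_event_send() and fails with -ENOENT. The entry is
 * then installed as unoffloadable (see below) and gets another chance
 * when mlxsw_sp_span_respin() re-queries the parameters.
 */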

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

/* Resolve the underlay egress netdevice of a mirror-to-tunnel entry down
 * to a mlxsw front-panel port, peeling VLAN, bridge and LAG devices along
 * the way, and fill sparmsp accordingly. Anything that cannot be resolved
 * makes the entry unoffloadable.
 */
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
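
/* The helper above is __maybe_unused because its only callers are the
 * gretap4/gretap6 flavors below, both of which may be compiled out.
 */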

#if IS_ENABLED(CONFIG_NET_IPGRE)
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	*daddrp = rt->rt_gateway;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif
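
/* Example of a gretap device that the ops above can offload; names and
 * addresses are illustrative only. Per the checks in the parms callback,
 * the tunnel must be up, carry no GRE key or checksum, use a fixed TTL
 * and inherit the TOS from the mirrored packet:
 *
 *   ip link add name gt4 type gretap local 192.0.2.1 remote 192.0.2.2 \
 *           ttl 64 tos inherit
 *   ip link set dev gt4 up
 */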

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}
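
/* The IPv6 flavor mirrors the IPv4 logic above: hop_limit plays the role
 * of TTL, neighbours are resolved through nd_tbl instead of arp_tbl, and
 * the configure callback below packs the in6_addr values into the register
 * unchanged, with no byte swapping needed.
 */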

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}
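
/* NOP ops, installed by mlxsw_sp_span_entry_invalidate() when the mirror
 * target can no longer be offloaded. The entry keeps its ID and references
 * but mirrors nothing until it is released.
 */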
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	if (sparms.dest_port) {
		if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
			netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		} else if (span_entry->ops->configure(span_entry, sparms)) {
			netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		}
	}

	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].ref_count) {
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	span_entry->ops = ops;
	span_entry->ref_count = 1;
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}
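
/* SPAN entries are keyed by the mirror-to netdevice and reference-counted:
 * sessions that share a target share one hardware SPAN agent. A successful
 * _get must be balanced by a _put, which frees the hardware entry once the
 * last user is gone.
 */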
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
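
/* Egress-mirrored packets are staged in a per-port internal buffer (SBIB
 * register) before being forwarded to the analyzer port. The 2.5-MTU
 * figure above, rounded up to whole cells plus one spare cell, is the
 * headroom this driver reserves for those copies; the buffer is resized
 * on every MTU change, see below.
 */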
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type,
				    struct mlxsw_sp_port *port,
				    bool bind)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (type == p->type &&
		    port->local_port == p->local_port &&
		    bind == p->bound)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int i;
	int err;

	/* A given (source port, direction) can only be bound to one analyzer,
	 * so if a binding is requested, check for conflicts.
	 */
	if (bind)
		for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
				&mlxsw_sp->span.entries[i];

			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
				return -EEXIST;
		}

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	inspected_port->bound = bind;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
							     port, bind);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
		if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
			return mlxsw_sp_span_entry_types[i];

	return NULL;
}
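
/* mlxsw_sp_span_mirror_add() is the entry point invoked when mirroring is
 * requested, e.g. for an offloaded tc mirred action. An illustrative user
 * space setup (interface names are examples only):
 *
 *   tc qdisc add dev swp1 clsact
 *   tc filter add dev swp1 ingress matchall skip_sw \
 *           action mirred egress mirror dev swp2
 */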
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     const struct net_device *to_dev,
			     enum mlxsw_sp_span_type type, bool bind,
			     int *p_span_id)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_parms sparms = {NULL};
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
		return -EOPNOTSUPP;
	}

	err = ops->parms(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	*p_span_id = span_entry->id;
	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}

/* Re-resolve the parameters of every active SPAN entry and reprogram the
 * hardware where they changed, e.g. after a routing or neighbour update
 * affected a mirror-to-gretap underlay.
 */
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	ASSERT_RTNL();
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!curr->ref_count)
			continue;

		err = curr->ops->parms(curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
}