// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

struct mlxsw_sp_span {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	atomic_t active_entries_count;
	int entries_count;
	struct mlxsw_sp_span_entry entries[0];
};

static void mlxsw_sp_span_respin_work(struct work_struct *work);

static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->span->active_entries_count);
}

int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		INIT_LIST_HEAD(&curr->bound_ports_list);
		curr->id = i;
	}

	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
					  mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	cancel_work_sync(&mlxsw_sp->span->work);
	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span);
}

static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
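	/* pa_id is the index of the SPAN entry; the MPAT register associates
	 * it with the local (analyzer) port that will transmit the mirrored
	 * packets.
	 */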
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

#if IS_ENABLED(CONFIG_NET_IPGRE)
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
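	/* This assumption holds because the caller runs under RTNL (checked
	 * below), which keeps the underlying netdevice from being
	 * unregistered while the route reference is dropped.
	 */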
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* cannot offload if route has an IPv6 gateway */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
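	/* For gretap mirroring the device builds the encapsulation itself:
	 * the MPAT fields packed below describe the Ethernet header (dmac,
	 * smac, optional VLAN tag) and the IPv4/GRE header derived from the
	 * parameters resolved in mlxsw_sp_span_entry_gretap4_parms().
	 */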
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
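	/* Same as the IPv4 case above, except that the encapsulation
	 * programmed here is an IPv6/GRE header built from the ip6gretap
	 * tunnel parameters.
	 */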
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}
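/* Dummy ops installed by mlxsw_sp_span_entry_invalidate(): an invalidated
 * entry stays allocated and keeps its id, but no longer offloads anything.
 */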
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	if (sparms.dest_port) {
		if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
			netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		} else if (span_entry->ops->configure(span_entry, sparms)) {
			netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		}
	}

	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!mlxsw_sp->span->entries[i].ref_count) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	span_entry->ref_count = 1;
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (curr->ref_count && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (curr->ref_count && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int
mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	u32 buffsize;
	u32 speed;
	int err;

	err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
	if (err)
		return err;
	if (speed == SPEED_UNKNOWN)
		speed = 0;

	buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}

int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port))
		return mlxsw_sp_span_port_buffsize_update(port, mtu);
	return 0;
}

void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
				     span.speed_update_dw);

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the speed value.
	 */
	if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
		mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
						   mlxsw_sp_port->dev->mtu);
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type,
				    struct mlxsw_sp_port *port,
				    bool bind)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (type == p->type &&
		    port->local_port == p->local_port &&
		    bind == p->bound)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int i;
	int err;

	/* A given (source port, direction) can only be bound to one analyzer,
	 * so if a binding is requested, check for conflicts.
	 */
	if (bind)
		for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
				&mlxsw_sp->span->entries[i];

			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
				return -EEXIST;
		}

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		err = mlxsw_sp_span_port_buffsize_update(port, port->dev->mtu);
		if (err)
			return err;
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	inspected_port->bound = bind;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
							      port, bind);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
		if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
			return mlxsw_sp_span_entry_types[i];

	return NULL;
}

int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     const struct net_device *to_dev,
			     enum mlxsw_sp_span_type type, bool bind,
			     int *p_span_id)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_parms sparms = {NULL};
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
		return -EOPNOTSUPP;
	}

	err = ops->parms(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	*p_span_id = span_entry->id;
	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}

static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!curr->ref_count)
			continue;

		err = curr->ops->parms(curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}

void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}