// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

struct mlxsw_sp_span {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
	const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
	size_t span_entry_ops_arr_size;
	struct list_head analyzed_ports_list;
	struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
	struct list_head trigger_entries_list;
	u16 policer_id_base;
	refcount_t policer_id_base_ref_count;
	atomic_t active_entries_count;
	int entries_count;
	struct mlxsw_sp_span_entry entries[];
};

struct mlxsw_sp_span_analyzed_port {
	struct list_head list; /* Member of analyzed_ports_list */
	refcount_t ref_count;
	u8 local_port;
	bool ingress;
};

struct mlxsw_sp_span_trigger_entry {
	struct list_head list; /* Member of trigger_entries_list */
	struct mlxsw_sp_span *span;
	const struct mlxsw_sp_span_trigger_ops *ops;
	refcount_t ref_count;
	u8 local_port;
	enum mlxsw_sp_span_trigger trigger;
	struct mlxsw_sp_span_trigger_parms parms;
};

enum mlxsw_sp_span_trigger_type {
	MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
	MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
};

struct mlxsw_sp_span_trigger_ops {
	int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			enum mlxsw_sp_span_trigger trigger,
			struct mlxsw_sp_port *mlxsw_sp_port);
	int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
		      struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
	void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
};

static void mlxsw_sp_span_respin_work(struct work_struct *work);

static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->span->active_entries_count);
}
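
/* The number of SPAN agents is a hardware resource (MAX_SPAN). Current
 * usage is reported to devlink through the occupancy getter registered
 * below, so it can be inspected from user space (e.g. with
 * `devlink resource show`, assuming the standard devlink CLI).
 */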
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	refcount_set(&span->policer_id_base_ref_count, 0);
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	mutex_init(&span->analyzed_ports_lock);
	INIT_LIST_HEAD(&span->analyzed_ports_list);
	INIT_LIST_HEAD(&span->trigger_entries_list);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++)
		mlxsw_sp->span->entries[i].id = i;

	err = mlxsw_sp->span_ops->init(mlxsw_sp);
	if (err)
		goto err_init;

	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
					  mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;

err_init:
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
	return err;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	cancel_work_sync(&mlxsw_sp->span->work);
	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
}

static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp1_span_cpu_can_handle,
	.parms_set = mlxsw_sp1_span_entry_cpu_parms,
	.configure = mlxsw_sp1_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
};

static int
mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.is_static = true,
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}
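
/* Resolving a mirror target that sits behind a bridge: the egress device
 * is found by an FDB lookup on the destination MAC (in the relevant VLAN
 * for an 802.1Q bridge), and the resolved port must be in STP forwarding
 * state, otherwise the session is considered unoffloadable.
 */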
static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}
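
/* Common helper for the gretap4/gretap6 parms_set() callbacks. Starting
 * from the egress device of the underlay route, it peels VLAN uppers,
 * resolves bridges via the FDB and picks a txable LAG member, until it
 * arrives at an mlxsw front panel port. If any step fails, the entry is
 * marked unoffloadable (dest_port == NULL) rather than returning an error.
 */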
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

#if IS_ENABLED(CONFIG_NET_IPGRE)
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* cannot offload if route has an IPv6 gateway */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}
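
/* Both GRE flavors share the same offload restrictions, checked below:
 * no GRE options (keys, checksums, sequence numbers), a fixed TTL, a TOS
 * inherited from the mirrored packet, and a concrete remote address.
 * Anything else makes the session unoffloadable rather than failing.
 */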
static int
mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};
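
/* The per-ASIC ops arrays below are matched against the target netdevice
 * in array order by mlxsw_sp_span_entry_ops(); the CPU entry must stay
 * first, since its can_handle() is the only one that accepts a NULL
 * netdevice (this is sanity-checked in the per-ASIC init callbacks).
 */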
static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
	&mlxsw_sp1_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
	return 0;
}

static int
mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	/* Mirroring to the CPU port is like mirroring to any other physical
	 * port. Its local port is used instead of that of the physical port.
	 */
	return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
}

static void
mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	enum mlxsw_reg_mpat_span_type span_type;

	span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
	mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp2_span_cpu_can_handle,
	.parms_set = mlxsw_sp2_span_entry_cpu_parms,
	.configure = mlxsw_sp2_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
	&mlxsw_sp2_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static int
mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};
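
/* Configuration is best effort: if the target cannot currently be
 * offloaded (or belongs to another mlxsw instance), the entry is kept
 * with a NULL dest_port and simply does not mirror until a respin
 * manages to resolve it again.
 */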
static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		dev_err(mlxsw_sp->bus_info->dev,
			"Cannot mirror to a port which belongs to a different mlxsw instance\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
					     u16 policer_id)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
	u16 policer_id_base;
	int err;

	/* Policers set on SPAN agents must be in the range of
	 * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the
	 * base is set and the new policer is not within the range, then we
	 * must error out.
	 */
	if (refcount_read(&span->policer_id_base_ref_count)) {
		if (policer_id < span->policer_id_base ||
		    policer_id >= span->policer_id_base + span->entries_count)
			return -EINVAL;

		refcount_inc(&span->policer_id_base_ref_count);
		return 0;
	}

	/* Base must be even. */
	policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
	err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
						      policer_id_base);
	if (err)
		return err;

	span->policer_id_base = policer_id_base;
	refcount_set(&span->policer_id_base_ref_count, 1);

	return 0;
}

static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
{
	if (refcount_dec_and_test(&span->policer_id_base_ref_count))
		span->policer_id_base = 0;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	if (sparms.policer_enable) {
		int err;

		err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
							sparms.policer_id);
		if (err)
			return NULL;
	}

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
	if (span_entry->parms.policer_enable)
		mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}
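
/* Invalidation is used when the target netdevice goes away while
 * references to the SPAN entry still exist: the hardware agent is
 * deconfigured and the ops are replaced with no-ops, so subsequent
 * respins and the final put become harmless.
 */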
void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  const struct mlxsw_sp_span_parms *sparms)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
		    curr->parms.policer_enable == sparms->policer_enable &&
		    curr->parms.policer_id == sparms->policer_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
						       &sparms);
	if (span_entry) {
		/* Already exists, just take a reference */
		refcount_inc(&span_entry->ref_count);
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (refcount_dec_and_test(&span_entry->ref_count))
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp_hdroom hdroom;

	hdroom = *mlxsw_sp_port->hdroom;
	hdroom.int_buf.enable = enable;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}

static int
mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
}

static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
}

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
				 bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;

	list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
		if (analyzed_port->local_port == local_port &&
		    analyzed_port->ingress == ingress)
			return analyzed_port;
	}

	return NULL;
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	struct mlxsw_sp_span *span = mlxsw_sp->span;
	size_t i;

	for (i = 0; i < span->span_entry_ops_arr_size; ++i)
		if (span->span_entry_ops_arr[i]->can_handle(to_dev))
			return span->span_entry_ops_arr[i];

	return NULL;
}
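
/* Respin re-resolves the parameters of all dynamic (non-static) entries,
 * e.g. after a routing or neighbour change, and reconfigures any entry
 * whose resolved parameters differ from the ones currently programmed.
 */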
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!refcount_read(&curr->ref_count))
			continue;

		if (curr->ops->is_static)
			continue;

		err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}

void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}
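
/* A mirroring session is typically set up in three steps (illustrative
 * sketch; error handling omitted, and the trigger/ingress values are
 * caller-specific):
 *
 *	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
 *	struct mlxsw_sp_span_agent_parms agent_parms = {
 *		.to_dev = to_dev,
 *	};
 *	int span_id;
 *
 *	mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms);
 *	mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
 *	trigger_parms.span_id = span_id;
 *	mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
 *				 &trigger_parms);
 *
 * Teardown calls the corresponding unbind/put functions in reverse order.
 */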
int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
			    const struct mlxsw_sp_span_agent_parms *parms)
{
	const struct net_device *to_dev = parms->to_dev;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp_span_parms sparms;
	int err;

	ASSERT_RTNL();

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
		return -EOPNOTSUPP;
	}

	memset(&sparms, 0, sizeof(sparms));
	err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
	if (err)
		return err;

	sparms.policer_id = parms->policer_id;
	sparms.policer_enable = parms->policer_enable;
	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	*p_span_id = span_entry->id;

	return 0;
}

void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	struct mlxsw_sp_span_entry *span_entry;

	ASSERT_RTNL();

	span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
	if (WARN_ON_ONCE(!span_entry))
		return;

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	int err;

	analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
	if (!analyzed_port)
		return ERR_PTR(-ENOMEM);

	refcount_set(&analyzed_port->ref_count, 1);
	analyzed_port->local_port = mlxsw_sp_port->local_port;
	analyzed_port->ingress = ingress;
	list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);

	/* An egress mirror buffer should be allocated on the egress port which
	 * does the mirroring.
	 */
	if (!ingress) {
		err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
		if (err)
			goto err_buffer_update;
	}

	return analyzed_port;

err_buffer_update:
	list_del(&analyzed_port->list);
	kfree(analyzed_port);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_span_analyzed_port *
				    analyzed_port)
{
	/* Remove egress mirror buffer now that port is no longer analyzed
	 * at egress.
	 */
	if (!analyzed_port->ingress)
		mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);

	list_del(&analyzed_port->list);
	kfree(analyzed_port);
}

int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u8 local_port = mlxsw_sp_port->local_port;
	int err = 0;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (analyzed_port) {
		refcount_inc(&analyzed_port->ref_count);
		goto out_unlock;
	}

	analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
							   mlxsw_sp_port,
							   ingress);
	if (IS_ERR(analyzed_port))
		err = PTR_ERR(analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
	return err;
}

void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u8 local_port = mlxsw_sp_port->local_port;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (WARN_ON_ONCE(!analyzed_port))
		goto out_unlock;

	if (!refcount_dec_and_test(&analyzed_port->ref_count))
		goto out_unlock;

	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
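
/* Two trigger flavors exist. Port triggers (ingress/egress mirroring on
 * a specific port) are bound per port and fully active once bound.
 * Global triggers (tail drop, early drop, ECN) are bound once for the
 * whole ASIC and then enabled per port and traffic class.
 */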
static int
__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
				  struct mlxsw_sp_span_trigger_entry *
				  trigger_entry, bool enable)
{
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	enum mlxsw_reg_mpar_i_e i_e;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
			    trigger_entry->parms.span_id);
	return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
				trigger_entry)
{
	return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
						 trigger_entry, true);
}

static void
mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry)
{
	__mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
					  false);
}

static bool
mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger &&
	       trigger_entry->local_port == mlxsw_sp_port->local_port;
}

static int
mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry,
				  struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
	/* Port triggers are enabled during binding. */
	return 0;
}

static void
mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp_span_trigger_port_ops = {
	.bind = mlxsw_sp_span_trigger_port_bind,
	.unbind = mlxsw_sp_span_trigger_port_unbind,
	.matches = mlxsw_sp_span_trigger_port_matches,
	.enable = mlxsw_sp_span_trigger_port_enable,
	.disable = mlxsw_sp_span_trigger_port_disable,
};

static int
mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
}

static bool
mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	WARN_ON_ONCE(1);
	return false;
}

static int
mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp1_span_trigger_global_ops = {
	.bind = mlxsw_sp1_span_trigger_global_bind,
	.unbind = mlxsw_sp1_span_trigger_global_unbind,
	.matches = mlxsw_sp1_span_trigger_global_matches,
	.enable = mlxsw_sp1_span_trigger_global_enable,
	.disable = mlxsw_sp1_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp1_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp1_span_trigger_global_ops,
};
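
/* On Spectrum-2 and later, a global trigger is bound to its SPAN agent
 * with the MPAGR register, while per-port, per-TC enablement is done
 * through MOMTE in the enable()/disable() callbacks below.
 */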
static int
mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	enum mlxsw_reg_mpagr_trigger trigger;
	char mpagr_pl[MLXSW_REG_MPAGR_LEN];

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
			     1);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
}

static void
mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
	/* There is no unbinding for global triggers. The trigger should be
	 * disabled on all ports by now.
	 */
}

static bool
mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger;
}

static int
__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				       trigger_entry,
				       struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 tc, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	char momte_pl[MLXSW_REG_MOMTE_LEN];
	enum mlxsw_reg_momte_type type;
	int err;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		type = MLXSW_REG_MOMTE_TYPE_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_REG_MOMTE_TYPE_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* Query existing configuration in order to only change the state of
	 * the specified traffic class.
	 */
	mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
	if (err)
		return err;

	mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
}

static int
mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
						      mlxsw_sp_port, tc, true);
}

static void
mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
	__mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
					       false);
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp2_span_trigger_global_ops = {
	.bind = mlxsw_sp2_span_trigger_global_bind,
	.unbind = mlxsw_sp2_span_trigger_global_unbind,
	.matches = mlxsw_sp2_span_trigger_global_matches,
	.enable = mlxsw_sp2_span_trigger_global_enable,
	.disable = mlxsw_sp2_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp2_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp2_span_trigger_global_ops,
};

static void
mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
{
	struct mlxsw_sp_span *span = trigger_entry->span;
	enum mlxsw_sp_span_trigger_type type;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
		break;
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	trigger_entry->ops = span->span_trigger_ops_arr[type];
}
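
/* Trigger entries are reference counted and kept on a list, keyed by the
 * ops->matches() callback: per-port triggers match on (trigger, port),
 * global ones on the trigger alone. The hardware binding is performed
 * once, when the first user creates the entry.
 */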
static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct mlxsw_sp_span_trigger_parms
				   *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err;

	trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
	if (!trigger_entry)
		return ERR_PTR(-ENOMEM);

	refcount_set(&trigger_entry->ref_count, 1);
	trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
						    0;
	trigger_entry->trigger = trigger;
	memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
	trigger_entry->span = span;
	mlxsw_sp_span_trigger_ops_set(trigger_entry);
	list_add_tail(&trigger_entry->list, &span->trigger_entries_list);

	err = trigger_entry->ops->bind(trigger_entry);
	if (err)
		goto err_trigger_entry_bind;

	return trigger_entry;

err_trigger_entry_bind:
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_trigger_entry *
				    trigger_entry)
{
	trigger_entry->ops->unbind(trigger_entry);
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
				 enum mlxsw_sp_span_trigger trigger,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
		if (trigger_entry->ops->matches(trigger_entry, trigger,
						mlxsw_sp_port))
			return trigger_entry;
	}

	return NULL;
}

int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
			     enum mlxsw_sp_span_trigger trigger,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err = 0;

	ASSERT_RTNL();

	if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
		return -EINVAL;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (trigger_entry) {
		if (trigger_entry->parms.span_id != parms->span_id)
			return -EINVAL;
		refcount_inc(&trigger_entry->ref_count);
		goto out;
	}

	trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
							   trigger,
							   mlxsw_sp_port,
							   parms);
	if (IS_ERR(trigger_entry))
		err = PTR_ERR(trigger_entry);

out:
	return err;
}

void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_span_trigger trigger,
				struct mlxsw_sp_port *mlxsw_sp_port,
				const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
							 parms->span_id)))
		return;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	if (!refcount_dec_and_test(&trigger_entry->ref_count))
		return;

	mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}

int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return -EINVAL;

	return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
}

void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}

static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
		    &mlxsw_sp1_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	return -EOPNOTSUPP;
}

const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
	.init = mlxsw_sp1_span_init,
	.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};

static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
		    &mlxsw_sp2_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50

static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	char mogcr_pl[MLXSW_REG_MOGCR_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
	if (err)
		return err;

	mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
}

const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};

const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};