// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

struct mlxsw_sp_span {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
	const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
	size_t span_entry_ops_arr_size;
	struct list_head analyzed_ports_list;
	struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
	struct list_head trigger_entries_list;
	u16 policer_id_base;
	refcount_t policer_id_base_ref_count;
	atomic_t active_entries_count;
	int entries_count;
	struct mlxsw_sp_span_entry entries[];
};

struct mlxsw_sp_span_analyzed_port {
	struct list_head list; /* Member of analyzed_ports_list */
	refcount_t ref_count;
	u16 local_port;
	bool ingress;
};

struct mlxsw_sp_span_trigger_entry {
	struct list_head list; /* Member of trigger_entries_list */
	struct mlxsw_sp_span *span;
	const struct mlxsw_sp_span_trigger_ops *ops;
	refcount_t ref_count;
	u16 local_port;
	enum mlxsw_sp_span_trigger trigger;
	struct mlxsw_sp_span_trigger_parms parms;
};

enum mlxsw_sp_span_trigger_type {
	MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
	MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
};
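
/* Two trigger flavors are implemented below: per-port triggers
 * (INGRESS / EGRESS), which are both bound and enabled when the agent is
 * bound to a port, and global triggers (tail / early drop, ECN), which are
 * bound once per system and then enabled per port and traffic class.
 */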
struct mlxsw_sp_span_trigger_ops {
	int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			enum mlxsw_sp_span_trigger trigger,
			struct mlxsw_sp_port *mlxsw_sp_port);
	int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
		      struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
	void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
};

static void mlxsw_sp_span_respin_work(struct work_struct *work);

static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->span->active_entries_count);
}

int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	refcount_set(&span->policer_id_base_ref_count, 0);
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	mutex_init(&span->analyzed_ports_lock);
	INIT_LIST_HEAD(&span->analyzed_ports_list);
	INIT_LIST_HEAD(&span->trigger_entries_list);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++)
		mlxsw_sp->span->entries[i].id = i;

	err = mlxsw_sp->span_ops->init(mlxsw_sp);
	if (err)
		goto err_init;

	devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
				       mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;

err_init:
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
	return err;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	cancel_work_sync(&mlxsw_sp->span->work);
	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
}

static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp1_span_cpu_can_handle,
	.parms_set = mlxsw_sp1_span_entry_cpu_parms,
	.configure = mlxsw_sp1_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
};

static int
mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
	mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.is_static = true,
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};
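
/* Resolve the MAC address of the tunnel remote / gateway: look up (or
 * create) the neighbour entry, kick neighbour resolution and only use the
 * hardware address if it is currently valid. An unresolved neighbour makes
 * the mirror unoffloadable until the next respin.
 */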
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid || br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}
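
/* Resolve the L3 egress netdevice of a mirror-encapsulating tunnel down to
 * a front-panel port: peel a possible VLAN upper, cross a bridge via an FDB
 * lookup on the resolved gateway MAC, peel a bridge-internal VLAN and pick
 * a txable LAG member, bailing out as unoffloadable at any dead end.
 */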
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
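
/* For gretap destinations the tunnel netdevice itself never transmits the
 * mirrored packets. The route and neighbour are resolved here in software
 * and the resulting L2 / L3 parameters are programmed into the ASIC, which
 * builds the encapsulation on its own; later route or neighbour changes
 * are picked up by the respin work.
 */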
#if IS_ENABLED(CONFIG_NET_IPGRE)
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* Cannot offload if route has an IPv6 gateway. */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
	&mlxsw_sp1_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
	return 0;
}

static int
mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	/* Mirroring to the CPU port is like mirroring to any other physical
	 * port. Its local port is used instead of that of the physical port.
	 */
	return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
}

static void
mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	enum mlxsw_reg_mpat_span_type span_type;

	span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
	mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp2_span_cpu_can_handle,
	.parms_set = mlxsw_sp2_span_entry_cpu_parms,
	.configure = mlxsw_sp2_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
	&mlxsw_sp2_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static int
mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		dev_err(mlxsw_sp->bus_info->dev,
			"Cannot mirror to a port which belongs to a different mlxsw instance\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}
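
/* For example (assuming the base is not yet set), binding a first SPAN
 * policer with id 7 rounds the base down to 6, so that subsequently bound
 * policers must fall within 6..6 + max_span_agents - 1.
 */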
static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
					     u16 policer_id)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
	u16 policer_id_base;
	int err;

	/* Policers set on SPAN agents must be in the range of
	 * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the
	 * base is set and the new policer is not within the range, then we
	 * must error out.
	 */
	if (refcount_read(&span->policer_id_base_ref_count)) {
		if (policer_id < span->policer_id_base ||
		    policer_id >= span->policer_id_base + span->entries_count)
			return -EINVAL;

		refcount_inc(&span->policer_id_base_ref_count);
		return 0;
	}

	/* Base must be even. */
	policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
	err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
						      policer_id_base);
	if (err)
		return err;

	span->policer_id_base = policer_id_base;
	refcount_set(&span->policer_id_base_ref_count, 1);

	return 0;
}

static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
{
	if (refcount_dec_and_test(&span->policer_id_base_ref_count))
		span->policer_id_base = 0;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	if (sparms.policer_enable) {
		int err;

		err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
							sparms.policer_id);
		if (err)
			return NULL;
	}

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
	if (span_entry->parms.policer_enable)
		mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  const struct mlxsw_sp_span_parms *sparms)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
		    curr->parms.policer_enable == sparms->policer_enable &&
		    curr->parms.policer_id == sparms->policer_id &&
		    curr->parms.session_id == sparms->session_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
						       &sparms);
	if (span_entry) {
		/* Already exists, just take a reference */
		refcount_inc(&span_entry->ref_count);
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (refcount_dec_and_test(&span_entry->ref_count))
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
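
/* Toggle the internal mirroring buffer in the port's headroom
 * configuration. This buffer must be allocated while the port is analyzed
 * at egress (see mlxsw_sp_span_analyzed_port_create() below) and is
 * released once it no longer is.
 */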
static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp_hdroom hdroom;

	hdroom = *mlxsw_sp_port->hdroom;
	hdroom.int_buf.enable = enable;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}

static int
mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
}

static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
}

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port,
				 bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;

	list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
		if (analyzed_port->local_port == local_port &&
		    analyzed_port->ingress == ingress)
			return analyzed_port;
	}

	return NULL;
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	struct mlxsw_sp_span *span = mlxsw_sp->span;
	size_t i;

	for (i = 0; i < span->span_entry_ops_arr_size; ++i)
		if (span->span_entry_ops_arr[i]->can_handle(to_dev))
			return span->span_entry_ops_arr[i];

	return NULL;
}
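
/* Dynamic (tunnel) SPAN entries depend on routes and neighbours that may
 * change under us. The respin work re-runs parms_set() for every active
 * non-static entry and, if the resolved parameters differ from the
 * programmed ones, deconfigures and reconfigures the entry in place.
 */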
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!refcount_read(&curr->ref_count))
			continue;

		if (curr->ops->is_static)
			continue;

		err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}

void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}
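
/* A SPAN agent is the mirroring-destination half of a mirroring session.
 * A typical client (e.g. the matchall offload) sets one up roughly like
 * this, under RTNL:
 *
 *	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms);
 *	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
 *	err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
 *				       &trigger_parms);
 *
 * with the corresponding put / unbind calls in reverse order on teardown.
 */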
int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
			    const struct mlxsw_sp_span_agent_parms *parms)
{
	const struct net_device *to_dev = parms->to_dev;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp_span_parms sparms;
	int err;

	ASSERT_RTNL();

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
		return -EOPNOTSUPP;
	}

	memset(&sparms, 0, sizeof(sparms));
	err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
	if (err)
		return err;

	sparms.policer_id = parms->policer_id;
	sparms.policer_enable = parms->policer_enable;
	sparms.session_id = parms->session_id;
	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	*p_span_id = span_entry->id;

	return 0;
}

void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	struct mlxsw_sp_span_entry *span_entry;

	ASSERT_RTNL();

	span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
	if (WARN_ON_ONCE(!span_entry))
		return;

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	int err;

	analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
	if (!analyzed_port)
		return ERR_PTR(-ENOMEM);

	refcount_set(&analyzed_port->ref_count, 1);
	analyzed_port->local_port = mlxsw_sp_port->local_port;
	analyzed_port->ingress = ingress;
	list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);

	/* An egress mirror buffer should be allocated on the egress port which
	 * does the mirroring.
	 */
	if (!ingress) {
		err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
		if (err)
			goto err_buffer_update;
	}

	return analyzed_port;

err_buffer_update:
	list_del(&analyzed_port->list);
	kfree(analyzed_port);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_span_analyzed_port *
				    analyzed_port)
{
	/* Remove egress mirror buffer now that port is no longer analyzed
	 * at egress.
	 */
	if (!analyzed_port->ingress)
		mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);

	list_del(&analyzed_port->list);
	kfree(analyzed_port);
}

int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u16 local_port = mlxsw_sp_port->local_port;
	int err = 0;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (analyzed_port) {
		refcount_inc(&analyzed_port->ref_count);
		goto out_unlock;
	}

	analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
							   mlxsw_sp_port,
							   ingress);
	if (IS_ERR(analyzed_port))
		err = PTR_ERR(analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
	return err;
}

void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u16 local_port = mlxsw_sp_port->local_port;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (WARN_ON_ONCE(!analyzed_port))
		goto out_unlock;

	if (!refcount_dec_and_test(&analyzed_port->ref_count))
		goto out_unlock;

	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
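
/* Port triggers are bound through the MPAR register, which attaches a SPAN
 * agent to one port and direction and already carries the sampling
 * probability, so no separate enable step is needed for them.
 */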
static int
__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
				  struct mlxsw_sp_span_trigger_entry *
				  trigger_entry, bool enable)
{
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	enum mlxsw_reg_mpar_i_e i_e;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX)
		return -EINVAL;

	mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
			    trigger_entry->parms.span_id,
			    trigger_entry->parms.probability_rate);
	return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
				trigger_entry)
{
	return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
						 trigger_entry, true);
}

static void
mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry)
{
	__mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
					  false);
}

static bool
mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger &&
	       trigger_entry->local_port == mlxsw_sp_port->local_port;
}

static int
mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry,
				  struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
	/* Port triggers are enabled during binding. */
	return 0;
}

static void
mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp_span_trigger_port_ops = {
	.bind = mlxsw_sp_span_trigger_port_bind,
	.unbind = mlxsw_sp_span_trigger_port_unbind,
	.matches = mlxsw_sp_span_trigger_port_matches,
	.enable = mlxsw_sp_span_trigger_port_enable,
	.disable = mlxsw_sp_span_trigger_port_disable,
};

static int
mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
}

static bool
mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	WARN_ON_ONCE(1);
	return false;
}

static int
mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp1_span_trigger_global_ops = {
	.bind = mlxsw_sp1_span_trigger_global_bind,
	.unbind = mlxsw_sp1_span_trigger_global_unbind,
	.matches = mlxsw_sp1_span_trigger_global_matches,
	.enable = mlxsw_sp1_span_trigger_global_enable,
	.disable = mlxsw_sp1_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp1_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp1_span_trigger_global_ops,
};
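
/* On Spectrum-2 and later, global triggers (tail drop, early drop, ECN) are
 * bound once system-wide through the MPAGR register and then switched on
 * and off per port and traffic class through MOMTE.
 */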
static int
mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	enum mlxsw_reg_mpagr_trigger trigger;
	char mpagr_pl[MLXSW_REG_MPAGR_LEN];

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX)
		return -EINVAL;

	mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
			     trigger_entry->parms.probability_rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
}

static void
mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
	/* There is no unbinding for global triggers. The trigger should be
	 * disabled on all ports by now.
	 */
}

static bool
mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger;
}

static int
__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				       trigger_entry,
				       struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 tc, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	char momte_pl[MLXSW_REG_MOMTE_LEN];
	enum mlxsw_reg_momte_type type;
	int err;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		type = MLXSW_REG_MOMTE_TYPE_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_REG_MOMTE_TYPE_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* Query existing configuration in order to only change the state of
	 * the specified traffic class.
	 */
	mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
	if (err)
		return err;

	mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
}

static int
mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
						      mlxsw_sp_port, tc, true);
}

static void
mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
	__mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
					       false);
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp2_span_trigger_global_ops = {
	.bind = mlxsw_sp2_span_trigger_global_bind,
	.unbind = mlxsw_sp2_span_trigger_global_unbind,
	.matches = mlxsw_sp2_span_trigger_global_matches,
	.enable = mlxsw_sp2_span_trigger_global_enable,
	.disable = mlxsw_sp2_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp2_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp2_span_trigger_global_ops,
};

static void
mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
{
	struct mlxsw_sp_span *span = trigger_entry->span;
	enum mlxsw_sp_span_trigger_type type;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
		break;
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	trigger_entry->ops = span->span_trigger_ops_arr[type];
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct mlxsw_sp_span_trigger_parms
				   *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err;

	trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
	if (!trigger_entry)
		return ERR_PTR(-ENOMEM);

	refcount_set(&trigger_entry->ref_count, 1);
	trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
						    0;
	trigger_entry->trigger = trigger;
	memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
	trigger_entry->span = span;
	mlxsw_sp_span_trigger_ops_set(trigger_entry);
	list_add_tail(&trigger_entry->list, &span->trigger_entries_list);

	err = trigger_entry->ops->bind(trigger_entry);
	if (err)
		goto err_trigger_entry_bind;

	return trigger_entry;

err_trigger_entry_bind:
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_trigger_entry *
				    trigger_entry)
{
	trigger_entry->ops->unbind(trigger_entry);
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
				 enum mlxsw_sp_span_trigger trigger,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
		if (trigger_entry->ops->matches(trigger_entry, trigger,
						mlxsw_sp_port))
			return trigger_entry;
	}

	return NULL;
}
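
/* Trigger entries are reference counted and shared: rebinding the same
 * trigger on the same port must use the same SPAN agent and probability
 * rate as the existing binding, otherwise -EINVAL is returned.
 */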
int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
			     enum mlxsw_sp_span_trigger trigger,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err = 0;

	ASSERT_RTNL();

	if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
		return -EINVAL;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (trigger_entry) {
		if (trigger_entry->parms.span_id != parms->span_id ||
		    trigger_entry->parms.probability_rate !=
		    parms->probability_rate)
			return -EINVAL;
		refcount_inc(&trigger_entry->ref_count);
		goto out;
	}

	trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
							   trigger,
							   mlxsw_sp_port,
							   parms);
	if (IS_ERR(trigger_entry))
		err = PTR_ERR(trigger_entry);

out:
	return err;
}

void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_span_trigger trigger,
				struct mlxsw_sp_port *mlxsw_sp_port,
				const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
							 parms->span_id)))
		return;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	if (!refcount_dec_and_test(&trigger_entry->ref_count))
		return;

	mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}

int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return -EINVAL;

	return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
}

void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}

bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger)
{
	switch (trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		return true;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		return false;
	}

	WARN_ON_ONCE(1);
	return false;
}
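
/* Per-ASIC span_ops: Spectrum-1 supports neither mirroring to the CPU port
 * nor SPAN policers (its policer_id_base_set() returns -EOPNOTSUPP), while
 * Spectrum-2 and Spectrum-3 share one implementation of both.
 */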
static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
		    &mlxsw_sp1_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	return -EOPNOTSUPP;
}

const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
	.init = mlxsw_sp1_span_init,
	.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};

static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
		    &mlxsw_sp2_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50

static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	char mogcr_pl[MLXSW_REG_MOGCR_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
	if (err)
		return err;

	mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
}

const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};

const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};