// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

struct mlxsw_sp_span {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
	const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
	size_t span_entry_ops_arr_size;
	struct list_head analyzed_ports_list;
	struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
	struct list_head trigger_entries_list;
	u16 policer_id_base;
	refcount_t policer_id_base_ref_count;
	atomic_t active_entries_count;
	int entries_count;
	struct mlxsw_sp_span_entry entries[];
};

struct mlxsw_sp_span_analyzed_port {
	struct list_head list; /* Member of analyzed_ports_list */
	refcount_t ref_count;
	u16 local_port;
	bool ingress;
};

struct mlxsw_sp_span_trigger_entry {
	struct list_head list; /* Member of trigger_entries_list */
	struct mlxsw_sp_span *span;
	const struct mlxsw_sp_span_trigger_ops *ops;
	refcount_t ref_count;
	u16 local_port;
	enum mlxsw_sp_span_trigger trigger;
	struct mlxsw_sp_span_trigger_parms parms;
};

enum mlxsw_sp_span_trigger_type {
	MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
	MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
};

struct mlxsw_sp_span_trigger_ops {
	int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			enum mlxsw_sp_span_trigger trigger,
			struct mlxsw_sp_port *mlxsw_sp_port);
	int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
		      struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
	void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
};

static void mlxsw_sp_span_respin_work(struct work_struct *work);

static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->span->active_entries_count);
}

int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	refcount_set(&span->policer_id_base_ref_count, 0);
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	mutex_init(&span->analyzed_ports_lock);
	INIT_LIST_HEAD(&span->analyzed_ports_list);
	INIT_LIST_HEAD(&span->trigger_entries_list);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++)
		mlxsw_sp->span->entries[i].id = i;

	err = mlxsw_sp->span_ops->init(mlxsw_sp);
	if (err)
		goto err_init;

	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
					  mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;

err_init:
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
	return err;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	cancel_work_sync(&mlxsw_sp->span->work);
	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
}
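
/* A hedged sketch of how the occupancy getter registered in
 * mlxsw_sp_span_init() surfaces to user space. Assuming the
 * MLXSW_SP_RESOURCE_SPAN resource is registered elsewhere under a name
 * such as "span_agents", and a hypothetical device "pci/0000:03:00.0":
 *
 *   # devlink resource show pci/0000:03:00.0
 *   ...
 *   name span_agents size 8 occ 2 ...
 *
 * The "occ" value is whatever mlxsw_sp_span_occ_get() returns, i.e. the
 * number of currently active SPAN agents.
 */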

static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp1_span_cpu_can_handle,
	.parms_set = mlxsw_sp1_span_entry_cpu_parms,
	.configure = mlxsw_sp1_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
};

static int
mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
	mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.is_static = true,
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
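
/* An illustrative walk through the resolution above, under an assumed
 * topology (all device names are hypothetical). Suppose the underlay
 * route of the mirror destination egresses via "br0.10", a VLAN upper of
 * the VLAN-aware bridge "br0", the FDB lookup for the next hop's MAC in
 * VLAN 10 points at the LAG "bond0", and "swp1" is the first bond member
 * with carrier that is txable and an mlxsw port:
 *
 *   br0.10 -> br0 (FDB lookup, VLAN 10) -> bond0 -> swp1
 *
 * swp1 then becomes sparmsp->dest_port and the mirrored packets are
 * tagged with VLAN 10, unless the bridge egress is untagged, in which
 * case vid stays 0.
 */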

#if IS_ENABLED(CONFIG_NET_IPGRE)
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* Cannot offload if route has an IPv6 gateway. */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif
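
/* A hedged user-space sketch of a gretap destination that satisfies the
 * parms_set restrictions above (fixed TTL, inherited TOS, no key/csum);
 * the device names and addresses are hypothetical:
 *
 *   # ip link add name gt4 type gretap local 192.0.2.1 remote 192.0.2.2 \
 *		ttl 64 tos inherit
 *   # ip link set dev gt4 up
 *   # tc qdisc add dev swp1 clsact
 *   # tc filter add dev swp1 ingress matchall skip_sw \
 *		action mirred egress mirror dev gt4
 */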

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u16 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
	&mlxsw_sp1_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
	return 0;
}

static int
mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	/* Mirroring to the CPU port is like mirroring to any other physical
	 * port. Its local port is used instead of that of the physical port.
	 */
	return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
}

static void
mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	enum mlxsw_reg_mpat_span_type span_type;

	span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
	mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp2_span_cpu_can_handle,
	.parms_set = mlxsw_sp2_span_entry_cpu_parms,
	.configure = mlxsw_sp2_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
	&mlxsw_sp2_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static int
mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		dev_err(mlxsw_sp->bus_info->dev,
			"Cannot mirror to a port which belongs to a different mlxsw instance\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}
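
/* Note that on configure failure the parms are still stored, only with
 * dest_port cleared: the agent stays allocated but inactive, and a later
 * respin (see mlxsw_sp_span_respin_work() below) may re-resolve it once
 * the destination becomes offloadable again.
 */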

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
					     u16 policer_id)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
	u16 policer_id_base;
	int err;

	/* Policers set on SPAN agents must be in the range of
	 * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the
	 * base is set and the new policer is not within the range, then we
	 * must error out.
	 */
	if (refcount_read(&span->policer_id_base_ref_count)) {
		if (policer_id < span->policer_id_base ||
		    policer_id >= span->policer_id_base + span->entries_count)
			return -EINVAL;

		refcount_inc(&span->policer_id_base_ref_count);
		return 0;
	}

	/* Base must be even. */
	policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
	err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
						      policer_id_base);
	if (err)
		return err;

	span->policer_id_base = policer_id_base;
	refcount_set(&span->policer_id_base_ref_count, 1);

	return 0;
}

static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
{
	if (refcount_dec_and_test(&span->policer_id_base_ref_count))
		span->policer_id_base = 0;
}
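
/* Worked example of the base computation above: with entries_count == 8,
 * if the first policer bound to a SPAN agent has ID 7, the base is
 * rounded down to 6 and subsequent SPAN policers must use IDs 6..13.
 * Once the last user drops its reference, the base is cleared and the
 * next policer may establish a new base.
 */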

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	if (sparms.policer_enable) {
		int err;

		err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
							sparms.policer_id);
		if (err)
			return NULL;
	}

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
	if (span_entry->parms.policer_enable)
		mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  const struct mlxsw_sp_span_parms *sparms)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
		    curr->parms.policer_enable == sparms->policer_enable &&
		    curr->parms.policer_id == sparms->policer_id &&
		    curr->parms.session_id == sparms->session_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
						       &sparms);
	if (span_entry) {
		/* Already exists, just take a reference */
		refcount_inc(&span_entry->ref_count);
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (refcount_dec_and_test(&span_entry->ref_count))
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
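
/* Sharing note: two mirror sessions towards the same netdevice reuse one
 * SPAN agent only if their policer and session-ID parameters match as
 * well, so e.g. a policed and an unpoliced mirror to the same analyzer
 * port consume two of the MAX_SPAN entries.
 */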

static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp_hdroom hdroom;

	hdroom = *mlxsw_sp_port->hdroom;
	hdroom.int_buf.enable = enable;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}

static int
mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
}

static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
}

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port,
				 bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;

	list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
		if (analyzed_port->local_port == local_port &&
		    analyzed_port->ingress == ingress)
			return analyzed_port;
	}

	return NULL;
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	struct mlxsw_sp_span *span = mlxsw_sp->span;
	size_t i;

	for (i = 0; i < span->span_entry_ops_arr_size; ++i)
		if (span->span_entry_ops_arr[i]->can_handle(to_dev))
			return span->span_entry_ops_arr[i];

	return NULL;
}

static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!refcount_read(&curr->ref_count))
			continue;

		if (curr->ops->is_static)
			continue;

		err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}

void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}

int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
			    const struct mlxsw_sp_span_agent_parms *parms)
{
	const struct net_device *to_dev = parms->to_dev;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp_span_parms sparms;
	int err;

	ASSERT_RTNL();

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
		return -EOPNOTSUPP;
	}

	memset(&sparms, 0, sizeof(sparms));
	err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
	if (err)
		return err;

	sparms.policer_id = parms->policer_id;
	sparms.policer_enable = parms->policer_enable;
	sparms.session_id = parms->session_id;
	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	*p_span_id = span_entry->id;

	return 0;
}

void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	struct mlxsw_sp_span_entry *span_entry;

	ASSERT_RTNL();

	span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
	if (WARN_ON_ONCE(!span_entry))
		return;

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}
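
/* A minimal sketch of the expected caller sequence, under RTNL (error
 * handling omitted; the variable names are hypothetical, but this is
 * roughly how mirroring offload is expected to drive this API):
 *
 *	struct mlxsw_sp_span_agent_parms agent_parms = {
 *		.to_dev = to_dev,
 *	};
 *	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
 *	int span_id;
 *
 *	mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms);
 *	mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
 *	trigger_parms.span_id = span_id;
 *	trigger_parms.probability_rate = 1;
 *	mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
 *				 &trigger_parms);
 *
 * Teardown happens in reverse: mlxsw_sp_span_agent_unbind(),
 * mlxsw_sp_span_analyzed_port_put(), mlxsw_sp_span_agent_put().
 */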

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	int err;

	analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
	if (!analyzed_port)
		return ERR_PTR(-ENOMEM);

	refcount_set(&analyzed_port->ref_count, 1);
	analyzed_port->local_port = mlxsw_sp_port->local_port;
	analyzed_port->ingress = ingress;
	list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);

	/* An egress mirror buffer should be allocated on the egress port which
	 * does the mirroring.
	 */
	if (!ingress) {
		err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
		if (err)
			goto err_buffer_update;
	}

	return analyzed_port;

err_buffer_update:
	list_del(&analyzed_port->list);
	kfree(analyzed_port);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_span_analyzed_port *
				    analyzed_port)
{
	/* Remove egress mirror buffer now that port is no longer analyzed
	 * at egress.
	 */
	if (!analyzed_port->ingress)
		mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);

	list_del(&analyzed_port->list);
	kfree(analyzed_port);
}

int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u16 local_port = mlxsw_sp_port->local_port;
	int err = 0;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (analyzed_port) {
		refcount_inc(&analyzed_port->ref_count);
		goto out_unlock;
	}

	analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
							   mlxsw_sp_port,
							   ingress);
	if (IS_ERR(analyzed_port))
		err = PTR_ERR(analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
	return err;
}

void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u16 local_port = mlxsw_sp_port->local_port;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (WARN_ON_ONCE(!analyzed_port))
		goto out_unlock;

	if (!refcount_dec_and_test(&analyzed_port->ref_count))
		goto out_unlock;

	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}

static int
__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
				  struct mlxsw_sp_span_trigger_entry *
				  trigger_entry, bool enable)
{
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	enum mlxsw_reg_mpar_i_e i_e;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX)
		return -EINVAL;

	mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
			    trigger_entry->parms.span_id,
			    trigger_entry->parms.probability_rate);
	return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
				trigger_entry)
{
	return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
						 trigger_entry, true);
}

static void
mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry)
{
	__mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
					  false);
}

static bool
mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger &&
	       trigger_entry->local_port == mlxsw_sp_port->local_port;
}

static int
mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry,
				  struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
	/* Port triggers are enabled during binding. */
	return 0;
}

static void
mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp_span_trigger_port_ops = {
	.bind = mlxsw_sp_span_trigger_port_bind,
	.unbind = mlxsw_sp_span_trigger_port_unbind,
	.matches = mlxsw_sp_span_trigger_port_matches,
	.enable = mlxsw_sp_span_trigger_port_enable,
	.disable = mlxsw_sp_span_trigger_port_disable,
};

static int
mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
}

static bool
mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	WARN_ON_ONCE(1);
	return false;
}

static int
mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp1_span_trigger_global_ops = {
	.bind = mlxsw_sp1_span_trigger_global_bind,
	.unbind = mlxsw_sp1_span_trigger_global_unbind,
	.matches = mlxsw_sp1_span_trigger_global_matches,
	.enable = mlxsw_sp1_span_trigger_global_enable,
	.disable = mlxsw_sp1_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp1_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp1_span_trigger_global_ops,
};

static int
mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	enum mlxsw_reg_mpagr_trigger trigger;
	char mpagr_pl[MLXSW_REG_MPAGR_LEN];

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX)
		return -EINVAL;

	mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
			     trigger_entry->parms.probability_rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
}

static void
mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
	/* There is no unbinding for global triggers. The trigger should be
	 * disabled on all ports by now.
	 */
}

static bool
mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger;
}

static int
__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				       trigger_entry,
				       struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 tc, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	char momte_pl[MLXSW_REG_MOMTE_LEN];
	enum mlxsw_reg_momte_type type;
	int err;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		type = MLXSW_REG_MOMTE_TYPE_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_REG_MOMTE_TYPE_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* Query existing configuration in order to only change the state of
	 * the specified traffic class.
	 */
	mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
	if (err)
		return err;

	mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
}

static int
mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
						      mlxsw_sp_port, tc, true);
}

static void
mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
	__mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
					       false);
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp2_span_trigger_global_ops = {
	.bind = mlxsw_sp2_span_trigger_global_bind,
	.unbind = mlxsw_sp2_span_trigger_global_unbind,
	.matches = mlxsw_sp2_span_trigger_global_matches,
	.enable = mlxsw_sp2_span_trigger_global_enable,
	.disable = mlxsw_sp2_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp2_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp2_span_trigger_global_ops,
};
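
/* Summary of the two trigger flavors implemented above: INGRESS/EGRESS
 * are per-port triggers, bound with MPAR and implicitly enabled by the
 * bind itself; TAIL_DROP/EARLY_DROP/ECN are global triggers on
 * Spectrum-2 and later, bound once with MPAGR and then enabled per port
 * and traffic class through MOMTE. Spectrum-1 only supports the per-port
 * flavor.
 */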

static void
mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
{
	struct mlxsw_sp_span *span = trigger_entry->span;
	enum mlxsw_sp_span_trigger_type type;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
		break;
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	trigger_entry->ops = span->span_trigger_ops_arr[type];
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct mlxsw_sp_span_trigger_parms
				   *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err;

	trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
	if (!trigger_entry)
		return ERR_PTR(-ENOMEM);

	refcount_set(&trigger_entry->ref_count, 1);
	trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
						    0;
	trigger_entry->trigger = trigger;
	memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
	trigger_entry->span = span;
	mlxsw_sp_span_trigger_ops_set(trigger_entry);
	list_add_tail(&trigger_entry->list, &span->trigger_entries_list);

	err = trigger_entry->ops->bind(trigger_entry);
	if (err)
		goto err_trigger_entry_bind;

	return trigger_entry;

err_trigger_entry_bind:
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_trigger_entry *
				    trigger_entry)
{
	trigger_entry->ops->unbind(trigger_entry);
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
				 enum mlxsw_sp_span_trigger trigger,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
		if (trigger_entry->ops->matches(trigger_entry, trigger,
						mlxsw_sp_port))
			return trigger_entry;
	}

	return NULL;
}

int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
			     enum mlxsw_sp_span_trigger trigger,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err = 0;

	ASSERT_RTNL();

	if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
		return -EINVAL;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (trigger_entry) {
		if (trigger_entry->parms.span_id != parms->span_id ||
		    trigger_entry->parms.probability_rate !=
		    parms->probability_rate)
			return -EINVAL;
		refcount_inc(&trigger_entry->ref_count);
		goto out;
	}

	trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
							   trigger,
							   mlxsw_sp_port,
							   parms);
	if (IS_ERR(trigger_entry))
		err = PTR_ERR(trigger_entry);

out:
	return err;
}

void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_span_trigger trigger,
				struct mlxsw_sp_port *mlxsw_sp_port,
				const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
							 parms->span_id)))
		return;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	if (!refcount_dec_and_test(&trigger_entry->ref_count))
		return;

	mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}

int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return -EINVAL;

	return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
}

void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}

bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger)
{
	switch (trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		return true;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		return false;
	}

	WARN_ON_ONCE(1);
	return false;
}

static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
		    &mlxsw_sp1_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	return -EOPNOTSUPP;
}

const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
	.init = mlxsw_sp1_span_init,
	.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};

static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
		    &mlxsw_sp2_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50

static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	char mogcr_pl[MLXSW_REG_MOGCR_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
	if (err)
		return err;

	mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
}

const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};

const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};