// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

struct mlxsw_sp_span {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
	const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
	size_t span_entry_ops_arr_size;
	struct list_head analyzed_ports_list;
	struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
	struct list_head trigger_entries_list;
	u16 policer_id_base;
	refcount_t policer_id_base_ref_count;
	atomic_t active_entries_count;
	int entries_count;
	struct mlxsw_sp_span_entry entries[];
};

struct mlxsw_sp_span_analyzed_port {
	struct list_head list; /* Member of analyzed_ports_list */
	refcount_t ref_count;
	u8 local_port;
	bool ingress;
};

struct mlxsw_sp_span_trigger_entry {
	struct list_head list; /* Member of trigger_entries_list */
	struct mlxsw_sp_span *span;
	const struct mlxsw_sp_span_trigger_ops *ops;
	refcount_t ref_count;
	u8 local_port;
	enum mlxsw_sp_span_trigger trigger;
	struct mlxsw_sp_span_trigger_parms parms;
};

enum mlxsw_sp_span_trigger_type {
	MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
	MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
};

struct mlxsw_sp_span_trigger_ops {
	int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			enum mlxsw_sp_span_trigger trigger,
			struct mlxsw_sp_port *mlxsw_sp_port);
	int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
		      struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
	void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
};

static void mlxsw_sp_span_respin_work(struct work_struct *work);

static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->span->active_entries_count);
}

int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	refcount_set(&span->policer_id_base_ref_count, 0);
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	mutex_init(&span->analyzed_ports_lock);
	INIT_LIST_HEAD(&span->analyzed_ports_list);
	INIT_LIST_HEAD(&span->trigger_entries_list);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++)
		mlxsw_sp->span->entries[i].id = i;

	err = mlxsw_sp->span_ops->init(mlxsw_sp);
	if (err)
		goto err_init;

	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
					  mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;

err_init:
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
	return err;
}
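
/* The number of in-use SPAN agents is exported as devlink resource occupancy
 * (registered above), so it can be inspected from user space; e.g. a command
 * along the lines of "devlink resource show <bus/device>" is expected to list
 * the SPAN resource together with its current occupancy.
 */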

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	cancel_work_sync(&mlxsw_sp->span->work);
	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
}

static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
	.can_handle = mlxsw_sp1_span_cpu_can_handle,
	.parms_set = mlxsw_sp1_span_entry_cpu_parms,
	.configure = mlxsw_sp1_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
};

static int
mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
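
/* Resolution example (hypothetical topology): if a tunnel's underlay route
 * egresses via br0.10 on top of a VLAN-aware bridge br0 whose FDB maps the
 * gateway MAC to bridge port bond0, the function above peels the stack one
 * layer at a time: the VLAN upper contributes VID 10, the bridge FDB lookup
 * selects bond0, the LAG layer picks a txable member such as swp3, and swp3
 * is finally accepted by mlxsw_sp_port_dev_check() as the mirror egress port.
 */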

#if IS_ENABLED(CONFIG_NET_IPGRE)
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* cannot offload if route has an IPv6 gateway */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
	&mlxsw_sp1_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
	return 0;
}

static int
mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	/* Mirroring to the CPU port is like mirroring to any other physical
	 * port. Its local port is used instead of that of the physical port.
	 */
	return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
}

static void
mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	enum mlxsw_reg_mpat_span_type span_type;

	span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
	mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
	.can_handle = mlxsw_sp2_span_cpu_can_handle,
	.parms_set = mlxsw_sp2_span_entry_cpu_parms,
	.configure = mlxsw_sp2_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
	&mlxsw_sp2_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static int
mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		dev_err(mlxsw_sp->bus_info->dev,
			"Cannot mirror to a port which belongs to a different mlxsw instance\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
					     u16 policer_id)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
	u16 policer_id_base;
	int err;

	/* Policers set on SPAN agents must be in the range of
	 * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the
	 * base is set and the new policer is not within the range, then we
	 * must error out.
	 */
	if (refcount_read(&span->policer_id_base_ref_count)) {
		if (policer_id < span->policer_id_base ||
		    policer_id >= span->policer_id_base + span->entries_count)
			return -EINVAL;

		refcount_inc(&span->policer_id_base_ref_count);
		return 0;
	}

	/* Base must be even. */
	policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
	err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
						      policer_id_base);
	if (err)
		return err;

	span->policer_id_base = policer_id_base;
	refcount_set(&span->policer_id_base_ref_count, 1);

	return 0;
}

static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
{
	if (refcount_dec_and_test(&span->policer_id_base_ref_count))
		span->policer_id_base = 0;
}
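
/* Worked example (hypothetical numbers): with 8 SPAN agents (entries_count)
 * and a first policer ID of 7, the base is rounded down to the even value 6
 * and programmed through the policer_id_base_set() operation; every later
 * SPAN policer must then fall within IDs 6..13, otherwise
 * mlxsw_sp_span_policer_id_base_set() fails with -EINVAL.
 */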

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	if (sparms.policer_enable) {
		int err;

		err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
							sparms.policer_id);
		if (err)
			return NULL;
	}

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
	if (span_entry->parms.policer_enable)
		mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  const struct mlxsw_sp_span_parms *sparms)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
		    curr->parms.policer_enable == sparms->policer_enable &&
		    curr->parms.policer_id == sparms->policer_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
						       &sparms);
	if (span_entry) {
		/* Already exists, just take a reference */
		refcount_inc(&span_entry->ref_count);
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (refcount_dec_and_test(&span_entry->ref_count))
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu,
				      u32 speed)
{
	u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);

	return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
}

static int
mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	u32 buffsize;
	u32 speed;
	int err;

	err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
	if (err)
		return err;
	if (speed == SPEED_UNKNOWN)
		speed = 0;

	buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
	buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}

static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
					      u8 local_port)
{
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
				 bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;

	list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
		if (analyzed_port->local_port == local_port &&
		    analyzed_port->ingress == ingress)
			return analyzed_port;
	}

	return NULL;
}

int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int err = 0;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value.
	 */
	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
					     false))
		err = mlxsw_sp_span_port_buffer_update(port, mtu);

	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);

	return err;
}

void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;

	mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
				     span.speed_update_dw);

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the speed value.
	 */
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
					     mlxsw_sp_port->local_port, false))
		mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
						 mlxsw_sp_port->dev->mtu);

	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	struct mlxsw_sp_span *span = mlxsw_sp->span;
	size_t i;

	for (i = 0; i < span->span_entry_ops_arr_size; ++i)
		if (span->span_entry_ops_arr[i]->can_handle(to_dev))
			return span->span_entry_ops_arr[i];

	return NULL;
}

static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!refcount_read(&curr->ref_count))
			continue;

		err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}

void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}
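
/* mlxsw_sp_span_respin() is invoked from elsewhere in the driver when the
 * software resolution of a mirror destination may have changed, e.g. after
 * switchdev FDB or VLAN updates. The work item re-runs parms_set() for every
 * active entry and reprograms the hardware only when the resolved parameters
 * actually differ, so entries whose resolution is stable are left untouched.
 */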

int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
			    const struct mlxsw_sp_span_agent_parms *parms)
{
	const struct net_device *to_dev = parms->to_dev;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp_span_parms sparms;
	int err;

	ASSERT_RTNL();

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
		return -EOPNOTSUPP;
	}

	memset(&sparms, 0, sizeof(sparms));
	err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
	if (err)
		return err;

	sparms.policer_id = parms->policer_id;
	sparms.policer_enable = parms->policer_enable;
	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	*p_span_id = span_entry->id;

	return 0;
}

void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	struct mlxsw_sp_span_entry *span_entry;

	ASSERT_RTNL();

	span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
	if (WARN_ON_ONCE(!span_entry))
		return;

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	int err;

	analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
	if (!analyzed_port)
		return ERR_PTR(-ENOMEM);

	refcount_set(&analyzed_port->ref_count, 1);
	analyzed_port->local_port = mlxsw_sp_port->local_port;
	analyzed_port->ingress = ingress;
	list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);

	/* An egress mirror buffer should be allocated on the egress port which
	 * does the mirroring.
	 */
	if (!ingress) {
		u16 mtu = mlxsw_sp_port->dev->mtu;

		err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
		if (err)
			goto err_buffer_update;
	}

	return analyzed_port;

err_buffer_update:
	list_del(&analyzed_port->list);
	kfree(analyzed_port);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_analyzed_port *
				    analyzed_port)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;

	/* Remove egress mirror buffer now that port is no longer analyzed
	 * at egress.
	 */
	if (!analyzed_port->ingress)
		mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
						  analyzed_port->local_port);

	list_del(&analyzed_port->list);
	kfree(analyzed_port);
}

int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u8 local_port = mlxsw_sp_port->local_port;
	int err = 0;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (analyzed_port) {
		refcount_inc(&analyzed_port->ref_count);
		goto out_unlock;
	}

	analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
							   mlxsw_sp_port,
							   ingress);
	if (IS_ERR(analyzed_port))
		err = PTR_ERR(analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
	return err;
}

void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u8 local_port = mlxsw_sp_port->local_port;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (WARN_ON_ONCE(!analyzed_port))
		goto out_unlock;

	if (!refcount_dec_and_test(&analyzed_port->ref_count))
		goto out_unlock;

	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}

static int
__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
				  struct mlxsw_sp_span_trigger_entry *
				  trigger_entry, bool enable)
{
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	enum mlxsw_reg_mpar_i_e i_e;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
			    trigger_entry->parms.span_id);
	return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
				trigger_entry)
{
	return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
						 trigger_entry, true);
}

static void
mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry)
{
	__mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
					  false);
}

static bool
mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger &&
	       trigger_entry->local_port == mlxsw_sp_port->local_port;
}

static int
mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry,
				  struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
	/* Port triggers are enabled during binding. */
	return 0;
}

static void
mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp_span_trigger_port_ops = {
	.bind = mlxsw_sp_span_trigger_port_bind,
	.unbind = mlxsw_sp_span_trigger_port_unbind,
	.matches = mlxsw_sp_span_trigger_port_matches,
	.enable = mlxsw_sp_span_trigger_port_enable,
	.disable = mlxsw_sp_span_trigger_port_disable,
};

static int
mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
}

static bool
mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	WARN_ON_ONCE(1);
	return false;
}

static int
mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp1_span_trigger_global_ops = {
	.bind = mlxsw_sp1_span_trigger_global_bind,
	.unbind = mlxsw_sp1_span_trigger_global_unbind,
	.matches = mlxsw_sp1_span_trigger_global_matches,
	.enable = mlxsw_sp1_span_trigger_global_enable,
	.disable = mlxsw_sp1_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp1_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp1_span_trigger_global_ops,
};

static int
mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	enum mlxsw_reg_mpagr_trigger trigger;
	char mpagr_pl[MLXSW_REG_MPAGR_LEN];

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
			     1);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
}

static void
mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
	/* There is no unbinding for global triggers. The trigger should be
	 * disabled on all ports by now.
	 */
}
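
/* On Spectrum-2 and later, a global trigger is programmed once via the MPAGR
 * register at bind time, while the MOMTE register toggles it per port and per
 * traffic class in the enable / disable operations below. Unbinding therefore
 * has no register write of its own; see
 * mlxsw_sp2_span_trigger_global_unbind() above.
 */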

static bool
mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger;
}

static int
__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				       trigger_entry,
				       struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 tc, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	char momte_pl[MLXSW_REG_MOMTE_LEN];
	enum mlxsw_reg_momte_type type;
	int err;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		type = MLXSW_REG_MOMTE_TYPE_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_REG_MOMTE_TYPE_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* Query existing configuration in order to only change the state of
	 * the specified traffic class.
	 */
	mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
	if (err)
		return err;

	mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
}

static int
mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
						      mlxsw_sp_port, tc, true);
}

static void
mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
	__mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
					       false);
}

static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp2_span_trigger_global_ops = {
	.bind = mlxsw_sp2_span_trigger_global_bind,
	.unbind = mlxsw_sp2_span_trigger_global_unbind,
	.matches = mlxsw_sp2_span_trigger_global_matches,
	.enable = mlxsw_sp2_span_trigger_global_enable,
	.disable = mlxsw_sp2_span_trigger_global_disable,
};

static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp2_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp2_span_trigger_global_ops,
};

static void
mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
{
	struct mlxsw_sp_span *span = trigger_entry->span;
	enum mlxsw_sp_span_trigger_type type;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS: /* fall-through */
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
		break;
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: /* fall-through */
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: /* fall-through */
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	trigger_entry->ops = span->span_trigger_ops_arr[type];
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct mlxsw_sp_span_trigger_parms
				   *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err;

	trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
	if (!trigger_entry)
		return ERR_PTR(-ENOMEM);

	refcount_set(&trigger_entry->ref_count, 1);
	trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
						    0;
	trigger_entry->trigger = trigger;
	memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
	trigger_entry->span = span;
	mlxsw_sp_span_trigger_ops_set(trigger_entry);
	list_add_tail(&trigger_entry->list, &span->trigger_entries_list);

	err = trigger_entry->ops->bind(trigger_entry);
	if (err)
		goto err_trigger_entry_bind;

	return trigger_entry;

err_trigger_entry_bind:
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_trigger_entry *
				    trigger_entry)
{
	trigger_entry->ops->unbind(trigger_entry);
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
				 enum mlxsw_sp_span_trigger trigger,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
		if (trigger_entry->ops->matches(trigger_entry, trigger,
						mlxsw_sp_port))
			return trigger_entry;
	}

	return NULL;
}

int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
			     enum mlxsw_sp_span_trigger trigger,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err = 0;

	ASSERT_RTNL();

	if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
		return -EINVAL;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (trigger_entry) {
		if (trigger_entry->parms.span_id != parms->span_id)
			return -EINVAL;
		refcount_inc(&trigger_entry->ref_count);
		goto out;
	}

	trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
							   trigger,
							   mlxsw_sp_port,
							   parms);
	if (IS_ERR(trigger_entry))
		err = PTR_ERR(trigger_entry);

out:
	return err;
}

void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_span_trigger trigger,
				struct mlxsw_sp_port *mlxsw_sp_port,
				const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
							 parms->span_id)))
		return;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	if (!refcount_dec_and_test(&trigger_entry->ref_count))
		return;

	mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}
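
/* Usage sketch (illustrative only, not wired into the driver): the sequence
 * a mirroring consumer is expected to follow under RTNL, modeled on how the
 * matchall/qevents code consumes this API. The function name is hypothetical.
 */
static int __maybe_unused
mlxsw_sp_span_usage_sketch(struct mlxsw_sp_port *mlxsw_sp_port,
			   const struct net_device *to_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.to_dev = to_dev,
	};
	int span_id;
	int err;

	/* 1. Resolve the destination and allocate a SPAN agent. */
	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms);
	if (err)
		return err;

	/* 2. Account the mirrored port as analyzed at ingress. */
	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
	if (err)
		goto err_analyzed_port_get;

	/* 3. Bind the agent to the port's ingress trigger. */
	trigger_parms.span_id = span_id;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, MLXSW_SP_SPAN_TRIGGER_INGRESS,
				       mlxsw_sp_port, &trigger_parms);
	if (err)
		goto err_agent_bind;

	return 0;

err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
	return err;
}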

int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return -EINVAL;

	return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
}

void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}

static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
		    &mlxsw_sp1_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
{
	return mtu * 5 / 2;
}

static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	return -EOPNOTSUPP;
}

const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
	.init = mlxsw_sp1_span_init,
	.buffsize_get = mlxsw_sp1_span_buffsize_get,
	.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};

static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
		    &mlxsw_sp2_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}

#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50

static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
{
	return 3 * mtu + buffer_factor * speed / 1000;
}

static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
{
	int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;

	return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
}

static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	char mogcr_pl[MLXSW_REG_MOGCR_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
	if (err)
		return err;

	mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
}

const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
	.init = mlxsw_sp2_span_init,
	.buffsize_get = mlxsw_sp2_span_buffsize_get,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};

static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
{
	int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;

	return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
}

const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
	.init = mlxsw_sp2_span_init,
	.buffsize_get = mlxsw_sp3_span_buffsize_get,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};
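
/* Worked example for the egress mirror buffer formula above (illustrative
 * numbers): with mtu = 1518 and speed = 100000 (100 Gb/s, in Mb/s units as
 * reported by mlxsw_sp_port_speed_get()), Spectrum-2 reserves
 * 3 * 1518 + 38 * 100000 / 1000 = 8354 bytes and Spectrum-3 reserves
 * 3 * 1518 + 50 * 100000 / 1000 = 9554 bytes, before
 * mlxsw_sp_span_buffsize_get() converts the result to cells and adds one.
 */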