1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/if_bridge.h>
5 #include <linux/list.h>
6 #include <linux/mutex.h>
7 #include <linux/refcount.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/workqueue.h>
10 #include <net/arp.h>
11 #include <net/gre.h>
12 #include <net/lag.h>
13 #include <net/ndisc.h>
14 #include <net/ip6_tunnel.h>
15 
16 #include "spectrum.h"
17 #include "spectrum_ipip.h"
18 #include "spectrum_span.h"
19 #include "spectrum_switchdev.h"
20 
/* Per-ASIC SPAN (port mirroring) state. Allocated by mlxsw_sp_span_init()
 * with a trailing array of entries_count SPAN entries.
 */
struct mlxsw_sp_span {
	struct work_struct work; /* Respin work; see mlxsw_sp_span_respin_work() */
	struct mlxsw_sp *mlxsw_sp; /* Back-pointer to the owning ASIC instance */
	struct list_head analyzed_ports_list;
	struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
	struct list_head trigger_entries_list;
	atomic_t active_entries_count; /* Entries with non-zero ref_count */
	int entries_count; /* Capacity of the entries[] flexible array */
	struct mlxsw_sp_span_entry entries[];
};
31 
/* A port that is analyzed (mirrored) in one direction. Reference counted;
 * protected by mlxsw_sp_span->analyzed_ports_lock.
 */
struct mlxsw_sp_span_analyzed_port {
	struct list_head list; /* Member of analyzed_ports_list */
	refcount_t ref_count;
	u8 local_port;
	bool ingress; /* Direction in which the port is analyzed */
};
38 
/* Binding between a mirroring trigger on a local port and a SPAN agent.
 * Reference counted; member of mlxsw_sp_span->trigger_entries_list.
 */
struct mlxsw_sp_span_trigger_entry {
	struct list_head list; /* Member of trigger_entries_list */
	refcount_t ref_count;
	u8 local_port;
	enum mlxsw_sp_span_trigger trigger; /* Ingress or egress trigger */
	struct mlxsw_sp_span_trigger_parms parms; /* Bound SPAN agent ID */
};
46 
47 static void mlxsw_sp_span_respin_work(struct work_struct *work);
48 
49 static u64 mlxsw_sp_span_occ_get(void *priv)
50 {
51 	const struct mlxsw_sp *mlxsw_sp = priv;
52 
53 	return atomic_read(&mlxsw_sp->span->active_entries_count);
54 }
55 
56 int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
57 {
58 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
59 	struct mlxsw_sp_span *span;
60 	int i, entries_count;
61 
62 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
63 		return -EIO;
64 
65 	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
66 	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
67 	if (!span)
68 		return -ENOMEM;
69 	span->entries_count = entries_count;
70 	atomic_set(&span->active_entries_count, 0);
71 	mutex_init(&span->analyzed_ports_lock);
72 	INIT_LIST_HEAD(&span->analyzed_ports_list);
73 	INIT_LIST_HEAD(&span->trigger_entries_list);
74 	span->mlxsw_sp = mlxsw_sp;
75 	mlxsw_sp->span = span;
76 
77 	for (i = 0; i < mlxsw_sp->span->entries_count; i++)
78 		mlxsw_sp->span->entries[i].id = i;
79 
80 	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
81 					  mlxsw_sp_span_occ_get, mlxsw_sp);
82 	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
83 
84 	return 0;
85 }
86 
87 void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
88 {
89 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
90 
91 	cancel_work_sync(&mlxsw_sp->span->work);
92 	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
93 
94 	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
95 	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
96 	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
97 	kfree(mlxsw_sp->span);
98 }
99 
100 static int
101 mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
102 			       struct mlxsw_sp_span_parms *sparmsp)
103 {
104 	sparmsp->dest_port = netdev_priv(to_dev);
105 	return 0;
106 }
107 
108 static int
109 mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
110 				   struct mlxsw_sp_span_parms sparms)
111 {
112 	struct mlxsw_sp_port *dest_port = sparms.dest_port;
113 	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
114 	u8 local_port = dest_port->local_port;
115 	char mpat_pl[MLXSW_REG_MPAT_LEN];
116 	int pa_id = span_entry->id;
117 
118 	/* Create a new port analayzer entry for local_port. */
119 	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
120 			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
121 
122 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
123 }
124 
125 static void
126 mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
127 				       enum mlxsw_reg_mpat_span_type span_type)
128 {
129 	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
130 	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
131 	u8 local_port = dest_port->local_port;
132 	char mpat_pl[MLXSW_REG_MPAT_LEN];
133 	int pa_id = span_entry->id;
134 
135 	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
136 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
137 }
138 
139 static void
140 mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
141 {
142 	mlxsw_sp_span_entry_deconfigure_common(span_entry,
143 					    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
144 }
145 
/* SPAN agent ops for mirroring to a local physical port. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};
153 
/* Resolve the MAC address of next hop 'pkey' (an IPv4 or IPv6 address,
 * depending on 'tbl') on 'dev'. Kicks off neighbour resolution when
 * needed. Returns 0 and fills 'dmac' on success, -ENOENT when the
 * neighbour is not (yet) valid, or the error from neigh_create().
 */
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	/* Trigger resolution so a later respin can succeed. */
	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}
180 
/* Mark the entry as not offloadable by clearing the destination port.
 * Returning 0 (not an error) keeps the agent allocated, so mirroring can
 * resume via respin once the destination becomes resolvable.
 */
static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}
187 
/* In a VLAN-aware bridge, resolve the egress device for 'dmac': use the
 * given VID (or the bridge PVID when *p_vid is zero), require the VLAN
 * to exist on the bridge itself, then look up the FDB. On success,
 * *p_vid is updated to 0 when the traffic should leave untagged, or to
 * the VID otherwise. Returns NULL when resolution fails.
 */
static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	/* The VLAN must also be configured on the egress port. */
	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}
216 
/* In a VLAN-unaware bridge, resolve the egress device for 'dmac' by an
 * FDB lookup with VID 0.
 */
static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	struct net_device *edev = br_fdb_find_port(br_dev, dmac, 0);

	return edev;
}
223 
/* Resolve the bridge egress device for 'dmac' (802.1Q or 802.1D flavour)
 * and verify that the corresponding bridge port is in forwarding state.
 * Returns the egress device or NULL when mirroring through this bridge
 * cannot be offloaded.
 */
static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	/* Do not mirror through a port that STP is blocking. */
	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}
257 
258 static struct net_device *
259 mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
260 			 u16 *p_vid)
261 {
262 	*p_vid = vlan_dev_vlan_id(vlan_dev);
263 	return vlan_dev_real_dev(vlan_dev);
264 }
265 
266 static struct net_device *
267 mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
268 {
269 	struct net_device *dev;
270 	struct list_head *iter;
271 
272 	netdev_for_each_lower_dev(lag_dev, dev, iter)
273 		if (netif_carrier_ok(dev) &&
274 		    net_lag_port_dev_txable(dev) &&
275 		    mlxsw_sp_port_dev_check(dev))
276 			return dev;
277 
278 	return NULL;
279 }
280 
/* Common tail of tunnel parameter resolution: starting from the underlay
 * egress device 'edev', resolve the next hop's MAC and peel off VLAN,
 * bridge and LAG uppers until a physical mlxsw port is reached, then
 * fill 'sparmsp'. Any unresolvable step yields an unoffloadable entry
 * (dest_port == NULL) rather than an error. 'gw' falls back to 'daddr'
 * when the route has no gateway.
 */
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	/* A VLAN upper of the egress device provides the RSPAN VID. */
	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	/* A second VLAN layer (below the bridge) is only acceptable if no
	 * VID was picked up yet.
	 */
	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
337 
338 #if IS_ENABLED(CONFIG_NET_IPGRE)
/* Resolve the underlay egress device for an IPv4 GRE tunnel. On success,
 * returns the device and updates *saddrp with the chosen source address
 * and *daddrp with the next hop (when the route has an IPv4 gateway).
 * Returns NULL for unusable routes, e.g. non-unicast or ones with an
 * IPv6 gateway.
 */
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* can not offload if route has an IPv6 gateway */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}
375 
/* Build SPAN parameters for mirroring into an IPv4 gretap device.
 * Tunnels with GRE options, an inherited TTL, a fixed TOS, or an
 * unspecified destination cannot be offloaded and yield an
 * unoffloadable entry instead of an error.
 */
static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}
402 
/* Program a remote (encapsulated) SPAN entry with an IPv4 GRE
 * encapsulation header via the MPAT register.
 */
static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
427 
428 static void
429 mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
430 {
431 	mlxsw_sp_span_entry_deconfigure_common(span_entry,
432 					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
433 }
434 
/* SPAN agent ops for mirroring into an IPv4 gretap tunnel. */
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
441 #endif
442 
443 #if IS_ENABLED(CONFIG_IPV6_GRE)
/* Resolve the underlay egress device for an IPv6 GRE tunnel. On success,
 * returns the device and updates *saddrp (chosen source address) and
 * *daddrp (route gateway). Returns NULL when no usable route exists or
 * tunnel transmission is not allowed.
 */
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}
476 
/* Build SPAN parameters for mirroring into an IPv6 gretap device. Same
 * offload restrictions as the IPv4 variant: no GRE options, fixed hop
 * limit, inherited traffic class, specified destination.
 */
static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}
503 
/* Program a remote (encapsulated) SPAN entry with an IPv6 GRE
 * encapsulation header via the MPAT register.
 */
static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
527 
528 static void
529 mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
530 {
531 	mlxsw_sp_span_entry_deconfigure_common(span_entry,
532 					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
533 }
534 
/* SPAN agent ops for mirroring into an IPv6 gretap tunnel. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
542 #endif
543 
544 static bool
545 mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
546 {
547 	return is_vlan_dev(dev) &&
548 	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
549 }
550 
/* Build SPAN parameters for mirroring to a VLAN upper of a physical
 * port: the real device is the destination and the mirrored traffic is
 * tagged with the VLAN's VID. A downed device is unoffloadable.
 */
static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}
566 
567 static int
568 mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
569 				   struct mlxsw_sp_span_parms sparms)
570 {
571 	struct mlxsw_sp_port *dest_port = sparms.dest_port;
572 	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
573 	u8 local_port = dest_port->local_port;
574 	char mpat_pl[MLXSW_REG_MPAT_LEN];
575 	int pa_id = span_entry->id;
576 
577 	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
578 			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
579 	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
580 
581 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
582 }
583 
584 static void
585 mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
586 {
587 	mlxsw_sp_span_entry_deconfigure_common(span_entry,
588 					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
589 }
590 
/* SPAN agent ops for mirroring to a VLAN upper of a physical port. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};
598 
/* All supported SPAN agent types, in the order they are matched against
 * a mirror-to device by mlxsw_sp_span_entry_ops().
 */
static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};
610 
/* The "nop" agent is installed by mlxsw_sp_span_entry_invalidate() when
 * the mirror-to device goes away: it keeps the entry allocated but never
 * offloads and never touches the hardware.
 */
static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	/* Nothing to program for an invalidated entry. */
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	/* Intentionally empty; there is no hardware state to remove. */
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};
635 
/* Apply 'sparms' to the entry and program the hardware. If the entry is
 * unoffloadable (no destination port), the destination belongs to a
 * different ASIC instance, or hardware configuration fails, the stored
 * parameters end up with dest_port == NULL, i.e. mirroring is silently
 * disabled for this entry until the next respin.
 */
static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
			   sparms.dest_port->dev->name);
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
			   sparms.dest_port->dev->name);
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}
664 
665 static void
666 mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
667 {
668 	if (span_entry->parms.dest_port)
669 		span_entry->ops->deconfigure(span_entry);
670 }
671 
/* Allocate a free SPAN entry (ref_count == 0), take the first reference
 * and configure it for 'to_dev'. Returns NULL when all entries are in
 * use.
 */
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}
699 
/* Undo the hardware configuration and return the entry to the free pool
 * (it is free once its ref_count is zero).
 */
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
}
706 
707 struct mlxsw_sp_span_entry *
708 mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
709 				 const struct net_device *to_dev)
710 {
711 	int i;
712 
713 	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
714 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
715 
716 		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
717 			return curr;
718 	}
719 	return NULL;
720 }
721 
/* Called when the mirror-to netdevice goes away: tear down the hardware
 * state and neutralize the entry by switching it to the nop ops. The
 * entry stays allocated until its last reference is put.
 */
void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}
728 
729 static struct mlxsw_sp_span_entry *
730 mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
731 {
732 	int i;
733 
734 	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
735 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
736 
737 		if (refcount_read(&curr->ref_count) && curr->id == span_id)
738 			return curr;
739 	}
740 	return NULL;
741 }
742 
743 static struct mlxsw_sp_span_entry *
744 mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
745 			const struct net_device *to_dev,
746 			const struct mlxsw_sp_span_entry_ops *ops,
747 			struct mlxsw_sp_span_parms sparms)
748 {
749 	struct mlxsw_sp_span_entry *span_entry;
750 
751 	span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
752 	if (span_entry) {
753 		/* Already exists, just take a reference */
754 		refcount_inc(&span_entry->ref_count);
755 		return span_entry;
756 	}
757 
758 	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
759 }
760 
/* Drop a reference on the entry, destroying it when the last reference
 * goes away. Always returns 0.
 */
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (refcount_dec_and_test(&span_entry->ref_count))
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
768 
/* Size the port's internal (egress mirroring) buffer according to the
 * current port speed and the given MTU, and program it via the SBIB
 * register. An unknown speed is treated as zero.
 */
static int
mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	u32 buffsize;
	u32 speed;
	int err;

	err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
	if (err)
		return err;
	if (speed == SPEED_UNKNOWN)
		speed = 0;

	buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
	buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
789 
/* Release the port's internal mirroring buffer by programming a zero
 * size via the SBIB register.
 */
static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
					      u8 local_port)
{
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
798 
799 static struct mlxsw_sp_span_analyzed_port *
800 mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
801 				 bool ingress)
802 {
803 	struct mlxsw_sp_span_analyzed_port *analyzed_port;
804 
805 	list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
806 		if (analyzed_port->local_port == local_port &&
807 		    analyzed_port->ingress == ingress)
808 			return analyzed_port;
809 	}
810 
811 	return NULL;
812 }
813 
/* Re-size the port's egress mirroring buffer when the MTU changes; a
 * no-op for ports that are not egress-analyzed. Returns 0 or an error
 * from the buffer update.
 */
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int err = 0;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
					     false))
		err = mlxsw_sp_span_port_buffer_update(port, mtu);

	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);

	return err;
}
832 
/* Delayed-work handler run after a port speed change: re-size the port's
 * egress mirroring buffer, which depends on the speed. A no-op for ports
 * that are not egress-analyzed.
 */
void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;

	mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
				     span.speed_update_dw);

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the speed value.
	 */
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
					     mlxsw_sp_port->local_port, false))
		mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
						 mlxsw_sp_port->dev->mtu);

	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
855 
856 static const struct mlxsw_sp_span_entry_ops *
857 mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
858 			const struct net_device *to_dev)
859 {
860 	size_t i;
861 
862 	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
863 		if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
864 			return mlxsw_sp_span_entry_types[i];
865 
866 	return NULL;
867 }
868 
/* Re-resolve the parameters of all active SPAN entries and re-program
 * those whose resolved parameters changed (e.g. after a routing, FDB or
 * neighbour update). Runs from span->work under RTNL.
 */
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!refcount_read(&curr->ref_count))
			continue;

		err = curr->ops->parms_set(curr->to_dev, &sparms);
		if (err)
			continue;

		/* Only touch the hardware when the parameters changed. */
		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}
897 
/* Schedule the respin work, unless there is nothing to respin (no active
 * SPAN entries).
 */
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}
904 
/* Acquire a SPAN agent for mirroring to 'to_dev': pick a matching agent
 * type, resolve its parameters and take a reference on a (possibly
 * shared) SPAN entry. On success, returns 0 and sets *p_span_id to the
 * agent ID. Returns -EOPNOTSUPP when no agent can handle 'to_dev',
 * -ENOBUFS when all SPAN entries are in use, or an error from
 * parms_set(). Must be called under RTNL.
 */
int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
			    const struct net_device *to_dev, int *p_span_id)
{
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp_span_parms sparms;
	int err;

	ASSERT_RTNL();

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
		return -EOPNOTSUPP;
	}

	memset(&sparms, 0, sizeof(sparms));
	err = ops->parms_set(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	*p_span_id = span_entry->id;

	return 0;
}
934 
/* Release a SPAN agent previously acquired via mlxsw_sp_span_agent_get().
 * Must be called under RTNL. Putting an unknown ID is a caller bug.
 */
void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	struct mlxsw_sp_span_entry *span_entry;

	ASSERT_RTNL();

	span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
	if (WARN_ON_ONCE(!span_entry))
		return;

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}
947 
/* Allocate and link an analyzed-port record for 'mlxsw_sp_port' in the
 * given direction. For egress analysis, also allocate the port's egress
 * mirror buffer. Caller must hold span->analyzed_ports_lock. Returns the
 * record or an ERR_PTR().
 */
static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	int err;

	analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
	if (!analyzed_port)
		return ERR_PTR(-ENOMEM);

	refcount_set(&analyzed_port->ref_count, 1);
	analyzed_port->local_port = mlxsw_sp_port->local_port;
	analyzed_port->ingress = ingress;
	list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);

	/* An egress mirror buffer should be allocated on the egress port which
	 * does the mirroring.
	 */
	if (!ingress) {
		u16 mtu = mlxsw_sp_port->dev->mtu;

		err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
		if (err)
			goto err_buffer_update;
	}

	return analyzed_port;

err_buffer_update:
	list_del(&analyzed_port->list);
	kfree(analyzed_port);
	return ERR_PTR(err);
}
983 
/* Unlink and free an analyzed-port record, releasing the egress mirror
 * buffer when the port was egress-analyzed. Caller must hold
 * span->analyzed_ports_lock.
 */
static void
mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_analyzed_port *
				    analyzed_port)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;

	/* Remove egress mirror buffer now that port is no longer analyzed
	 * at egress.
	 */
	if (!analyzed_port->ingress)
		mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
						  analyzed_port->local_port);

	list_del(&analyzed_port->list);
	kfree(analyzed_port);
}
1001 
/* Mark a port as analyzed (mirrored) in the given direction, or take
 * another reference when it already is. Returns 0 on success or a
 * negative error from record creation.
 */
int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u8 local_port = mlxsw_sp_port->local_port;
	int err = 0;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (analyzed_port) {
		refcount_inc(&analyzed_port->ref_count);
		goto out_unlock;
	}

	analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
							   mlxsw_sp_port,
							   ingress);
	if (IS_ERR(analyzed_port))
		err = PTR_ERR(analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
	return err;
}
1029 
1030 void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
1031 				     bool ingress)
1032 {
1033 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1034 	struct mlxsw_sp_span_analyzed_port *analyzed_port;
1035 	u8 local_port = mlxsw_sp_port->local_port;
1036 
1037 	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
1038 
1039 	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
1040 							 local_port, ingress);
1041 	if (WARN_ON_ONCE(!analyzed_port))
1042 		goto out_unlock;
1043 
1044 	if (!refcount_dec_and_test(&analyzed_port->ref_count))
1045 		goto out_unlock;
1046 
1047 	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);
1048 
1049 out_unlock:
1050 	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
1051 }
1052 
1053 static int
1054 __mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
1055 				   struct mlxsw_sp_span_trigger_entry *
1056 				   trigger_entry, bool enable)
1057 {
1058 	char mpar_pl[MLXSW_REG_MPAR_LEN];
1059 	enum mlxsw_reg_mpar_i_e i_e;
1060 
1061 	switch (trigger_entry->trigger) {
1062 	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
1063 		i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
1064 		break;
1065 	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
1066 		i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
1067 		break;
1068 	default:
1069 		WARN_ON_ONCE(1);
1070 		return -EINVAL;
1071 	}
1072 
1073 	mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
1074 			    trigger_entry->parms.span_id);
1075 	return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
1076 }
1077 
/* Enable the hardware binding for the trigger entry. Thin wrapper that
 * forwards to __mlxsw_sp_span_trigger_entry_bind() with enable=true.
 */
static int
mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
				 struct mlxsw_sp_span_trigger_entry *
				 trigger_entry)
{
	return __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, true);
}
1085 
/* Disable the hardware binding for the trigger entry. The register write's
 * return value is intentionally ignored on this teardown path.
 */
static void
mlxsw_sp_span_trigger_entry_unbind(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	__mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, false);
}
1093 
1094 static struct mlxsw_sp_span_trigger_entry *
1095 mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
1096 				   enum mlxsw_sp_span_trigger trigger,
1097 				   struct mlxsw_sp_port *mlxsw_sp_port,
1098 				   const struct mlxsw_sp_span_trigger_parms
1099 				   *parms)
1100 {
1101 	struct mlxsw_sp_span_trigger_entry *trigger_entry;
1102 	int err;
1103 
1104 	trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
1105 	if (!trigger_entry)
1106 		return ERR_PTR(-ENOMEM);
1107 
1108 	refcount_set(&trigger_entry->ref_count, 1);
1109 	trigger_entry->local_port = mlxsw_sp_port->local_port;
1110 	trigger_entry->trigger = trigger;
1111 	memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
1112 	list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
1113 
1114 	err = mlxsw_sp_span_trigger_entry_bind(span, trigger_entry);
1115 	if (err)
1116 		goto err_trigger_entry_bind;
1117 
1118 	return trigger_entry;
1119 
1120 err_trigger_entry_bind:
1121 	list_del(&trigger_entry->list);
1122 	kfree(trigger_entry);
1123 	return ERR_PTR(err);
1124 }
1125 
/* Tear down a trigger entry: clear the hardware binding, unlink it from the
 * span's trigger list and free it.
 */
static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_trigger_entry *
				    trigger_entry)
{
	mlxsw_sp_span_trigger_entry_unbind(span, trigger_entry);
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
}
1135 
1136 static struct mlxsw_sp_span_trigger_entry *
1137 mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
1138 				 enum mlxsw_sp_span_trigger trigger,
1139 				 struct mlxsw_sp_port *mlxsw_sp_port)
1140 {
1141 	struct mlxsw_sp_span_trigger_entry *trigger_entry;
1142 
1143 	list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
1144 		if (trigger_entry->trigger == trigger &&
1145 		    trigger_entry->local_port == mlxsw_sp_port->local_port)
1146 			return trigger_entry;
1147 	}
1148 
1149 	return NULL;
1150 }
1151 
1152 int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
1153 			     enum mlxsw_sp_span_trigger trigger,
1154 			     struct mlxsw_sp_port *mlxsw_sp_port,
1155 			     const struct mlxsw_sp_span_trigger_parms *parms)
1156 {
1157 	struct mlxsw_sp_span_trigger_entry *trigger_entry;
1158 	int err = 0;
1159 
1160 	ASSERT_RTNL();
1161 
1162 	if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
1163 		return -EINVAL;
1164 
1165 	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1166 							 trigger,
1167 							 mlxsw_sp_port);
1168 	if (trigger_entry) {
1169 		if (trigger_entry->parms.span_id != parms->span_id)
1170 			return -EINVAL;
1171 		refcount_inc(&trigger_entry->ref_count);
1172 		goto out;
1173 	}
1174 
1175 	trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
1176 							   trigger,
1177 							   mlxsw_sp_port,
1178 							   parms);
1179 	if (IS_ERR(trigger_entry))
1180 		err = PTR_ERR(trigger_entry);
1181 
1182 out:
1183 	return err;
1184 }
1185 
1186 void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
1187 				enum mlxsw_sp_span_trigger trigger,
1188 				struct mlxsw_sp_port *mlxsw_sp_port,
1189 				const struct mlxsw_sp_span_trigger_parms *parms)
1190 {
1191 	struct mlxsw_sp_span_trigger_entry *trigger_entry;
1192 
1193 	ASSERT_RTNL();
1194 
1195 	if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
1196 							 parms->span_id)))
1197 		return;
1198 
1199 	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1200 							 trigger,
1201 							 mlxsw_sp_port);
1202 	if (WARN_ON_ONCE(!trigger_entry))
1203 		return;
1204 
1205 	if (!refcount_dec_and_test(&trigger_entry->ref_count))
1206 		return;
1207 
1208 	mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
1209 }
1210