// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

struct mlxsw_sp_span {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct list_head analyzed_ports_list;
	struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
	struct list_head trigger_entries_list;
	atomic_t active_entries_count;
	int entries_count;
	struct mlxsw_sp_span_entry entries[];
};

struct mlxsw_sp_span_analyzed_port {
	struct list_head list; /* Member of analyzed_ports_list */
	refcount_t ref_count;
	u8 local_port;
	bool ingress;
};

struct mlxsw_sp_span_trigger_entry {
	struct list_head list; /* Member of trigger_entries_list */
	refcount_t ref_count;
	u8 local_port;
	enum mlxsw_sp_span_trigger trigger;
	struct mlxsw_sp_span_trigger_parms parms;
};

static void mlxsw_sp_span_respin_work(struct work_struct *work);

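/* devlink resource occupancy callback: report how many of the MAX_SPAN
 * agent entries are currently in use.
 */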
static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->span->active_entries_count);
}

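/* Allocate the per-ASIC SPAN context. The number of SPAN agent entries is
 * dictated by the MAX_SPAN device resource, so the entries[] flexible array
 * is sized with struct_size() and each entry is pre-assigned its hardware
 * pa_id.
 */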
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	mutex_init(&span->analyzed_ports_lock);
	INIT_LIST_HEAD(&span->analyzed_ports_list);
	INIT_LIST_HEAD(&span->trigger_entries_list);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++)
		mlxsw_sp->span->entries[i].id = i;

	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
					  mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	cancel_work_sync(&mlxsw_sp->span->work);
	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
}

static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

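/* Resolve the destination MAC of the mirror session's next hop through the
 * given neighbour table (&arp_tbl for IPv4, &nd_tbl for IPv6). If no
 * neighbour entry exists yet, one is created and probed; until it becomes
 * NUD_VALID this fails with -ENOENT, and a later respin re-resolves the
 * session.
 */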
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

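/* Walk a VLAN-aware bridge towards the port that will transmit the mirrored
 * packets: validate the (possibly PVID-derived) VLAN, look the DMAC up in
 * the FDB and report through @p_vid whether the packets should leave the
 * egress port tagged.
 */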
static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

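/* Starting from the tunnel's underlay egress device, peel off stacked VLAN,
 * bridge and LAG devices until a front-panel port is reached, then fill in
 * the remote-SPAN parameters. Anything that cannot be expressed in hardware
 * degrades to the unoffloadable parms: the agent stays bound, but nothing
 * is mirrored.
 */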
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

#if IS_ENABLED(CONFIG_NET_IPGRE)
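/* Repeat the tunnel's route lookup to find the underlay egress device and
 * the IPv4 next hop. Only unicast routes without an IPv6 gateway can be
 * offloaded.
 */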
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* cannot offload if route has an IPv6 gateway */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

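/* The nop ops are installed by mlxsw_sp_span_entry_invalidate(), e.g. when
 * the destination netdevice goes away. They keep the entry and its span_id
 * valid, while mirroring nothing, until the agent is finally released.
 */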
static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance\n",
			   sparms.dest_port->dev->name);
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		netdev_err(span_entry->to_dev, "Failed to offload mirror to %s\n",
			   sparms.dest_port->dev->name);
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
	if (span_entry) {
		/* Already exists, just take a reference */
		refcount_inc(&span_entry->ref_count);
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (refcount_dec_and_test(&span_entry->ref_count))
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

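/* An egress-mirrored port needs an internal (SBIB) buffer large enough to
 * absorb traffic bursts while the mirror copies are generated. Its size is
 * derived from the port's current speed and MTU and must therefore be
 * refreshed whenever either of them changes.
 */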
static int
mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	u32 buffsize;
	u32 speed;
	int err;

	err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
	if (err)
		return err;
	if (speed == SPEED_UNKNOWN)
		speed = 0;

	buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}

static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
					      u8 local_port)
{
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}

static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
				 bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;

	list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
		if (analyzed_port->local_port == local_port &&
		    analyzed_port->ingress == ingress)
			return analyzed_port;
	}

	return NULL;
}

int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int err = 0;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the MTU value.
	 */
	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
					     false))
		err = mlxsw_sp_span_port_buffer_update(port, mtu);

	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);

	return err;
}

void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;

	mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
				     span.speed_update_dw);

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the speed value.
	 */
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
					     mlxsw_sp_port->local_port, false))
		mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
						 mlxsw_sp_port->dev->mtu);

	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
		if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
			return mlxsw_sp_span_entry_types[i];

	return NULL;
}

static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!refcount_read(&curr->ref_count))
			continue;

		err = curr->ops->parms_set(curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}

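/* Request a re-resolution of all active SPAN agents. Callers invoke this
 * from paths that may change how a mirror destination is reached (e.g.
 * FDB, neighbour or STP updates); the actual work is deferred to a work
 * item so that RTNL can be taken.
 */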
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}

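/* Get a SPAN agent for mirroring to @to_dev and report its @p_span_id. A
 * sketch of how a caller is expected to combine the agent, analyzed-port
 * and trigger APIs (error handling elided, the ingress direction chosen
 * purely for illustration):
 *
 *	struct mlxsw_sp_span_trigger_parms parms;
 *	int span_id;
 *
 *	mlxsw_sp_span_agent_get(mlxsw_sp, to_dev, &span_id);
 *	mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
 *	parms.span_id = span_id;
 *	mlxsw_sp_span_agent_bind(mlxsw_sp, MLXSW_SP_SPAN_TRIGGER_INGRESS,
 *				 mlxsw_sp_port, &parms);
 *
 * Teardown calls the corresponding unbind/put functions in reverse order.
 */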
int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
			    const struct net_device *to_dev, int *p_span_id)
{
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp_span_parms sparms;
	int err;

	ASSERT_RTNL();

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
		return -EOPNOTSUPP;
	}

	memset(&sparms, 0, sizeof(sparms));
	err = ops->parms_set(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	*p_span_id = span_entry->id;

	return 0;
}

void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	struct mlxsw_sp_span_entry *span_entry;

	ASSERT_RTNL();

	span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
	if (WARN_ON_ONCE(!span_entry))
		return;

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}

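/* "Analyzed ports" are the ports whose traffic is being mirrored. They are
 * reference counted separately from the agents because several mirror
 * sessions may analyze the same port, while the egress buffer configured
 * below must be set up exactly once per {port, direction} pair.
 */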
static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	int err;

	analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
	if (!analyzed_port)
		return ERR_PTR(-ENOMEM);

	refcount_set(&analyzed_port->ref_count, 1);
	analyzed_port->local_port = mlxsw_sp_port->local_port;
	analyzed_port->ingress = ingress;
	list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);

	/* An egress mirror buffer should be allocated on the egress port which
	 * does the mirroring.
	 */
	if (!ingress) {
		u16 mtu = mlxsw_sp_port->dev->mtu;

		err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
		if (err)
			goto err_buffer_update;
	}

	return analyzed_port;

err_buffer_update:
	list_del(&analyzed_port->list);
	kfree(analyzed_port);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_analyzed_port *
				    analyzed_port)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;

	/* Remove egress mirror buffer now that port is no longer analyzed
	 * at egress.
	 */
	if (!analyzed_port->ingress)
		mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
						  analyzed_port->local_port);

	list_del(&analyzed_port->list);
	kfree(analyzed_port);
}

int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u8 local_port = mlxsw_sp_port->local_port;
	int err = 0;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (analyzed_port) {
		refcount_inc(&analyzed_port->ref_count);
		goto out_unlock;
	}

	analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
							   mlxsw_sp_port,
							   ingress);
	if (IS_ERR(analyzed_port))
		err = PTR_ERR(analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
	return err;
}

void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u8 local_port = mlxsw_sp_port->local_port;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (WARN_ON_ONCE(!analyzed_port))
		goto out_unlock;

	if (!refcount_dec_and_test(&analyzed_port->ref_count))
		goto out_unlock;

	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}

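/* Bind or unbind a SPAN agent to a {port, trigger} pair through the MPAR
 * register. Only the ingress and egress port triggers can be expressed
 * this way; any other trigger is a driver bug, hence the WARN.
 */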
static int
__mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_span_trigger_entry *
				   trigger_entry, bool enable)
{
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	enum mlxsw_reg_mpar_i_e i_e;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
			    trigger_entry->parms.span_id);
	return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
				 struct mlxsw_sp_span_trigger_entry *
				 trigger_entry)
{
	return __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, true);
}

static void
mlxsw_sp_span_trigger_entry_unbind(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	__mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, false);
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
				   enum mlxsw_sp_span_trigger trigger,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct mlxsw_sp_span_trigger_parms
				   *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err;

	trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
	if (!trigger_entry)
		return ERR_PTR(-ENOMEM);

	refcount_set(&trigger_entry->ref_count, 1);
	trigger_entry->local_port = mlxsw_sp_port->local_port;
	trigger_entry->trigger = trigger;
	memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
	list_add_tail(&trigger_entry->list, &span->trigger_entries_list);

	err = mlxsw_sp_span_trigger_entry_bind(span, trigger_entry);
	if (err)
		goto err_trigger_entry_bind;

	return trigger_entry;

err_trigger_entry_bind:
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_trigger_entry *
				    trigger_entry)
{
	mlxsw_sp_span_trigger_entry_unbind(span, trigger_entry);
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
}

static struct mlxsw_sp_span_trigger_entry *
mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
				 enum mlxsw_sp_span_trigger trigger,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
		if (trigger_entry->trigger == trigger &&
		    trigger_entry->local_port == mlxsw_sp_port->local_port)
			return trigger_entry;
	}

	return NULL;
}

int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
			     enum mlxsw_sp_span_trigger trigger,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;
	int err = 0;

	ASSERT_RTNL();

	if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
		return -EINVAL;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (trigger_entry) {
		if (trigger_entry->parms.span_id != parms->span_id)
			return -EINVAL;
		refcount_inc(&trigger_entry->ref_count);
		goto out;
	}

	trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
							   trigger,
							   mlxsw_sp_port,
							   parms);
	if (IS_ERR(trigger_entry))
		err = PTR_ERR(trigger_entry);

out:
	return err;
}

void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_span_trigger trigger,
				struct mlxsw_sp_port *mlxsw_sp_port,
				const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
							 parms->span_id)))
		return;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	if (!refcount_dec_and_test(&trigger_entry->ref_count))
		return;

	mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}