/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2018 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

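/* The ASIC has a fixed pool of SPAN agents, advertised through the MAX_SPAN
 * resource. One mlxsw_sp_span_entry is preallocated per agent; an entry is
 * free while its ref_count is zero and is handed out by
 * mlxsw_sp_span_entry_create() below.
 */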
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		INIT_LIST_HEAD(&curr->bound_ports_list);
		curr->id = i;
	}

	return 0;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

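/* Resolve the MAC address of the mirror tunnel's next hop: look up (or
 * create) the neighbour entry for @pkey on @dev, kick resolution via
 * neigh_event_send(), and copy out the hardware address if the entry is
 * currently valid. -ENOENT means the neighbour is not (yet) resolved.
 */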
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

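/* When the underlay route points at a bridge, the FDB determines which
 * bridge port would actually transmit toward the tunnel remote. For an
 * 802.1Q bridge the lookup also pins down the VLAN: the caller's VID if
 * already known, the PVID otherwise; *p_vid is cleared again when the
 * chosen port would egress the packet untagged.
 */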
static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

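/* Starting from the L3 egress device of the underlay route, peel off upper
 * devices until a front-panel port is reached: a VLAN on top of the egress
 * device, then a bridge (resolved through the FDB above), then possibly a
 * VLAN bridge port, then a LAG. Anything that does not terminate at an
 * mlxsw port makes the mirror unoffloadable, in which case dest_port is
 * left NULL.
 */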
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

#if IS_ENABLED(CONFIG_NET_IPGRE)
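/* Mimic the routing decision that the kernel would make for the
 * encapsulated packet: build a flow from the tunnel parameters and perform
 * a route lookup in the tunnel's namespace. The resulting egress device and
 * gateway feed mlxsw_sp_span_entry_tunnel_parms_common() above.
 */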
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	*daddrp = rt->rt_gateway;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = is_gretap_dev,
	.parms = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
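/* IPv6 counterpart of mlxsw_sp_span_gretap4_route(). ip6_tnl_xmit_ctl()
 * applies the same address checks as the tunnel transmit path, so mirrors
 * that could not actually transmit are rejected before the route lookup.
 */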
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = is_ip6gretap_dev,
	.parms = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

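/* All supported mirror target types, tried in order: the first ops whose
 * ->can_handle() recognizes the netdevice wins. Physical ports use local
 * SPAN, gretap/ip6gretap use L3 remote SPAN (the ASIC builds the GRE
 * encapsulation itself), and VLAN devices use L2 remote SPAN.
 */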
static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

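/* Commit resolved parameters to hardware. A target that cannot be offloaded
 * (or that belongs to another ASIC) ends up with dest_port == NULL: the
 * entry stays allocated, but nothing is mirrored until a later respin
 * resolves it to something offloadable again.
 */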
static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	if (sparms.dest_port) {
		if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
			netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		} else if (span_entry->ops->configure(span_entry, sparms)) {
			netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		}
	}

	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].ref_count) {
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	span_entry->ops = ops;
	span_entry->ref_count = 1;
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
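
/* A worked example of the sizing above, assuming a 96-byte cell as on
 * Spectrum-1 (the actual cell size comes from the shared-buffer code via
 * mlxsw_sp_bytes_cells(), which rounds bytes up to whole cells; the numbers
 * here are illustrative only):
 *
 *	mtu = 1500
 *	1500 * 5 / 2 = 3750 bytes, i.e. ceil(3750 / 96) + 1 = 41 cells
 *
 * The 5/2 factor over-provisions the buffer so that it can absorb more than
 * a single maximally-sized packet in flight to the analyzer port.
 */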

int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If the port is egress mirrored, the shared buffer size must be
	 * updated according to the new MTU.
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type,
				    struct mlxsw_sp_port *port,
				    bool bind)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (type == p->type &&
		    port->local_port == p->local_port &&
		    bind == p->bound)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

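/* Bound vs. unbound inspected ports: per-port triggers (e.g. matchall
 * mirrors) are bound to the analyzer agent through the MPAR register here.
 * Flow-based triggers (e.g. ACL mirror actions) reference the agent from
 * the action itself, so they pass bind == false and only need the
 * bookkeeping below plus, for egress mirrors, the SBIB buffer.
 */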
static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int i;
	int err;

	/* A given (source port, direction) can only be bound to one analyzer,
	 * so if a binding is requested, check for conflicts.
	 */
	if (bind)
		for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
				&mlxsw_sp->span.entries[i];

			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
				return -EEXIST;
		}

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	inspected_port->bound = bind;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
							     port, bind);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
		if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
			return mlxsw_sp_span_entry_types[i];

	return NULL;
}

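/* Entry point for installing a mirror. A hypothetical caller, sketched for
 * illustration (the locals are made up; the matchall mirror offload is the
 * actual in-tree user):
 *
 *	int span_id;
 *	int err;
 *
 *	err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev,
 *				       MLXSW_SP_SPAN_INGRESS, true, &span_id);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_span_mirror_del(mlxsw_sp_port, span_id,
 *				 MLXSW_SP_SPAN_INGRESS, true);
 */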
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     const struct net_device *to_dev,
			     enum mlxsw_sp_span_type type, bool bind,
			     int *p_span_id)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_parms sparms = {NULL};
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
		return -EOPNOTSUPP;
	}

	err = ops->parms(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	*p_span_id = span_entry->id;
	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}

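/* A SPAN entry's resolved parameters depend on routing, neighbour and
 * bridge state, all of which may change after the mirror is installed.
 * Respin re-runs parameter resolution for every active entry and reprograms
 * the hardware whenever the outcome differs; it is driven from netdevice
 * and routing event handlers and must run under RTNL.
 */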
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	ASSERT_RTNL();
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!curr->ref_count)
			continue;

		err = curr->ops->parms(curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
}