/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2018 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

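/* SPAN entries are statically preallocated: one per hardware analyzer slot,
 * as reported by the MAX_SPAN resource. Slots are handed out and recycled by
 * reference count rather than allocated on demand.
 */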
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		INIT_LIST_HEAD(&curr->bound_ports_list);
		curr->id = i;
	}

	return 0;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

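/* Each type of mirror target (physical port, gretap, ip6gretap, VLAN device)
 * is described by a struct mlxsw_sp_span_entry_ops: .can_handle recognizes
 * the netdevice, .parms resolves it to SPAN parameters, and .configure and
 * .deconfigure program the MPAT register accordingly. The simplest case
 * follows: mirroring directly to a front-panel port.
 */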
static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

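/* Resolve the destination MAC for encapsulated mirroring by looking up, or
 * creating, the neighbour entry for the given address. neigh_event_send()
 * kicks off resolution; if the entry is not valid yet, -ENOENT is returned
 * and the caller treats the target as unoffloadable until a later respin
 * re-resolves it.
 */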
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

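/* Mark the entry as valid but not offloadable: dest_port stays NULL and the
 * configure step programs nothing. Returning 0 (rather than an error) keeps
 * the SPAN entry alive so it can be offloaded later, once its parameters
 * become resolvable.
 */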
static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

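/* For a VLAN-aware bridge, pick the VLAN to mirror on (the requested VID or,
 * failing that, the bridge PVID), check that it actually exists on the
 * bridge, and find the bridge port behind which the destination MAC was
 * learned. *p_vid is updated to the resolved VID when the egress port would
 * transmit it tagged.
 */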
static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (!(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED))
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

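/* Mirroring to a LAG device is offloaded by picking one member port that is
 * currently up and binding the hardware analyzer to that single port.
 */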
static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

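/* Common resolution step for tunnel targets: resolve the neighbour for the
 * gateway (or for the tunnel remote when the route has no gateway), then
 * walk the bridge, VLAN and LAG layers until a front-panel port is reached.
 * Any condition the hardware cannot honor marks the entry unoffloadable
 * rather than failing the mirror.
 */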
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

#if IS_ENABLED(CONFIG_NET_IPGRE)
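/* Simulate the routing decision the kernel would make for the tunnel's
 * encapsulated packets, to learn the egress device, the source address and
 * the gateway. The route is only borrowed for the lookup; "dev" is assumed
 * to stay valid under RTNL after the route is put.
 */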
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	*daddrp = rt->rt_gateway;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = is_gretap_dev,
	.parms = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = is_ip6gretap_dev,
	.parms = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

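/* Mirroring to a VLAN device on top of a front-panel port is offloaded as
 * remote SPAN (RSPAN): the mirrored copy egresses the real port tagged with
 * the VLAN ID of the upper device.
 */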
static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

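/* The nop operations are installed by mlxsw_sp_span_entry_invalidate(): the
 * entry stays allocated and keeps its bound ports, but it resolves as
 * unoffloadable and programs nothing to the hardware.
 */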
static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	if (sparms.dest_port) {
		if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
			netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		} else if (span_entry->ops->configure(span_entry, sparms)) {
			netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		}
	}

	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

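/* Claim a free analyzer slot for the given target netdevice and program it.
 * Note that an entry is returned even if programming fails; in that case it
 * carries a NULL destination port and simply does not mirror.
 */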
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].ref_count) {
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	span_entry->ops = ops;
	span_entry->ref_count = 1;
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->id == span_id)
			return curr;
	}
	return NULL;
}

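/* Look up an existing entry for the target netdevice and take a reference,
 * or claim a fresh slot. The entry is deconfigured again when the last
 * reference is put.
 */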
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static void mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(span_entry);
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

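/* Egress-mirrored traffic is staged in a dedicated internal buffer (SBIB)
 * sized as 2.5x the port MTU, converted to cells, plus one cell of slack.
 */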
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type,
				    struct mlxsw_sp_port *port,
				    bool bind)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (type == p->type &&
		    port->local_port == p->local_port &&
		    bind == p->bound)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

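/* Attach a mirror source ("inspected" port) to a SPAN entry. When bind is
 * false, the MPAR binding is skipped and only the bookkeeping and the egress
 * buffer are set up; callers use this when the source binding is established
 * by other means, for instance through an ACL mirror action.
 */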
static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int i;
	int err;

	/* A given (source port, direction) can only be bound to one analyzer,
	 * so if a binding is requested, check for conflicts.
	 */
	if (bind)
		for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
				&mlxsw_sp->span.entries[i];

			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
				return -EEXIST;
		}

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	inspected_port->bound = bind;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
							     port, bind);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
		if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
			return mlxsw_sp_span_entry_types[i];

	return NULL;
}

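/* Set up mirroring from @from to @to_dev, for instance on behalf of a tc
 * mirror action being offloaded: pick the matching entry operations, resolve
 * the target to SPAN parameters, take a reference on a SPAN entry and attach
 * the source port to it. On failure the entry reference is dropped again.
 */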
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     const struct net_device *to_dev,
			     enum mlxsw_sp_span_type type, bool bind,
			     int *p_span_id)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_parms sparms = {NULL};
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
		return -EOPNOTSUPP;
	}

	err = ops->parms(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	*p_span_id = span_entry->id;
	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}

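/* Re-resolve the parameters of every active SPAN entry, for example after a
 * routing or topology change, and reprogram the entries whose resolution
 * changed. Runs under RTNL, since resolution walks netdevices.
 */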
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	ASSERT_RTNL();
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!curr->ref_count)
			continue;

		err = curr->ops->parms(curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
}