/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2018 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "spectrum_ipip.h"

int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		INIT_LIST_HEAD(&curr->bound_ports_list);
		curr->id = i;
	}

	return 0;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *l3edev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, l3edev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, l3edev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}
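
/* Note: neigh_event_send() above only kicks off resolution, it does not
 * wait for it.  If the neighbour is not yet NUD_VALID, this returns
 * -ENOENT, the caller marks the mirror as unoffloadable, and a later
 * mlxsw_sp_span_respin() can pick up the resolved address and offload
 * the mirror after all.
 */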

static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) ||
	    mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	sparmsp->dest_port = netdev_priv(l3edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	return 0;
}
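
/* Example of the gateway fallback above, with RFC 5737 documentation
 * addresses (illustration only, not configuration from this driver):
 *
 *	daddr = 192.0.2.1, gw = 198.51.100.1  ->  resolve 198.51.100.1
 *	daddr = 192.0.2.1, gw = 0.0.0.0       ->  resolve 192.0.2.1
 *
 * A zero gateway means the route to the tunnel remote is directly
 * connected, so the remote address itself is resolved in the neighbour
 * table.
 */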

#if IS_ENABLED(CONFIG_NET_IPGRE)
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	*daddrp = rt->rt_gateway;

out:
	ip_rt_put(rt);
	return dev;
}
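
/* Sketch of how the lookup above behaves for a typical gretap device
 * (assumed configuration, for illustration only):
 *
 *	# ip link add gt4 type gretap local 192.0.2.2 remote 192.0.2.1 \
 *	#	tos inherit ttl 64
 *
 * The route lookup keyed on the tunnel remote returns the underlay
 * egress device in rt->dst.dev and the next hop in rt->rt_gateway
 * (zero when the remote is directly connected); the caller then
 * resolves that next hop via ARP.
 */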

static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, false);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = is_gretap_dev,
	.parms = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, false);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = is_ip6gretap_dev,
	.parms = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
};

static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	if (sparms.dest_port) {
		if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
			netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		} else if (span_entry->ops->configure(span_entry, sparms)) {
			netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		}
	}

	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].ref_count) {
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	span_entry->ops = ops;
	span_entry->ref_count = 1;
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(span_entry);
	return 0;
}
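
/* Reference counting convention: mlxsw_sp_span_entry_get() either bumps
 * an existing entry keyed on the same to_dev, or claims a free entry
 * with ref_count == 1.  Each successful get must be balanced by one
 * mlxsw_sp_span_entry_put(); the entry is only deconfigured when the
 * count drops to zero.  Two mirrors towards the same device therefore
 * share one SPAN agent:
 *
 *	get(to_dev) -> entry A, ref_count = 1   (configured)
 *	get(to_dev) -> entry A, ref_count = 2
 *	put(A)      -> ref_count = 1            (still configured)
 *	put(A)      -> ref_count = 0            (deconfigured)
 */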

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
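
/* Worked example for the sizing rule above: with an MTU of 1500, the
 * buffer is dimensioned for 1500 * 5 / 2 = 3750 bytes.  Assuming a
 * 96-byte cell (the Spectrum cell size; mlxsw_sp_bytes_cells() does the
 * actual conversion), that rounds up to 40 cells, plus one extra cell,
 * i.e. 41 cells programmed via the SBIB register below.
 */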

int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type,
				    struct mlxsw_sp_port *port,
				    bool bind)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (type == p->type &&
		    port->local_port == p->local_port &&
		    bind == p->bound)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int i;
	int err;

	/* A given (source port, direction) can only be bound to one analyzer,
	 * so if a binding is requested, check for conflicts.
	 */
	if (bind)
		for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
				&mlxsw_sp->span.entries[i];

			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
				return -EEXIST;
		}

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	inspected_port->bound = bind;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
							     port, bind);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
		if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
			return mlxsw_sp_span_entry_types[i];

	return NULL;
}
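
/* Dispatch example: a physical mlxsw port matches
 * mlxsw_sp_port_dev_check() and gets the phys ops; gretap and
 * ip6gretap netdevices get the respective tunnel ops.  For any other
 * device (e.g. a VLAN or dummy device) this returns NULL and
 * mlxsw_sp_span_mirror_add() below fails with -EOPNOTSUPP.
 */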

int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     const struct net_device *to_dev,
			     enum mlxsw_sp_span_type type, bool bind,
			     int *p_span_id)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_parms sparms = {NULL};
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
		return -EOPNOTSUPP;
	}

	err = ops->parms(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	*p_span_id = span_entry->id;
	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}
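
/* Usage sketch (modelled on the matchall mirror offload elsewhere in
 * the driver; assumed caller, not a verbatim copy):
 *
 *	int span_id, err;
 *
 *	err = mlxsw_sp_span_mirror_add(from_port, to_dev,
 *				       MLXSW_SP_SPAN_INGRESS, true,
 *				       &span_id);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_span_mirror_del(from_port, span_id,
 *				 MLXSW_SP_SPAN_INGRESS, true);
 */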

void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}

void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	ASSERT_RTNL();
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!curr->ref_count)
			continue;

		err = curr->ops->parms(curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
}