// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in message
 * @flags:		options part of the request
 * @tun_info.ipv4:		dest IPv4 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4:	destination IPv4 address
 * @src_ipv4:	source IPv4 address
 * @dst_addr:	destination MAC address
 * @src_addr:	source MAC address
 * @port_id:	NFP port to output packet on - associated with source IPv4
 */
struct nfp_tun_neigh {
	__be32 dst_ipv4;
	__be32 src_ipv4;
	u8 dst_addr[ETH_ALEN];
	u8 src_addr[ETH_ALEN];
	__be32 port_id;
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:		destination ipv4 address for route
 * @reserved:		reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};

/**
 * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
 * @ipv4_addr:	destination of route
 * @list:	list pointer
 */
struct nfp_ipv4_route_entry {
	__be32 ipv4_addr;
	struct list_head list;
};

#define NFP_FL_IPV4_ADDRS_MAX        32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:	MAC address offload options
 * @count:	number of MAC addresses in the message (should be 1)
 * @index:	index of MAC address in the lookup table
 * @addr:	interface MAC address
 */
struct nfp_tun_mac_addr_offload {
	__be16 flags;
	__be16 count;
	__be16 index;
	u8 addr[ETH_ALEN];
};

enum nfp_flower_mac_offload_cmd {
	NFP_TUNNEL_MAC_OFFLOAD_ADD =		0,
	NFP_TUNNEL_MAC_OFFLOAD_DEL =		1,
	NFP_TUNNEL_MAC_OFFLOAD_MOD =		2,
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:	Hashtable entry
 * @addr:	Offloaded MAC address
 * @index:	Offloaded index for given MAC address
 * @ref_count:	Number of devs using this MAC address
 * @repr_list:	List of reprs sharing this MAC address
 */
struct nfp_tun_offloaded_mac {
	struct rhash_head ht_node;
	u8 addr[ETH_ALEN];
	u16 index;
	int ref_count;
	struct list_head repr_list;
};

static const struct rhashtable_params offloaded_macs_params = {
	.key_offset	= offsetof(struct nfp_tun_offloaded_mac, addr),
	.head_offset	= offsetof(struct nfp_tun_offloaded_mac, ht_node),
	.key_len	= ETH_ALEN,
	.automatic_shrinking	= true,
};

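/* Process a periodic keep-alive message from the firmware. Each entry
 * reports a tunnel destination IP and the port its traffic egressed;
 * look up the matching kernel neighbour entry and mark it as used so
 * that neighbours still carrying offloaded traffic are not aged out.
 */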
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != sizeof(struct nfp_tun_active_tuns) +
	    sizeof(struct route_ip_info) * count) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_repr_get(app, port);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

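/* Allocate a control message of the given type and payload length, copy
 * the payload in and transmit it to the firmware over the control vNIC.
 */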
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct sk_buff *skb;
	unsigned char *msg;

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}

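/* Return true if the destination IP already has an entry in the
 * offloaded neighbour/route cache.
 */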
static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->tun.neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->tun.neigh_off_lock);
			return true;
		}
	}
	spin_unlock_bh(&priv->tun.neigh_off_lock);
	return false;
}

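/* Record a destination IP in the offloaded route cache; a no-op if it
 * is already present. May run in atomic context, hence GFP_ATOMIC.
 */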
static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->tun.neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->tun.neigh_off_lock);
			return;
		}
	}
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_bh(&priv->tun.neigh_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
		return;
	}

	entry->ipv4_addr = ipv4_addr;
	list_add_tail(&entry->list, &priv->tun.neigh_off_list);
	spin_unlock_bh(&priv->tun.neigh_off_lock);
}

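/* Remove a destination IP from the offloaded route cache, if present. */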
static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->tun.neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			list_del(&entry->list);
			kfree(entry);
			break;
		}
	}
	spin_unlock_bh(&priv->tun.neigh_off_lock);
}

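/* Send a neighbour entry to the firmware. A valid neighbour is written
 * fully populated and its destination cached; an invalid or dead one is
 * sent with only the destination IP set, signalling that the entry is
 * no longer valid, and an ARP probe is triggered to re-resolve it.
 */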
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
	struct nfp_tun_neigh payload;

	/* Only offload representor IPv4s for now. */
	if (!nfp_netdev_is_nfp_repr(netdev))
		return;

	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
	payload.dst_ipv4 = flow->daddr;

	/* If entry has expired send dst IP with all other fields 0. */
	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		goto send_msg;
	}

	/* Have a valid neighbour so populate rest of entry. */
	payload.src_ipv4 = flow->saddr;
	ether_addr_copy(payload.src_addr, netdev->dev_addr);
	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
	payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
	/* Add destination of new route to NFP cache. */
	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);

send_msg:
	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
				 sizeof(struct nfp_tun_neigh),
				 (unsigned char *)&payload, flag);
}

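/* Netevent notifier: on neighbour updates and redirects that touch a
 * route already offloaded to the NFP, push the refreshed neighbour
 * entry down to the firmware.
 */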
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct nfp_app *app;
	struct rtable *rt;
	int err;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}

	flow.daddr = *(__be32 *)n->primary_key;

	/* Only concerned with route changes for representors. */
	if (!nfp_netdev_is_nfp_repr(n->dev))
		return NOTIFY_DONE;

	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
	app = app_priv->app;

	/* Only concerned with changes to routes already added to NFP. */
	if (!nfp_tun_has_route(app, flow.daddr))
		return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup to populate flow data. */
	rt = ip_route_output_key(dev_net(n->dev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		return NOTIFY_DONE;

	ip_rt_put(rt);
#else
	return NOTIFY_DONE;
#endif

	flow.flowi4_proto = IPPROTO_UDP;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);

	return NOTIFY_OK;
}

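/* Handle a route lookup request from the firmware: resolve the route
 * and neighbour in the namespace of the ingress port and reply with a
 * neighbour entry, or warn if no route is found.
 */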
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
	if (!netdev)
		goto route_fail_warning;

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto route_fail_warning;
#else
	goto route_fail_warning;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto route_fail_warning;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
	neigh_release(n);
	return;

route_fail_warning:
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

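/* Write the full list of offloaded tunnel endpoint IPv4 addresses to
 * the firmware in a single control message.
 */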
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->tun.ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->tun.ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

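/* Take a reference on a tunnel endpoint IP. An address seen for the
 * first time is appended to the cached list and the firmware copy is
 * rewritten; an existing address only gains a reference.
 */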
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

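/* Drop a reference on a tunnel endpoint IP, removing it from the cached
 * list on the last reference, then rewrite the firmware list.
 */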
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

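/* Send a single MAC address add or delete to the firmware lookup table. */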
static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
	struct nfp_tun_mac_addr_offload payload;

	memset(&payload, 0, sizeof(payload));

	if (del)
		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

	/* FW supports multiple MACs per cmsg but we restrict to a single one. */
	payload.count = cpu_to_be16(1);
	payload.index = cpu_to_be16(idx);
	ether_addr_copy(payload.addr, mac);

	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
					sizeof(struct nfp_tun_mac_addr_offload),
					&payload, GFP_KERNEL);
}

static bool nfp_tunnel_port_is_phy_repr(int port)
{
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
		return true;

	return false;
}

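/* Offloaded MAC indexes encode their owner in the low byte: a physical
 * port repr carries its port number above a PHYS_PORT type byte, while
 * shared and non-repr MACs carry an ida-allocated global ID above an
 * OTHER_PORT type byte.
 */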
static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
	return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
				      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
					   struct net_device *netdev, bool mod)
{
	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;

		/* If modifying MAC, remove repr from old list first. */
		if (mod)
			list_del(&repr_priv->mac_list);

		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
	}

	entry->ref_count++;
}

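/* Offload a netdev's MAC, sharing an existing table entry when another
 * device already uses the address. A lone physical port repr gets a
 * port-based index; shared or non-repr MACs get a global ida-based one.
 */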
static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  int port, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	int ida_idx = NFP_MAX_MAC_INDEX, err;
	struct nfp_tun_offloaded_mac *entry;
	u16 nfp_mac_idx = 0;

	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
		nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
		return 0;
	}

	/* Assign a global index if non-repr or MAC address is now shared. */
	if (entry || !port) {
		ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
					 NFP_MAX_MAC_INDEX, GFP_KERNEL);
		if (ida_idx < 0)
			return ida_idx;

		nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
	} else {
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
	}

	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			err = -ENOMEM;
			goto err_free_ida;
		}

		ether_addr_copy(entry->addr, netdev->dev_addr);
		INIT_LIST_HEAD(&entry->repr_list);

		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
					   &entry->ht_node,
					   offloaded_macs_params)) {
			err = -ENOMEM;
			goto err_free_entry;
		}
	}

	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
				       nfp_mac_idx, false);
	if (err) {
		/* If not shared then free. */
		if (!entry->ref_count)
			goto err_remove_hash;
		goto err_free_ida;
	}

	entry->index = nfp_mac_idx;
	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

	return 0;

err_remove_hash:
	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
			       offloaded_macs_params);
err_free_entry:
	kfree(entry);
err_free_ida:
	if (ida_idx != NFP_MAX_MAC_INDEX)
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);

	return err;
}

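/* Release a reference on an offloaded MAC. If exactly one repr is left
 * using the address, demote its index back to a port-based one; on the
 * last reference remove the table entry and tell the firmware to delete
 * the MAC.
 */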
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  u8 *mac, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_tun_offloaded_mac *entry;
	struct nfp_repr *repr;
	int ida_idx;

	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
	if (!entry)
		return 0;

	entry->ref_count--;
	/* If del is part of a mod then mac_list is still in use elsewhere. */
	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;
		list_del(&repr_priv->mac_list);
	}

	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
		u16 nfp_mac_idx;
		int port, err;

		repr_priv = list_first_entry(&entry->repr_list,
					     struct nfp_flower_repr_priv,
					     mac_list);
		repr = repr_priv->nfp_repr;
		port = nfp_repr_get_port_id(repr->netdev);
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
		if (err) {
			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
					     netdev_name(netdev));
			return 0;
		}

		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
		entry->index = nfp_mac_idx;
		return 0;
	}

	if (entry->ref_count)
		return 0;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
					    &entry->ht_node,
					    offloaded_macs_params));
	/* If MAC has global ID then extract and free the ida entry. */
	if (nfp_tunnel_is_mac_idx_global(entry->index)) {
		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
	}

	kfree(entry);

	return __nfp_tunnel_offload_mac(app, mac, 0, true);
}

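/* Apply a MAC offload command (add, delete or modify) for a netdev,
 * tracking offload state in repr or non-repr private data so deletes
 * and modifies only act on addresses that were successfully offloaded.
 */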
static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
		       enum nfp_flower_mac_offload_cmd cmd)
{
	struct nfp_flower_non_repr_priv *nr_priv = NULL;
	bool non_repr = false, *mac_offloaded;
	u8 *off_mac = NULL;
	int err, port = 0;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return 0;

		repr_priv = repr->app_priv;
		mac_offloaded = &repr_priv->mac_offloaded;
		off_mac = &repr_priv->offloaded_mac_addr[0];
		port = nfp_repr_get_port_id(netdev);
		if (!nfp_tunnel_port_is_phy_repr(port))
			return 0;
	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
		if (!nr_priv)
			return -ENOMEM;

		mac_offloaded = &nr_priv->mac_offloaded;
		off_mac = &nr_priv->offloaded_mac_addr[0];
		non_repr = true;
	} else {
		return 0;
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

	switch (cmd) {
	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
		if (err)
			goto err_put_non_repr_priv;

		if (non_repr)
			__nfp_flower_non_repr_priv_get(nr_priv);

		*mac_offloaded = true;
		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
		/* Only attempt delete if add was successful. */
		if (!*mac_offloaded)
			break;

		if (non_repr)
			__nfp_flower_non_repr_priv_put(nr_priv);

		*mac_offloaded = false;

		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
						false);
		if (err)
			goto err_put_non_repr_priv;

		break;
	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
		/* Ignore if changing to the same address. */
		if (ether_addr_equal(netdev->dev_addr, off_mac))
			break;

		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
		if (err)
			goto err_put_non_repr_priv;

		/* Delete the previous MAC address. */
		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
					     netdev_name(netdev));

		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	default:
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return 0;

err_put_non_repr_priv:
	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return err;
}

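/* Netdev notifier: map UP, DOWN and CHANGEADDR events onto MAC offload
 * add, delete and modify commands.
 */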
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
				 struct net_device *netdev,
				 unsigned long event, void *ptr)
{
	int err;

	if (event == NETDEV_DOWN) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_DEL);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_UP) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_ADD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEADDR) {
		/* Only offload addr change if netdev is already up. */
		if (!(netdev->flags & IFF_UP))
			return NOTIFY_OK;

		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
					     netdev_name(netdev));
	}
	return NOTIFY_OK;
}

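/* Initialise all tunnel offload state and register the netevent
 * notifier used to track neighbour changes.
 */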
int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	/* Initialise rhash for MAC offload tracking. */
	err = rhashtable_init(&priv->tun.offloaded_macs,
			      &offloaded_macs_params);
	if (err)
		return err;

	ida_init(&priv->tun.mac_off_ids);

	/* Initialise priv data for IPv4 offloading. */
	mutex_init(&priv->tun.ipv4_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);

	/* Initialise priv data for neighbour offloading. */
	spin_lock_init(&priv->tun.neigh_off_lock);
	INIT_LIST_HEAD(&priv->tun.neigh_off_list);
	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netevent_notifier(&priv->tun.neigh_nb);
	if (err) {
		rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
					    nfp_check_rhashtable_empty, NULL);
		return err;
	}

	return 0;
}

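/* Unregister the netevent notifier and free all cached tunnel offload
 * state.
 */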
void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *route_entry;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct list_head *ptr, *storage;

	unregister_netevent_notifier(&priv->tun.neigh_nb);

	ida_destroy(&priv->tun.mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	/* Free any memory that may be occupied by the route list. */
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
					 list);
		list_del(&route_entry->list);
		kfree(route_entry);
	}

	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
				    nfp_check_rhashtable_empty, NULL);
}