/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in message
 * @flags:		options part of the request
 * @ipv4:		dest IPv4 address of active route
 * @egress_port:	port the encapsulated packet egressed
 * @extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4:	destination IPv4 address
 * @src_ipv4:	source IPv4 address
 * @dst_addr:	destination MAC address
 * @src_addr:	source MAC address
 * @port_id:	NFP port to output packet on - associated with source IPv4
 */
struct nfp_tun_neigh {
	__be32 dst_ipv4;
	__be32 src_ipv4;
	u8 dst_addr[ETH_ALEN];
	u8 src_addr[ETH_ALEN];
	__be32 port_id;
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:		destination ipv4 address for route
 * @reserved:		reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};

/**
 * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
 * @ipv4_addr:	destination of route
 * @list:	list pointer
 */
struct nfp_ipv4_route_entry {
	__be32 ipv4_addr;
	struct list_head list;
};

#define NFP_FL_IPV4_ADDRS_MAX        32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of up to NFP_FL_IPV4_ADDRS_MAX 32-bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

/**
 * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
 * @reserved:	reserved for future use
 * @count:	number of MAC addresses in the message
 * @index:	index of MAC address in the lookup table
 * @addr:	interface MAC address
 * @addresses:	series of MACs to offload
 */
struct nfp_tun_mac_addr {
	__be16 reserved;
	__be16 count;
	struct index_mac_addr {
		__be16 index;
		u8 addr[ETH_ALEN];
	} addresses[];
};

/**
 * struct nfp_tun_mac_offload_entry - list of MACs to offload
 * @index:	index of MAC address for offloading
 * @addr:	interface MAC address
 * @list:	list pointer
 */
struct nfp_tun_mac_offload_entry {
	__be16 index;
	u8 addr[ETH_ALEN];
	struct list_head list;
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_mac_non_nfp_idx - converts non-NFP netdev ifindex to 8-bit id
 * @ifindex:	netdev ifindex of the device
 * @index:	index of netdev's MAC on NFP
 * @list:	list pointer
 */
struct nfp_tun_mac_non_nfp_idx {
	int ifindex;
	u8 index;
	struct list_head list;
};

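/* Process a periodic keep-alive message from the NFP listing tunnels that
 * passed traffic in the reporting period. Refresh the kernel neighbour entry
 * for each reported destination so that active offloaded paths are not aged
 * out.
 */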
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != sizeof(struct nfp_tun_active_tuns) +
	    sizeof(struct route_ip_info) * count) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_repr_get(app, port);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

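/* Return true if the netdev is a type whose MAC address should be offloaded
 * to the NFP; currently OVS internal ports and VxLAN devices.
 */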
static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
{
	if (!netdev->rtnl_link_ops)
		return false;
	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
		return true;
	if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
		return true;

	return false;
}

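/* Allocate a control message of the given type and length, copy in the
 * payload and send it to the firmware over the control channel.
 */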
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct sk_buff *skb;
	unsigned char *msg;

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}

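/* Return true if the destination IP is already in the offloaded route list. */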
static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->nfp_neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->nfp_neigh_off_lock);
			return true;
		}
	}
	spin_unlock_bh(&priv->nfp_neigh_off_lock);
	return false;
}

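/* Record a destination IP in the offloaded route list if it is not already
 * present. GFP_ATOMIC is used as the list spinlock is held across the
 * allocation.
 */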
static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->nfp_neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->nfp_neigh_off_lock);
			return;
		}
	}
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_bh(&priv->nfp_neigh_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
		return;
	}

	entry->ipv4_addr = ipv4_addr;
	list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
	spin_unlock_bh(&priv->nfp_neigh_off_lock);
}

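/* Remove a destination IP from the offloaded route list, if present. */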
static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->nfp_neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			list_del(&entry->list);
			kfree(entry);
			break;
		}
	}
	spin_unlock_bh(&priv->nfp_neigh_off_lock);
}

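/* Send a neighbour entry for a tunnel destination to the NFP. If the
 * neighbour is no longer valid, only the destination IP is sent (all other
 * fields zeroed) and an ARP probe is triggered to re-resolve it.
 */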
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
	struct nfp_tun_neigh payload;

	/* Only offload representor IPv4s for now. */
	if (!nfp_netdev_is_nfp_repr(netdev))
		return;

	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
	payload.dst_ipv4 = flow->daddr;

	/* If entry has expired, send dst IP with all other fields 0. */
	if (!(neigh->nud_state & NUD_VALID)) {
		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		goto send_msg;
	}

	/* Have a valid neighbour so populate rest of entry. */
	payload.src_ipv4 = flow->saddr;
	ether_addr_copy(payload.src_addr, netdev->dev_addr);
	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
	payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
	/* Add destination of new route to NFP cache. */
	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);

send_msg:
	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
				 sizeof(struct nfp_tun_neigh),
				 (unsigned char *)&payload, flag);
}

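/* Notifier handler for neighbour and redirect events. If the affected
 * destination has already been offloaded to the NFP, push the updated
 * neighbour information to the firmware.
 */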
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct nfp_app *app;
	struct rtable *rt;
	int err;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}

	flow.daddr = *(__be32 *)n->primary_key;

	/* Only concerned with route changes for representors. */
	if (!nfp_netdev_is_nfp_repr(n->dev))
		return NOTIFY_DONE;

	app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
	app = app_priv->app;

	/* Only concerned with changes to routes already added to NFP. */
	if (!nfp_tun_has_route(app, flow.daddr))
		return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup to populate flow data. */
	rt = ip_route_output_key(dev_net(n->dev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		return NOTIFY_DONE;

	/* Only the flow fields filled in by the lookup are needed; drop the
	 * route reference taken by ip_route_output_key().
	 */
	ip_rt_put(rt);
#else
	return NOTIFY_DONE;
#endif

	flow.flowi4_proto = IPPROTO_UDP;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);

	return NOTIFY_OK;
}

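/* Handle a route lookup request from the NFP. Resolve the route and
 * neighbour for the requested destination and reply with a neighbour entry,
 * warning if no route can be found.
 */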
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
	if (!netdev)
		goto route_fail_warning;

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup in the same namespace as the ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto route_fail_warning;
#else
	goto route_fail_warning;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto route_fail_warning;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
	neigh_release(n);
	return;

route_fail_warning:
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

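/* Push the current list of offloaded tunnel IPv4 addresses to the NFP. */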
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->nfp_ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->nfp_ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->nfp_ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

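/* Take a reference on a tunnel endpoint IPv4 address, adding it to the
 * offload list on first use, and sync the list to the NFP.
 */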
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->nfp_ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->nfp_ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->nfp_ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
	mutex_unlock(&priv->nfp_ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

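/* Drop a reference on a tunnel endpoint IPv4 address, removing it from the
 * offload list when the last user goes away, and sync the list to the NFP.
 */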
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->nfp_ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->nfp_ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

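/* Send all queued MAC address/index pairs to the NFP. The queue is only
 * flushed if the write succeeds, so a failed write is retried on the next
 * call.
 */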
void nfp_tunnel_write_macs(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_offload_entry *entry;
	struct nfp_tun_mac_addr *payload;
	struct list_head *ptr, *storage;
	int mac_count, err, pay_size;

	mutex_lock(&priv->nfp_mac_off_lock);
	if (!priv->nfp_mac_off_count) {
		mutex_unlock(&priv->nfp_mac_off_lock);
		return;
	}

	pay_size = sizeof(struct nfp_tun_mac_addr) +
		   sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;

	payload = kzalloc(pay_size, GFP_KERNEL);
	if (!payload) {
		mutex_unlock(&priv->nfp_mac_off_lock);
		return;
	}

	payload->count = cpu_to_be16(priv->nfp_mac_off_count);

	mac_count = 0;
	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
				   list);
		payload->addresses[mac_count].index = entry->index;
		ether_addr_copy(payload->addresses[mac_count].addr,
				entry->addr);
		mac_count++;
	}

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
				       pay_size, payload, GFP_KERNEL);

	kfree(payload);

	if (err) {
		mutex_unlock(&priv->nfp_mac_off_lock);
		/* Write failed so retain list for future retry. */
		return;
	}

	/* If list was successfully offloaded, flush it. */
	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
				   list);
		list_del(&entry->list);
		kfree(entry);
	}

	priv->nfp_mac_off_count = 0;
	mutex_unlock(&priv->nfp_mac_off_lock);
}

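/* Return the NFP MAC index for a non-NFP netdev, allocating a new 8-bit id
 * for the ifindex if one has not been assigned yet.
 */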
static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_non_nfp_idx *entry;
	struct list_head *ptr, *storage;
	int idx;

	mutex_lock(&priv->nfp_mac_index_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
		if (entry->ifindex == ifindex) {
			idx = entry->index;
			mutex_unlock(&priv->nfp_mac_index_lock);
			return idx;
		}
	}

	idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
			     NFP_MAX_MAC_INDEX, GFP_KERNEL);
	if (idx < 0) {
		mutex_unlock(&priv->nfp_mac_index_lock);
		return idx;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		/* Release the id just allocated so it is not leaked. */
		ida_simple_remove(&priv->nfp_mac_off_ids, idx);
		mutex_unlock(&priv->nfp_mac_index_lock);
		return -ENOMEM;
	}
	entry->ifindex = ifindex;
	entry->index = idx;
	list_add_tail(&entry->list, &priv->nfp_mac_index_list);
	mutex_unlock(&priv->nfp_mac_index_lock);

	return idx;
}

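/* Release the MAC index allocated for a non-NFP netdev ifindex, if any. */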
static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_non_nfp_idx *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->nfp_mac_index_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
		if (entry->ifindex == ifindex) {
			ida_simple_remove(&priv->nfp_mac_off_ids,
					  entry->index);
			list_del(&entry->list);
			kfree(entry);
			break;
		}
	}
	mutex_unlock(&priv->nfp_mac_index_lock);
}

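/* Queue a netdev MAC address for offload to the NFP. The 16-bit index holds
 * an 8-bit port or assigned id in the upper byte and the port type in the
 * lower byte.
 */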
static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
					    struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_offload_entry *entry;
	u16 nfp_mac_idx;
	int port = 0;

	/* Check if MAC should be offloaded. */
	if (!is_valid_ether_addr(netdev->dev_addr))
		return;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_repr_get_port_id(netdev);
	else if (!nfp_tun_is_netdev_to_offload(netdev))
		return;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
		return;
	}

	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
	} else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
		   NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
	} else {
		/* Must assign our own unique 8-bit index. */
		int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);

		if (idx < 0) {
			nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
			kfree(entry);
			return;
		}
		nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
	}

	entry->index = cpu_to_be16(nfp_mac_idx);
	ether_addr_copy(entry->addr, netdev->dev_addr);

	mutex_lock(&priv->nfp_mac_off_lock);
	priv->nfp_mac_off_count++;
	list_add_tail(&entry->list, &priv->nfp_mac_off_list);
	mutex_unlock(&priv->nfp_mac_off_lock);
}

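/* Netdev notifier: queue MAC offloads for netdevs that register, come up or
 * change address, and release the assigned index when a netdev goes down or
 * unregisters.
 */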
static int nfp_tun_mac_event_handler(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct net_device *netdev;
	struct nfp_app *app;

	if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
		app_priv = container_of(nb, struct nfp_flower_priv,
					nfp_tun_mac_nb);
		app = app_priv->app;
		netdev = netdev_notifier_info_to_dev(ptr);

		/* If it is a non-NFP netdev, free its offload index. */
		if (nfp_tun_is_netdev_to_offload(netdev))
			nfp_tun_del_mac_idx(app, netdev->ifindex);
	} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
		   event == NETDEV_REGISTER) {
		app_priv = container_of(nb, struct nfp_flower_priv,
					nfp_tun_mac_nb);
		app = app_priv->app;
		netdev = netdev_notifier_info_to_dev(ptr);

		nfp_tun_add_to_mac_offload_list(netdev, app);

		/* Force a list write to keep NFP up to date. */
		nfp_tunnel_write_macs(app);
	}
	return NOTIFY_OK;
}

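/* Initialise tunnel offload state and register the netdev and netevent
 * notifiers. Existing netdevs are walked so that MACs which should already
 * be offloaded are queued.
 */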
int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *netdev;
	int err;

	/* Initialise priv data for MAC offloading. */
	priv->nfp_mac_off_count = 0;
	mutex_init(&priv->nfp_mac_off_lock);
	INIT_LIST_HEAD(&priv->nfp_mac_off_list);
	priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
	mutex_init(&priv->nfp_mac_index_lock);
	INIT_LIST_HEAD(&priv->nfp_mac_index_list);
	ida_init(&priv->nfp_mac_off_ids);

	/* Initialise priv data for IPv4 offloading. */
	mutex_init(&priv->nfp_ipv4_off_lock);
	INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);

	/* Initialise priv data for neighbour offloading. */
	spin_lock_init(&priv->nfp_neigh_off_lock);
	INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
	priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
	if (err)
		goto err_free_mac_ida;

	err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
	if (err)
		goto err_unreg_mac_nb;

	/* Parse already registered netdevs for MACs that need offloading. */
	rtnl_lock();
	for_each_netdev(&init_net, netdev)
		nfp_tun_add_to_mac_offload_list(netdev, app);
	rtnl_unlock();

	return 0;

err_unreg_mac_nb:
	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
err_free_mac_ida:
	ida_destroy(&priv->nfp_mac_off_ids);
	return err;
}

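/* Unregister the notifiers and free all cached tunnel offload state. */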
void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_tun_mac_offload_entry *mac_entry;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *route_entry;
	struct nfp_tun_mac_non_nfp_idx *mac_idx;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct list_head *ptr, *storage;

	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
	unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);

	/* Free any memory that may be occupied by MAC list. */
	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
		mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
				       list);
		list_del(&mac_entry->list);
		kfree(mac_entry);
	}

	/* Free any memory that may be occupied by MAC index list. */
	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
		mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
				     list);
		list_del(&mac_idx->list);
		kfree(mac_idx);
	}

	ida_destroy(&priv->nfp_mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	/* Free any memory that may be occupied by the route list. */
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
					 list);
		list_del(&route_entry->list);
		kfree(route_entry);
	}
}