// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <net/vxlan.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in the message
 * @flags:		options part of the request
 * @tun_info.ipv4:		dest IPv4 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4:	destination IPv4 address
 * @src_ipv4:	source IPv4 address
 * @dst_addr:	destination MAC address
 * @src_addr:	source MAC address
 * @port_id:	NFP port to output packet on - associated with source IPv4
 */
struct nfp_tun_neigh {
	__be32 dst_ipv4;
	__be32 src_ipv4;
	u8 dst_addr[ETH_ALEN];
	u8 src_addr[ETH_ALEN];
	__be32 port_id;
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:		destination ipv4 address for route
 * @reserved:		reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};

/**
 * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
 * @ipv4_addr:	destination of route
 * @list:	list pointer
 */
struct nfp_ipv4_route_entry {
	__be32 ipv4_addr;
	struct list_head list;
};

#define NFP_FL_IPV4_ADDRS_MAX        32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

/**
 * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
 * @reserved:	reserved for future use
 * @count:	number of MAC addresses in the message
 * @addresses.index:	index of MAC address in the lookup table
 * @addresses.addr:	interface MAC address
 * @addresses:	series of MACs to offload
 */
struct nfp_tun_mac_addr {
	__be16 reserved;
	__be16 count;
	struct index_mac_addr {
		__be16 index;
		u8 addr[ETH_ALEN];
	} addresses[];
};

/**
 * struct nfp_tun_mac_offload_entry - list of MACs to offload
 * @index:	index of MAC address for offloading
 * @addr:	interface MAC address
 * @list:	list pointer
 */
struct nfp_tun_mac_offload_entry {
	__be16 index;
	u8 addr[ETH_ALEN];
	struct list_head list;
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_mac_non_nfp_idx - converts non NFP netdev ifindex to 8-bit id
 * @ifindex:	netdev ifindex of the device
 * @index:	index of the netdev's MAC on the NFP
 * @list:	list pointer
 */
struct nfp_tun_mac_non_nfp_idx {
	int ifindex;
	u8 index;
	struct list_head list;
};
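
/* Handle the periodic active-tunnels keep-alive message from the NFP: for
 * each reported tunnel, look up the neighbour for its destination IP on the
 * egress representor and update its used timestamp so the kernel does not
 * expire it.
 */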
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != sizeof(struct nfp_tun_active_tuns) +
	    sizeof(struct route_ip_info) * count) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_repr_get(app, port);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
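
/* Return true for netdev types whose MAC addresses should be offloaded as
 * tunnel end points: Open vSwitch (kind "openvswitch") and VXLAN netdevs.
 */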
static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
{
	if (!netdev->rtnl_link_ops)
		return false;
	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
		return true;
	if (netif_is_vxlan(netdev))
		return true;

	return false;
}
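
/* Allocate a control message of the given type and length, copy in the
 * payload and send it to the firmware over the control channel.
 */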
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct sk_buff *skb;
	unsigned char *msg;

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}
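
/* The helpers below maintain a driver-side cache of destination IPs whose
 * neighbour entries have been written to the NFP, so later neighbour events
 * can be filtered to routes already added to the firmware.
 */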
static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->nfp_neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->nfp_neigh_off_lock);
			return true;
		}
	}
	spin_unlock_bh(&priv->nfp_neigh_off_lock);
	return false;
}

static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->nfp_neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->nfp_neigh_off_lock);
			return;
		}
	}
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_bh(&priv->nfp_neigh_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
		return;
	}

	entry->ipv4_addr = ipv4_addr;
	list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
	spin_unlock_bh(&priv->nfp_neigh_off_lock);
}

static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->nfp_neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			list_del(&entry->list);
			kfree(entry);
			break;
		}
	}
	spin_unlock_bh(&priv->nfp_neigh_off_lock);
}
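
/* Write a neighbour entry for the given flow to the NFP. If the neighbour is
 * invalid or dead, only the destination IP is sent with all other fields
 * zeroed, and an ARP probe is triggered to re-resolve it.
 */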
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
	struct nfp_tun_neigh payload;

	/* Only offload representor IPv4s for now. */
	if (!nfp_netdev_is_nfp_repr(netdev))
		return;

	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
	payload.dst_ipv4 = flow->daddr;

	/* If entry has expired send dst IP with all other fields 0. */
	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		goto send_msg;
	}

	/* Have a valid neighbour so populate rest of entry. */
	payload.src_ipv4 = flow->saddr;
	ether_addr_copy(payload.src_addr, netdev->dev_addr);
	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
	payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
	/* Add destination of new route to NFP cache. */
	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);

send_msg:
	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
				 sizeof(struct nfp_tun_neigh),
				 (unsigned char *)&payload, flag);
}
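
/* Netevent notifier: on neighbour updates or redirects that involve an NFP
 * representor and a destination already offloaded to the NFP, push the new
 * neighbour state down to the firmware.
 */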
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct nfp_app *app;
	struct rtable *rt;
	int err;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}

	flow.daddr = *(__be32 *)n->primary_key;

	/* Only concerned with route changes for representors. */
	if (!nfp_netdev_is_nfp_repr(n->dev))
		return NOTIFY_DONE;

	app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
	app = app_priv->app;

	/* Only concerned with changes to routes already added to NFP. */
	if (!nfp_tun_has_route(app, flow.daddr))
		return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup to populate flow data. */
	rt = ip_route_output_key(dev_net(n->dev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		return NOTIFY_DONE;

	ip_rt_put(rt);
#else
	return NOTIFY_DONE;
#endif

	flow.flowi4_proto = IPPROTO_UDP;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);

	return NOTIFY_OK;
}
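
/* Handle a route lookup request from the NFP: resolve the route and
 * neighbour for the requested destination in the namespace of the ingress
 * port and reply with a neighbour entry, or warn if no route is found.
 */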
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
	if (!netdev)
		goto route_fail_warning;

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto route_fail_warning;
#else
	goto route_fail_warning;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto route_fail_warning;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
	neigh_release(n);
	return;

route_fail_warning:
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}
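
/* Write the current list of offloaded tunnel endpoint IPv4 addresses to the
 * NFP. The list itself is reference counted per address by the add/del
 * helpers below.
 */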
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->nfp_ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->nfp_ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->nfp_ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->nfp_ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->nfp_ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->nfp_ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
	mutex_unlock(&priv->nfp_ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->nfp_ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->nfp_ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}
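
/* Send all pending tunnel endpoint MAC addresses to the NFP and, if the
 * write succeeds, flush the pending list.
 */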
void nfp_tunnel_write_macs(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_offload_entry *entry;
	struct nfp_tun_mac_addr *payload;
	struct list_head *ptr, *storage;
	int mac_count, err, pay_size;

	mutex_lock(&priv->nfp_mac_off_lock);
	if (!priv->nfp_mac_off_count) {
		mutex_unlock(&priv->nfp_mac_off_lock);
		return;
	}

	pay_size = sizeof(struct nfp_tun_mac_addr) +
		   sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;

	payload = kzalloc(pay_size, GFP_KERNEL);
	if (!payload) {
		mutex_unlock(&priv->nfp_mac_off_lock);
		return;
	}

	payload->count = cpu_to_be16(priv->nfp_mac_off_count);

	mac_count = 0;
	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
				   list);
		payload->addresses[mac_count].index = entry->index;
		ether_addr_copy(payload->addresses[mac_count].addr,
				entry->addr);
		mac_count++;
	}

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
				       pay_size, payload, GFP_KERNEL);

	kfree(payload);

	if (err) {
		mutex_unlock(&priv->nfp_mac_off_lock);
		/* Write failed so retain list for future retry. */
		return;
	}

	/* If list was successfully offloaded, flush it. */
	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
				   list);
		list_del(&entry->list);
		kfree(entry);
	}

	priv->nfp_mac_off_count = 0;
	mutex_unlock(&priv->nfp_mac_off_lock);
}
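
/* Look up the 8-bit MAC index for a non-NFP netdev by ifindex, allocating a
 * new index from the IDA if none exists yet.
 */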
static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_non_nfp_idx *entry;
	struct list_head *ptr, *storage;
	int idx;

	mutex_lock(&priv->nfp_mac_index_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
		if (entry->ifindex == ifindex) {
			idx = entry->index;
			mutex_unlock(&priv->nfp_mac_index_lock);
			return idx;
		}
	}

	idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
			     NFP_MAX_MAC_INDEX, GFP_KERNEL);
	if (idx < 0) {
		mutex_unlock(&priv->nfp_mac_index_lock);
		return idx;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->nfp_mac_index_lock);
		return -ENOMEM;
	}
	entry->ifindex = ifindex;
	entry->index = idx;
	list_add_tail(&entry->list, &priv->nfp_mac_index_list);
	mutex_unlock(&priv->nfp_mac_index_lock);

	return idx;
}

static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_non_nfp_idx *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->nfp_mac_index_lock);
	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
		if (entry->ifindex == ifindex) {
			ida_simple_remove(&priv->nfp_mac_off_ids,
					  entry->index);
			list_del(&entry->list);
			kfree(entry);
			break;
		}
	}
	mutex_unlock(&priv->nfp_mac_index_lock);
}
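
/* Queue a netdev's MAC address for offload. Representors derive the index
 * from their port ID; other offloadable netdevs are assigned an index via
 * nfp_tun_get_mac_idx().
 */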
static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
					    struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_mac_offload_entry *entry;
	u16 nfp_mac_idx;
	int port = 0;

	/* Check if MAC should be offloaded. */
	if (!is_valid_ether_addr(netdev->dev_addr))
		return;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_repr_get_port_id(netdev);
	else if (!nfp_tun_is_netdev_to_offload(netdev))
		return;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
		return;
	}

	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
	} else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
		   NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
	} else {
		/* Must assign our own unique 8-bit index. */
		int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);

		if (idx < 0) {
			nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
			kfree(entry);
			return;
		}
		nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
	}

	entry->index = cpu_to_be16(nfp_mac_idx);
	ether_addr_copy(entry->addr, netdev->dev_addr);

	mutex_lock(&priv->nfp_mac_off_lock);
	priv->nfp_mac_off_count++;
	list_add_tail(&entry->list, &priv->nfp_mac_off_list);
	mutex_unlock(&priv->nfp_mac_off_lock);
}
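
/* Netdev notifier: keep the offloaded MAC list in sync as netdevs register,
 * come up, change address, go down or unregister.
 */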
static int nfp_tun_mac_event_handler(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct net_device *netdev;
	struct nfp_app *app;

	if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
		app_priv = container_of(nb, struct nfp_flower_priv,
					nfp_tun_mac_nb);
		app = app_priv->app;
		netdev = netdev_notifier_info_to_dev(ptr);

		/* If non-nfp netdev then free its offload index. */
		if (nfp_tun_is_netdev_to_offload(netdev))
			nfp_tun_del_mac_idx(app, netdev->ifindex);
	} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
		   event == NETDEV_REGISTER) {
		app_priv = container_of(nb, struct nfp_flower_priv,
					nfp_tun_mac_nb);
		app = app_priv->app;
		netdev = netdev_notifier_info_to_dev(ptr);

		nfp_tun_add_to_mac_offload_list(netdev, app);

		/* Force a list write to keep NFP up to date. */
		nfp_tunnel_write_macs(app);
	}
	return NOTIFY_OK;
}

int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *netdev;
	int err;

	/* Initialise priv data for MAC offloading. */
	priv->nfp_mac_off_count = 0;
	mutex_init(&priv->nfp_mac_off_lock);
	INIT_LIST_HEAD(&priv->nfp_mac_off_list);
	priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
	mutex_init(&priv->nfp_mac_index_lock);
	INIT_LIST_HEAD(&priv->nfp_mac_index_list);
	ida_init(&priv->nfp_mac_off_ids);

	/* Initialise priv data for IPv4 offloading. */
	mutex_init(&priv->nfp_ipv4_off_lock);
	INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);

	/* Initialise priv data for neighbour offloading. */
	spin_lock_init(&priv->nfp_neigh_off_lock);
	INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
	priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
	if (err)
		goto err_free_mac_ida;

	err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
	if (err)
		goto err_unreg_mac_nb;

	/* Parse netdevs already registered for MACs that need to be offloaded. */
	rtnl_lock();
	for_each_netdev(&init_net, netdev)
		nfp_tun_add_to_mac_offload_list(netdev, app);
	rtnl_unlock();

	return 0;

err_unreg_mac_nb:
	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
err_free_mac_ida:
	ida_destroy(&priv->nfp_mac_off_ids);
	return err;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_tun_mac_offload_entry *mac_entry;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *route_entry;
	struct nfp_tun_mac_non_nfp_idx *mac_idx;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct list_head *ptr, *storage;

	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
	unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);

	/* Free any memory that may be occupied by MAC list. */
	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
		mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
				       list);
		list_del(&mac_entry->list);
		kfree(mac_entry);
	}

	/* Free any memory that may be occupied by MAC index list. */
	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
		mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
				     list);
		list_del(&mac_idx->list);
		kfree(mac_idx);
	}

	ida_destroy(&priv->nfp_mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	/* Free any memory that may be occupied by the route list. */
	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
					 list);
		list_del(&route_entry->list);
		kfree(route_entry);
	}
}