1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/inetdevice.h>
6 #include <net/netevent.h>
7 #include <linux/idr.h>
8 #include <net/dst_metadata.h>
9 #include <net/arp.h>
10 
11 #include "cmsg.h"
12 #include "main.h"
13 #include "../nfp_net_repr.h"
14 #include "../nfp_net.h"
15 
16 #define NFP_FL_MAX_ROUTES               32
17 
18 #define NFP_TUN_PRE_TUN_RULE_LIMIT	32
19 #define NFP_TUN_PRE_TUN_RULE_DEL	0x1
20 #define NFP_TUN_PRE_TUN_IDX_BIT		0x8
21 
22 /**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:		options for the rule offload
25  * @port_idx:		index of destination MAC address for the rule
26  * @vlan_tci:		VLAN info associated with MAC
27  * @host_ctx_id:	stats context of rule to update
28  */
29 struct nfp_tun_pre_tun_rule {
30 	__be32 flags;
31 	__be16 port_idx;
32 	__be16 vlan_tci;
33 	__be32 host_ctx_id;
34 };
35 
36 /**
37  * struct nfp_tun_active_tuns - periodic message of active tunnels
38  * @seq:		sequence number of the message
 * @count:		number of tunnels reported in message
40  * @flags:		options part of the request
41  * @tun_info.ipv4:		dest IPv4 address of active route
42  * @tun_info.egress_port:	port the encapsulated packet egressed
43  * @tun_info.extra:		reserved for future use
44  * @tun_info:		tunnels that have sent traffic in reported period
45  */
46 struct nfp_tun_active_tuns {
47 	__be32 seq;
48 	__be32 count;
49 	__be32 flags;
50 	struct route_ip_info {
51 		__be32 ipv4;
52 		__be32 egress_port;
53 		__be32 extra[2];
54 	} tun_info[];
55 };
56 
57 /**
58  * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
59  * @seq:		sequence number of the message
 * @count:		number of tunnels reported in message
61  * @flags:		options part of the request
62  * @tun_info.ipv6:		dest IPv6 address of active route
63  * @tun_info.egress_port:	port the encapsulated packet egressed
64  * @tun_info:		tunnels that have sent traffic in reported period
65  */
66 struct nfp_tun_active_tuns_v6 {
67 	__be32 seq;
68 	__be32 count;
69 	__be32 flags;
70 	struct route_ip_info_v6 {
71 		struct in6_addr ipv6;
72 		__be32 egress_port;
73 	} tun_info[];
74 };
75 
76 /**
77  * struct nfp_tun_neigh - neighbour/route entry on the NFP
78  * @dst_ipv4:	destination IPv4 address
79  * @src_ipv4:	source IPv4 address
80  * @dst_addr:	destination MAC address
81  * @src_addr:	source MAC address
82  * @port_id:	NFP port to output packet on - associated with source IPv4
83  */
84 struct nfp_tun_neigh {
85 	__be32 dst_ipv4;
86 	__be32 src_ipv4;
87 	u8 dst_addr[ETH_ALEN];
88 	u8 src_addr[ETH_ALEN];
89 	__be32 port_id;
90 };
91 
92 /**
93  * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
94  * @dst_ipv6:	destination IPv6 address
95  * @src_ipv6:	source IPv6 address
96  * @dst_addr:	destination MAC address
97  * @src_addr:	source MAC address
98  * @port_id:	NFP port to output packet on - associated with source IPv6
99  */
100 struct nfp_tun_neigh_v6 {
101 	struct in6_addr dst_ipv6;
102 	struct in6_addr src_ipv6;
103 	u8 dst_addr[ETH_ALEN];
104 	u8 src_addr[ETH_ALEN];
105 	__be32 port_id;
106 };
107 
108 /**
109  * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
110  * @ingress_port:	ingress port of packet that signalled request
111  * @ipv4_addr:		destination ipv4 address for route
112  * @reserved:		reserved for future use
113  */
114 struct nfp_tun_req_route_ipv4 {
115 	__be32 ingress_port;
116 	__be32 ipv4_addr;
117 	__be32 reserved[2];
118 };
119 
120 /**
121  * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
122  * @ingress_port:	ingress port of packet that signalled request
123  * @ipv6_addr:		destination ipv6 address for route
124  */
125 struct nfp_tun_req_route_ipv6 {
126 	__be32 ingress_port;
127 	struct in6_addr ipv6_addr;
128 };
129 
130 /**
131  * struct nfp_offloaded_route - routes that are offloaded to the NFP
132  * @list:	list pointer
133  * @ip_add:	destination of route - can be IPv4 or IPv6
134  */
135 struct nfp_offloaded_route {
136 	struct list_head list;
137 	u8 ip_add[];
138 };
139 
140 #define NFP_FL_IPV4_ADDRS_MAX        32
141 
142 /**
143  * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
144  * @count:	number of IPs populated in the array
145  * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
146  */
147 struct nfp_tun_ipv4_addr {
148 	__be32 count;
149 	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
150 };
151 
152 /**
153  * struct nfp_ipv4_addr_entry - cached IPv4 addresses
154  * @ipv4_addr:	IP address
155  * @ref_count:	number of rules currently using this IP
156  * @list:	list pointer
157  */
158 struct nfp_ipv4_addr_entry {
159 	__be32 ipv4_addr;
160 	int ref_count;
161 	struct list_head list;
162 };
163 
164 #define NFP_FL_IPV6_ADDRS_MAX        4
165 
166 /**
167  * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
168  * @count:	number of IPs populated in the array
169  * @ipv6_addr:	array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
170  */
171 struct nfp_tun_ipv6_addr {
172 	__be32 count;
173 	struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
174 };
175 
176 #define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2
177 
178 /**
179  * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
180  * @flags:	MAC address offload options
181  * @count:	number of MAC addresses in the message (should be 1)
182  * @index:	index of MAC address in the lookup table
183  * @addr:	interface MAC address
184  */
185 struct nfp_tun_mac_addr_offload {
186 	__be16 flags;
187 	__be16 count;
188 	__be16 index;
189 	u8 addr[ETH_ALEN];
190 };
191 
192 enum nfp_flower_mac_offload_cmd {
193 	NFP_TUNNEL_MAC_OFFLOAD_ADD =		0,
194 	NFP_TUNNEL_MAC_OFFLOAD_DEL =		1,
195 	NFP_TUNNEL_MAC_OFFLOAD_MOD =		2,
196 };
197 
198 #define NFP_MAX_MAC_INDEX       0xff
199 
200 /**
201  * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
202  * @ht_node:		Hashtable entry
203  * @addr:		Offloaded MAC address
204  * @index:		Offloaded index for given MAC address
205  * @ref_count:		Number of devs using this MAC address
206  * @repr_list:		List of reprs sharing this MAC address
207  * @bridge_count:	Number of bridge/internal devs with MAC
208  */
209 struct nfp_tun_offloaded_mac {
210 	struct rhash_head ht_node;
211 	u8 addr[ETH_ALEN];
212 	u16 index;
213 	int ref_count;
214 	struct list_head repr_list;
215 	int bridge_count;
216 };
217 
218 static const struct rhashtable_params offloaded_macs_params = {
219 	.key_offset	= offsetof(struct nfp_tun_offloaded_mac, addr),
220 	.head_offset	= offsetof(struct nfp_tun_offloaded_mac, ht_node),
221 	.key_len	= ETH_ALEN,
222 	.automatic_shrinking	= true,
223 };
224 
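/* Handle the periodic active-tunnels message from the firmware. For each
 * reported IPv4 destination, look up the neighbour on the egress netdev and
 * poke it so that entries still in use by offloaded tunnels do not expire.
 */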
225 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
226 {
227 	struct nfp_tun_active_tuns *payload;
228 	struct net_device *netdev;
229 	int count, i, pay_len;
230 	struct neighbour *n;
231 	__be32 ipv4_addr;
232 	u32 port;
233 
234 	payload = nfp_flower_cmsg_get_data(skb);
235 	count = be32_to_cpu(payload->count);
236 	if (count > NFP_FL_MAX_ROUTES) {
237 		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
238 		return;
239 	}
240 
241 	pay_len = nfp_flower_cmsg_get_data_len(skb);
242 	if (pay_len != struct_size(payload, tun_info, count)) {
243 		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
244 		return;
245 	}
246 
247 	rcu_read_lock();
248 	for (i = 0; i < count; i++) {
249 		ipv4_addr = payload->tun_info[i].ipv4;
250 		port = be32_to_cpu(payload->tun_info[i].egress_port);
251 		netdev = nfp_app_dev_get(app, port, NULL);
252 		if (!netdev)
253 			continue;
254 
255 		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
256 		if (!n)
257 			continue;
258 
259 		/* Update the used timestamp of neighbour */
260 		neigh_event_send(n, NULL);
261 		neigh_release(n);
262 	}
263 	rcu_read_unlock();
264 }
265 
266 void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
267 {
268 #if IS_ENABLED(CONFIG_IPV6)
269 	struct nfp_tun_active_tuns_v6 *payload;
270 	struct net_device *netdev;
271 	int count, i, pay_len;
272 	struct neighbour *n;
273 	void *ipv6_add;
274 	u32 port;
275 
276 	payload = nfp_flower_cmsg_get_data(skb);
277 	count = be32_to_cpu(payload->count);
278 	if (count > NFP_FL_IPV6_ADDRS_MAX) {
279 		nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
280 		return;
281 	}
282 
283 	pay_len = nfp_flower_cmsg_get_data_len(skb);
284 	if (pay_len != struct_size(payload, tun_info, count)) {
285 		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
286 		return;
287 	}
288 
289 	rcu_read_lock();
290 	for (i = 0; i < count; i++) {
291 		ipv6_add = &payload->tun_info[i].ipv6;
292 		port = be32_to_cpu(payload->tun_info[i].egress_port);
293 		netdev = nfp_app_dev_get(app, port, NULL);
294 		if (!netdev)
295 			continue;
296 
297 		n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
298 		if (!n)
299 			continue;
300 
301 		/* Update the used timestamp of neighbour */
302 		neigh_event_send(n, NULL);
303 		neigh_release(n);
304 	}
305 	rcu_read_unlock();
306 #endif
307 }
308 
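/* Allocate a tunnel config control message of the given type and length,
 * copy the payload into it and hand it to the control channel for transmit.
 */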
309 static int
310 nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
311 			 gfp_t flag)
312 {
313 	struct sk_buff *skb;
314 	unsigned char *msg;
315 
316 	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
317 	if (!skb)
318 		return -ENOMEM;
319 
320 	msg = nfp_flower_cmsg_get_data(skb);
321 	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));
322 
323 	nfp_ctrl_tx(app->ctrl, skb);
324 	return 0;
325 }
326 
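/* The helpers below maintain a cache of routes whose neighbour entries have
 * been offloaded to the firmware. Destinations are stored as raw address
 * bytes so the same code can serve both the IPv4 and IPv6 lists.
 */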
327 static bool
328 __nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock,
329 		    void *add, int add_len)
330 {
331 	struct nfp_offloaded_route *entry;
332 
333 	spin_lock_bh(list_lock);
334 	list_for_each_entry(entry, route_list, list)
335 		if (!memcmp(entry->ip_add, add, add_len)) {
336 			spin_unlock_bh(list_lock);
337 			return true;
338 		}
339 	spin_unlock_bh(list_lock);
340 	return false;
341 }
342 
343 static int
344 __nfp_tun_add_route_to_cache(struct list_head *route_list,
345 			     spinlock_t *list_lock, void *add, int add_len)
346 {
347 	struct nfp_offloaded_route *entry;
348 
349 	spin_lock_bh(list_lock);
350 	list_for_each_entry(entry, route_list, list)
351 		if (!memcmp(entry->ip_add, add, add_len)) {
352 			spin_unlock_bh(list_lock);
353 			return 0;
354 		}
355 
356 	entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
357 	if (!entry) {
358 		spin_unlock_bh(list_lock);
359 		return -ENOMEM;
360 	}
361 
362 	memcpy(entry->ip_add, add, add_len);
363 	list_add_tail(&entry->list, route_list);
364 	spin_unlock_bh(list_lock);
365 
366 	return 0;
367 }
368 
369 static void
370 __nfp_tun_del_route_from_cache(struct list_head *route_list,
371 			       spinlock_t *list_lock, void *add, int add_len)
372 {
373 	struct nfp_offloaded_route *entry;
374 
375 	spin_lock_bh(list_lock);
376 	list_for_each_entry(entry, route_list, list)
377 		if (!memcmp(entry->ip_add, add, add_len)) {
378 			list_del(&entry->list);
379 			kfree(entry);
380 			break;
381 		}
382 	spin_unlock_bh(list_lock);
383 }
384 
385 static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr)
386 {
387 	struct nfp_flower_priv *priv = app->priv;
388 
389 	return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4,
390 				   &priv->tun.neigh_off_lock_v4, ipv4_addr,
391 				   sizeof(*ipv4_addr));
392 }
393 
394 static bool
395 nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
396 {
397 	struct nfp_flower_priv *priv = app->priv;
398 
399 	return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6,
400 				   &priv->tun.neigh_off_lock_v6, ipv6_addr,
401 				   sizeof(*ipv6_addr));
402 }
403 
404 static void
405 nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
406 {
407 	struct nfp_flower_priv *priv = app->priv;
408 
409 	__nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4,
410 				     &priv->tun.neigh_off_lock_v4, ipv4_addr,
411 				     sizeof(*ipv4_addr));
412 }
413 
414 static void
415 nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
416 {
417 	struct nfp_flower_priv *priv = app->priv;
418 
419 	__nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6,
420 				     &priv->tun.neigh_off_lock_v6, ipv6_addr,
421 				     sizeof(*ipv6_addr));
422 }
423 
424 static void
425 nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr)
426 {
427 	struct nfp_flower_priv *priv = app->priv;
428 
429 	__nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4,
430 				       &priv->tun.neigh_off_lock_v4, ipv4_addr,
431 				       sizeof(*ipv4_addr));
432 }
433 
434 static void
435 nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr)
436 {
437 	struct nfp_flower_priv *priv = app->priv;
438 
439 	__nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6,
440 				       &priv->tun.neigh_off_lock_v6, ipv6_addr,
441 				       sizeof(*ipv6_addr));
442 }
443 
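/* Build a TUN_NEIGH control message for an IPv4 destination and send it to
 * the firmware. If the neighbour is invalid or dead, the destination IP is
 * sent with all other fields zeroed and the route is dropped from the local
 * cache.
 */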
444 static void
445 nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app,
446 		       struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
447 {
448 	struct nfp_tun_neigh payload;
449 	u32 port_id;
450 
451 	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
452 	if (!port_id)
453 		return;
454 
455 	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
456 	payload.dst_ipv4 = flow->daddr;
457 
	/* If entry has expired, send dst IP with all other fields 0. */
459 	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
460 		nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4);
461 		/* Trigger ARP to verify invalid neighbour state. */
462 		neigh_event_send(neigh, NULL);
463 		goto send_msg;
464 	}
465 
466 	/* Have a valid neighbour so populate rest of entry. */
467 	payload.src_ipv4 = flow->saddr;
468 	ether_addr_copy(payload.src_addr, netdev->dev_addr);
469 	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
470 	payload.port_id = cpu_to_be32(port_id);
471 	/* Add destination of new route to NFP cache. */
472 	nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4);
473 
474 send_msg:
475 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
476 				 sizeof(struct nfp_tun_neigh),
477 				 (unsigned char *)&payload, flag);
478 }
479 
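/* IPv6 counterpart of nfp_tun_write_neigh_v4(). */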
480 static void
481 nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app,
482 		       struct flowi6 *flow, struct neighbour *neigh, gfp_t flag)
483 {
484 	struct nfp_tun_neigh_v6 payload;
485 	u32 port_id;
486 
487 	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
488 	if (!port_id)
489 		return;
490 
491 	memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6));
492 	payload.dst_ipv6 = flow->daddr;
493 
	/* If entry has expired, send dst IP with all other fields 0. */
495 	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
496 		nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6);
497 		/* Trigger probe to verify invalid neighbour state. */
498 		neigh_event_send(neigh, NULL);
499 		goto send_msg;
500 	}
501 
502 	/* Have a valid neighbour so populate rest of entry. */
503 	payload.src_ipv6 = flow->saddr;
504 	ether_addr_copy(payload.src_addr, netdev->dev_addr);
505 	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
506 	payload.port_id = cpu_to_be32(port_id);
507 	/* Add destination of new route to NFP cache. */
508 	nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6);
509 
510 send_msg:
511 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
512 				 sizeof(struct nfp_tun_neigh_v6),
513 				 (unsigned char *)&payload, flag);
514 }
515 
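/* Netevent notifier callback. On neighbour updates and redirects for
 * destinations that have already been offloaded, refresh the neighbour entry
 * on the NFP via a TUN_NEIGH(_V6) message.
 */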
516 static int
517 nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
518 			    void *ptr)
519 {
520 	struct nfp_flower_priv *app_priv;
521 	struct netevent_redirect *redir;
522 	struct flowi4 flow4 = {};
523 	struct flowi6 flow6 = {};
524 	struct neighbour *n;
525 	struct nfp_app *app;
526 	struct rtable *rt;
527 	bool ipv6 = false;
528 	int err;
529 
530 	switch (event) {
531 	case NETEVENT_REDIRECT:
532 		redir = (struct netevent_redirect *)ptr;
533 		n = redir->neigh;
534 		break;
535 	case NETEVENT_NEIGH_UPDATE:
536 		n = (struct neighbour *)ptr;
537 		break;
538 	default:
539 		return NOTIFY_DONE;
540 	}
541 
542 	if (n->tbl->family == AF_INET6)
543 		ipv6 = true;
544 
545 	if (ipv6)
546 		flow6.daddr = *(struct in6_addr *)n->primary_key;
547 	else
548 		flow4.daddr = *(__be32 *)n->primary_key;
549 
550 	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
551 	app = app_priv->app;
552 
553 	if (!nfp_netdev_is_nfp_repr(n->dev) &&
554 	    !nfp_flower_internal_port_can_offload(app, n->dev))
555 		return NOTIFY_DONE;
556 
557 	/* Only concerned with changes to routes already added to NFP. */
558 	if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) ||
559 	    (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr)))
560 		return NOTIFY_DONE;
561 
562 #if IS_ENABLED(CONFIG_INET)
563 	if (ipv6) {
564 #if IS_ENABLED(CONFIG_IPV6)
565 		struct dst_entry *dst;
566 
567 		dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL,
568 						      &flow6, NULL);
569 		if (IS_ERR(dst))
570 			return NOTIFY_DONE;
571 
572 		dst_release(dst);
573 		flow6.flowi6_proto = IPPROTO_UDP;
574 		nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC);
575 #else
576 		return NOTIFY_DONE;
577 #endif /* CONFIG_IPV6 */
578 	} else {
579 		/* Do a route lookup to populate flow data. */
580 		rt = ip_route_output_key(dev_net(n->dev), &flow4);
581 		err = PTR_ERR_OR_ZERO(rt);
582 		if (err)
583 			return NOTIFY_DONE;
584 
585 		ip_rt_put(rt);
586 
587 		flow4.flowi4_proto = IPPROTO_UDP;
588 		nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC);
589 	}
590 #else
591 	return NOTIFY_DONE;
592 #endif /* CONFIG_INET */
593 
594 	return NOTIFY_OK;
595 }
596 
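/* Handle an IPv4 route request from the firmware: resolve the route and
 * neighbour for the requested destination in the namespace of the ingress
 * port and reply with a TUN_NEIGH message.
 */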
597 void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
598 {
599 	struct nfp_tun_req_route_ipv4 *payload;
600 	struct net_device *netdev;
601 	struct flowi4 flow = {};
602 	struct neighbour *n;
603 	struct rtable *rt;
604 	int err;
605 
606 	payload = nfp_flower_cmsg_get_data(skb);
607 
608 	rcu_read_lock();
609 	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
610 	if (!netdev)
611 		goto fail_rcu_unlock;
612 
613 	flow.daddr = payload->ipv4_addr;
614 	flow.flowi4_proto = IPPROTO_UDP;
615 
616 #if IS_ENABLED(CONFIG_INET)
617 	/* Do a route lookup on same namespace as ingress port. */
618 	rt = ip_route_output_key(dev_net(netdev), &flow);
619 	err = PTR_ERR_OR_ZERO(rt);
620 	if (err)
621 		goto fail_rcu_unlock;
622 #else
623 	goto fail_rcu_unlock;
624 #endif
625 
626 	/* Get the neighbour entry for the lookup */
627 	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
628 	ip_rt_put(rt);
629 	if (!n)
630 		goto fail_rcu_unlock;
631 	nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC);
632 	neigh_release(n);
633 	rcu_read_unlock();
634 	return;
635 
636 fail_rcu_unlock:
637 	rcu_read_unlock();
638 	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
639 }
640 
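/* IPv6 counterpart of nfp_tunnel_request_route_v4(). */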
641 void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
642 {
643 	struct nfp_tun_req_route_ipv6 *payload;
644 	struct net_device *netdev;
645 	struct flowi6 flow = {};
646 	struct dst_entry *dst;
647 	struct neighbour *n;
648 
649 	payload = nfp_flower_cmsg_get_data(skb);
650 
651 	rcu_read_lock();
652 	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
653 	if (!netdev)
654 		goto fail_rcu_unlock;
655 
656 	flow.daddr = payload->ipv6_addr;
657 	flow.flowi6_proto = IPPROTO_UDP;
658 
659 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
660 	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
661 					      NULL);
662 	if (IS_ERR(dst))
663 		goto fail_rcu_unlock;
664 #else
665 	goto fail_rcu_unlock;
666 #endif
667 
668 	n = dst_neigh_lookup(dst, &flow.daddr);
669 	dst_release(dst);
670 	if (!n)
671 		goto fail_rcu_unlock;
672 
673 	nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC);
674 	neigh_release(n);
675 	rcu_read_unlock();
676 	return;
677 
678 fail_rcu_unlock:
679 	rcu_read_unlock();
680 	nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
681 }
682 
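/* Write the current list of offloaded IPv4 tunnel endpoint addresses to the
 * firmware. The complete list is resent on every change rather than issuing
 * incremental updates.
 */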
683 static void nfp_tun_write_ipv4_list(struct nfp_app *app)
684 {
685 	struct nfp_flower_priv *priv = app->priv;
686 	struct nfp_ipv4_addr_entry *entry;
687 	struct nfp_tun_ipv4_addr payload;
688 	struct list_head *ptr, *storage;
689 	int count;
690 
691 	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
692 	mutex_lock(&priv->tun.ipv4_off_lock);
693 	count = 0;
694 	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
695 		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
696 			mutex_unlock(&priv->tun.ipv4_off_lock);
697 			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
698 			return;
699 		}
700 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
701 		payload.ipv4_addr[count++] = entry->ipv4_addr;
702 	}
703 	payload.count = cpu_to_be32(count);
704 	mutex_unlock(&priv->tun.ipv4_off_lock);
705 
706 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
707 				 sizeof(struct nfp_tun_ipv4_addr),
708 				 &payload, GFP_KERNEL);
709 }
710 
711 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
712 {
713 	struct nfp_flower_priv *priv = app->priv;
714 	struct nfp_ipv4_addr_entry *entry;
715 	struct list_head *ptr, *storage;
716 
717 	mutex_lock(&priv->tun.ipv4_off_lock);
718 	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
719 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
720 		if (entry->ipv4_addr == ipv4) {
721 			entry->ref_count++;
722 			mutex_unlock(&priv->tun.ipv4_off_lock);
723 			return;
724 		}
725 	}
726 
727 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
728 	if (!entry) {
729 		mutex_unlock(&priv->tun.ipv4_off_lock);
730 		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
731 		return;
732 	}
733 	entry->ipv4_addr = ipv4;
734 	entry->ref_count = 1;
735 	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
736 	mutex_unlock(&priv->tun.ipv4_off_lock);
737 
738 	nfp_tun_write_ipv4_list(app);
739 }
740 
741 void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
742 {
743 	struct nfp_flower_priv *priv = app->priv;
744 	struct nfp_ipv4_addr_entry *entry;
745 	struct list_head *ptr, *storage;
746 
747 	mutex_lock(&priv->tun.ipv4_off_lock);
748 	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
749 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
750 		if (entry->ipv4_addr == ipv4) {
751 			entry->ref_count--;
752 			if (!entry->ref_count) {
753 				list_del(&entry->list);
754 				kfree(entry);
755 			}
756 			break;
757 		}
758 	}
759 	mutex_unlock(&priv->tun.ipv4_off_lock);
760 
761 	nfp_tun_write_ipv4_list(app);
762 }
763 
764 static void nfp_tun_write_ipv6_list(struct nfp_app *app)
765 {
766 	struct nfp_flower_priv *priv = app->priv;
767 	struct nfp_ipv6_addr_entry *entry;
768 	struct nfp_tun_ipv6_addr payload;
769 	int count = 0;
770 
771 	memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
772 	mutex_lock(&priv->tun.ipv6_off_lock);
773 	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
774 		if (count >= NFP_FL_IPV6_ADDRS_MAX) {
775 			nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
776 			break;
777 		}
778 		payload.ipv6_addr[count++] = entry->ipv6_addr;
779 	}
780 	mutex_unlock(&priv->tun.ipv6_off_lock);
781 	payload.count = cpu_to_be32(count);
782 
783 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
784 				 sizeof(struct nfp_tun_ipv6_addr),
785 				 &payload, GFP_KERNEL);
786 }
787 
788 struct nfp_ipv6_addr_entry *
789 nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
790 {
791 	struct nfp_flower_priv *priv = app->priv;
792 	struct nfp_ipv6_addr_entry *entry;
793 
794 	mutex_lock(&priv->tun.ipv6_off_lock);
795 	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
796 		if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
797 			entry->ref_count++;
798 			mutex_unlock(&priv->tun.ipv6_off_lock);
799 			return entry;
800 		}
801 
802 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
803 	if (!entry) {
804 		mutex_unlock(&priv->tun.ipv6_off_lock);
805 		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
806 		return NULL;
807 	}
808 	entry->ipv6_addr = *ipv6;
809 	entry->ref_count = 1;
810 	list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
811 	mutex_unlock(&priv->tun.ipv6_off_lock);
812 
813 	nfp_tun_write_ipv6_list(app);
814 
815 	return entry;
816 }
817 
818 void
819 nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
820 {
821 	struct nfp_flower_priv *priv = app->priv;
822 	bool freed = false;
823 
824 	mutex_lock(&priv->tun.ipv6_off_lock);
825 	if (!--entry->ref_count) {
826 		list_del(&entry->list);
827 		kfree(entry);
828 		freed = true;
829 	}
830 	mutex_unlock(&priv->tun.ipv6_off_lock);
831 
832 	if (freed)
833 		nfp_tun_write_ipv6_list(app);
834 }
835 
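/* Send a single MAC address add or delete for the firmware MAC lookup table
 * at the given index.
 */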
836 static int
837 __nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
838 {
839 	struct nfp_tun_mac_addr_offload payload;
840 
841 	memset(&payload, 0, sizeof(payload));
842 
843 	if (del)
844 		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
845 
	/* FW supports multiple MACs per cmsg but we restrict to a single one. */
847 	payload.count = cpu_to_be16(1);
848 	payload.index = cpu_to_be16(idx);
849 	ether_addr_copy(payload.addr, mac);
850 
851 	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
852 					sizeof(struct nfp_tun_mac_addr_offload),
853 					&payload, GFP_KERNEL);
854 }
855 
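/* Offloaded MAC indexes carry the NFP port type in their lower byte. Physical
 * port reprs encode their port id in the upper bits, while shared or non-repr
 * MACs use an id allocated from the mac_off_ids IDA. NFP_TUN_PRE_TUN_IDX_BIT
 * flags indexes whose matches are directed to the pre-tunnel rule table.
 */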
856 static bool nfp_tunnel_port_is_phy_repr(int port)
857 {
858 	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
859 	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
860 		return true;
861 
862 	return false;
863 }
864 
865 static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
866 {
867 	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
868 }
869 
870 static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
871 {
872 	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
873 }
874 
875 static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
876 {
877 	return nfp_mac_idx >> 8;
878 }
879 
880 static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
881 {
882 	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
883 }
884 
885 static struct nfp_tun_offloaded_mac *
886 nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
887 {
888 	struct nfp_flower_priv *priv = app->priv;
889 
890 	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
891 				      offloaded_macs_params);
892 }
893 
894 static void
895 nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
896 					   struct net_device *netdev, bool mod)
897 {
898 	if (nfp_netdev_is_nfp_repr(netdev)) {
899 		struct nfp_flower_repr_priv *repr_priv;
900 		struct nfp_repr *repr;
901 
902 		repr = netdev_priv(netdev);
903 		repr_priv = repr->app_priv;
904 
		/* If modifying MAC, remove repr from old list first. */
906 		if (mod)
907 			list_del(&repr_priv->mac_list);
908 
909 		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
910 	} else if (nfp_flower_is_supported_bridge(netdev)) {
911 		entry->bridge_count++;
912 	}
913 
914 	entry->ref_count++;
915 }
916 
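/* Offload the MAC address of a netdev, reusing an existing table entry when
 * the address is already offloaded for another device. A physical port repr
 * that is the sole user of a MAC keeps a port-based index; shared MACs,
 * non-repr devices and bridges are assigned a global index from the IDA.
 */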
917 static int
918 nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
919 			  int port, bool mod)
920 {
921 	struct nfp_flower_priv *priv = app->priv;
922 	int ida_idx = NFP_MAX_MAC_INDEX, err;
923 	struct nfp_tun_offloaded_mac *entry;
924 	u16 nfp_mac_idx = 0;
925 
926 	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
927 	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
928 		if (entry->bridge_count ||
929 		    !nfp_flower_is_supported_bridge(netdev)) {
930 			nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
931 								   netdev, mod);
932 			return 0;
933 		}
934 
935 		/* MAC is global but matches need to go to pre_tun table. */
936 		nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
937 	}
938 
939 	if (!nfp_mac_idx) {
940 		/* Assign a global index if non-repr or MAC is now shared. */
941 		if (entry || !port) {
942 			ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
943 						 NFP_MAX_MAC_INDEX, GFP_KERNEL);
944 			if (ida_idx < 0)
945 				return ida_idx;
946 
947 			nfp_mac_idx =
948 				nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
949 
950 			if (nfp_flower_is_supported_bridge(netdev))
951 				nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;
952 
953 		} else {
954 			nfp_mac_idx =
955 				nfp_tunnel_get_mac_idx_from_phy_port_id(port);
956 		}
957 	}
958 
959 	if (!entry) {
960 		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
961 		if (!entry) {
962 			err = -ENOMEM;
963 			goto err_free_ida;
964 		}
965 
966 		ether_addr_copy(entry->addr, netdev->dev_addr);
967 		INIT_LIST_HEAD(&entry->repr_list);
968 
969 		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
970 					   &entry->ht_node,
971 					   offloaded_macs_params)) {
972 			err = -ENOMEM;
973 			goto err_free_entry;
974 		}
975 	}
976 
977 	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
978 				       nfp_mac_idx, false);
979 	if (err) {
980 		/* If not shared then free. */
981 		if (!entry->ref_count)
982 			goto err_remove_hash;
983 		goto err_free_ida;
984 	}
985 
986 	entry->index = nfp_mac_idx;
987 	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
988 
989 	return 0;
990 
991 err_remove_hash:
992 	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
993 			       offloaded_macs_params);
994 err_free_entry:
995 	kfree(entry);
996 err_free_ida:
997 	if (ida_idx != NFP_MAX_MAC_INDEX)
998 		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
999 
1000 	return err;
1001 }
1002 
1003 static int
1004 nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
1005 			  u8 *mac, bool mod)
1006 {
1007 	struct nfp_flower_priv *priv = app->priv;
1008 	struct nfp_flower_repr_priv *repr_priv;
1009 	struct nfp_tun_offloaded_mac *entry;
1010 	struct nfp_repr *repr;
1011 	int ida_idx;
1012 
1013 	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
1014 	if (!entry)
1015 		return 0;
1016 
1017 	entry->ref_count--;
	/* If del is part of a mod then mac_list is still in use elsewhere. */
1019 	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
1020 		repr = netdev_priv(netdev);
1021 		repr_priv = repr->app_priv;
1022 		list_del(&repr_priv->mac_list);
1023 	}
1024 
1025 	if (nfp_flower_is_supported_bridge(netdev)) {
1026 		entry->bridge_count--;
1027 
1028 		if (!entry->bridge_count && entry->ref_count) {
1029 			u16 nfp_mac_idx;
1030 
1031 			nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
1032 			if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
1033 						     false)) {
1034 				nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
1035 						     netdev_name(netdev));
1036 				return 0;
1037 			}
1038 
1039 			entry->index = nfp_mac_idx;
1040 			return 0;
1041 		}
1042 	}
1043 
	/* If MAC is now used by 1 repr, set the offloaded MAC index to port. */
1045 	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
1046 		u16 nfp_mac_idx;
1047 		int port, err;
1048 
1049 		repr_priv = list_first_entry(&entry->repr_list,
1050 					     struct nfp_flower_repr_priv,
1051 					     mac_list);
1052 		repr = repr_priv->nfp_repr;
1053 		port = nfp_repr_get_port_id(repr->netdev);
1054 		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
1055 		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
1056 		if (err) {
1057 			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
1058 					     netdev_name(netdev));
1059 			return 0;
1060 		}
1061 
1062 		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
1063 		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
1064 		entry->index = nfp_mac_idx;
1065 		return 0;
1066 	}
1067 
1068 	if (entry->ref_count)
1069 		return 0;
1070 
1071 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
1072 					    &entry->ht_node,
1073 					    offloaded_macs_params));
1074 	/* If MAC has global ID then extract and free the ida entry. */
1075 	if (nfp_tunnel_is_mac_idx_global(entry->index)) {
1076 		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
1077 		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
1078 	}
1079 
1080 	kfree(entry);
1081 
1082 	return __nfp_tunnel_offload_mac(app, mac, 0, true);
1083 }
1084 
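/* Apply an add, delete or modify of an offloaded MAC for a netdev. Only
 * physical port reprs and non-repr netdevs that qualify for offload are
 * handled; a modify of a MAC that was never offloaded is treated as an add.
 */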
1085 static int
1086 nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
1087 		       enum nfp_flower_mac_offload_cmd cmd)
1088 {
1089 	struct nfp_flower_non_repr_priv *nr_priv = NULL;
1090 	bool non_repr = false, *mac_offloaded;
1091 	u8 *off_mac = NULL;
1092 	int err, port = 0;
1093 
1094 	if (nfp_netdev_is_nfp_repr(netdev)) {
1095 		struct nfp_flower_repr_priv *repr_priv;
1096 		struct nfp_repr *repr;
1097 
1098 		repr = netdev_priv(netdev);
1099 		if (repr->app != app)
1100 			return 0;
1101 
1102 		repr_priv = repr->app_priv;
1103 		if (repr_priv->on_bridge)
1104 			return 0;
1105 
1106 		mac_offloaded = &repr_priv->mac_offloaded;
1107 		off_mac = &repr_priv->offloaded_mac_addr[0];
1108 		port = nfp_repr_get_port_id(netdev);
1109 		if (!nfp_tunnel_port_is_phy_repr(port))
1110 			return 0;
1111 	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
1112 		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
1113 		if (!nr_priv)
1114 			return -ENOMEM;
1115 
1116 		mac_offloaded = &nr_priv->mac_offloaded;
1117 		off_mac = &nr_priv->offloaded_mac_addr[0];
1118 		non_repr = true;
1119 	} else {
1120 		return 0;
1121 	}
1122 
1123 	if (!is_valid_ether_addr(netdev->dev_addr)) {
1124 		err = -EINVAL;
1125 		goto err_put_non_repr_priv;
1126 	}
1127 
1128 	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
1129 		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;
1130 
1131 	switch (cmd) {
1132 	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
1133 		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
1134 		if (err)
1135 			goto err_put_non_repr_priv;
1136 
1137 		if (non_repr)
1138 			__nfp_flower_non_repr_priv_get(nr_priv);
1139 
1140 		*mac_offloaded = true;
1141 		ether_addr_copy(off_mac, netdev->dev_addr);
1142 		break;
1143 	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
1144 		/* Only attempt delete if add was successful. */
1145 		if (!*mac_offloaded)
1146 			break;
1147 
1148 		if (non_repr)
1149 			__nfp_flower_non_repr_priv_put(nr_priv);
1150 
1151 		*mac_offloaded = false;
1152 
1153 		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
1154 						false);
1155 		if (err)
1156 			goto err_put_non_repr_priv;
1157 
1158 		break;
1159 	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
1160 		/* Ignore if changing to the same address. */
1161 		if (ether_addr_equal(netdev->dev_addr, off_mac))
1162 			break;
1163 
1164 		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
1165 		if (err)
1166 			goto err_put_non_repr_priv;
1167 
1168 		/* Delete the previous MAC address. */
1169 		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
1170 		if (err)
1171 			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
1172 					     netdev_name(netdev));
1173 
1174 		ether_addr_copy(off_mac, netdev->dev_addr);
1175 		break;
1176 	default:
1177 		err = -EINVAL;
1178 		goto err_put_non_repr_priv;
1179 	}
1180 
1181 	if (non_repr)
1182 		__nfp_flower_non_repr_priv_put(nr_priv);
1183 
1184 	return 0;
1185 
1186 err_put_non_repr_priv:
1187 	if (non_repr)
1188 		__nfp_flower_non_repr_priv_put(nr_priv);
1189 
1190 	return err;
1191 }
1192 
1193 int nfp_tunnel_mac_event_handler(struct nfp_app *app,
1194 				 struct net_device *netdev,
1195 				 unsigned long event, void *ptr)
1196 {
1197 	int err;
1198 
1199 	if (event == NETDEV_DOWN) {
1200 		err = nfp_tunnel_offload_mac(app, netdev,
1201 					     NFP_TUNNEL_MAC_OFFLOAD_DEL);
1202 		if (err)
1203 			nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
1204 					     netdev_name(netdev));
1205 	} else if (event == NETDEV_UP) {
1206 		err = nfp_tunnel_offload_mac(app, netdev,
1207 					     NFP_TUNNEL_MAC_OFFLOAD_ADD);
1208 		if (err)
1209 			nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
1210 					     netdev_name(netdev));
1211 	} else if (event == NETDEV_CHANGEADDR) {
1212 		/* Only offload addr change if netdev is already up. */
1213 		if (!(netdev->flags & IFF_UP))
1214 			return NOTIFY_OK;
1215 
1216 		err = nfp_tunnel_offload_mac(app, netdev,
1217 					     NFP_TUNNEL_MAC_OFFLOAD_MOD);
1218 		if (err)
1219 			nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
1220 					     netdev_name(netdev));
1221 	} else if (event == NETDEV_CHANGEUPPER) {
1222 		/* If a repr is attached to a bridge then tunnel packets
1223 		 * entering the physical port are directed through the bridge
1224 		 * datapath and cannot be directly detunneled. Therefore,
1225 		 * associated offloaded MACs and indexes should not be used
1226 		 * by fw for detunneling.
1227 		 */
1228 		struct netdev_notifier_changeupper_info *info = ptr;
1229 		struct net_device *upper = info->upper_dev;
1230 		struct nfp_flower_repr_priv *repr_priv;
1231 		struct nfp_repr *repr;
1232 
1233 		if (!nfp_netdev_is_nfp_repr(netdev) ||
1234 		    !nfp_flower_is_supported_bridge(upper))
1235 			return NOTIFY_OK;
1236 
1237 		repr = netdev_priv(netdev);
1238 		if (repr->app != app)
1239 			return NOTIFY_OK;
1240 
1241 		repr_priv = repr->app_priv;
1242 
1243 		if (info->linking) {
1244 			if (nfp_tunnel_offload_mac(app, netdev,
1245 						   NFP_TUNNEL_MAC_OFFLOAD_DEL))
1246 				nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
1247 						     netdev_name(netdev));
1248 			repr_priv->on_bridge = true;
1249 		} else {
1250 			repr_priv->on_bridge = false;
1251 
1252 			if (!(netdev->flags & IFF_UP))
1253 				return NOTIFY_OK;
1254 
1255 			if (nfp_tunnel_offload_mac(app, netdev,
1256 						   NFP_TUNNEL_MAC_OFFLOAD_ADD))
1257 				nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
1258 						     netdev_name(netdev));
1259 		}
1260 	}
1261 	return NOTIFY_OK;
1262 }
1263 
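/* Offload a pre-tunnel rule: pass the VLAN, stats context and the MAC index
 * of the internal port egress device to the firmware so that tunnelled
 * packets can be matched before decap.
 */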
1264 int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
1265 				 struct nfp_fl_payload *flow)
1266 {
1267 	struct nfp_flower_priv *app_priv = app->priv;
1268 	struct nfp_tun_offloaded_mac *mac_entry;
1269 	struct nfp_tun_pre_tun_rule payload;
1270 	struct net_device *internal_dev;
1271 	int err;
1272 
1273 	if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
1274 		return -ENOSPC;
1275 
1276 	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
1277 
1278 	internal_dev = flow->pre_tun_rule.dev;
1279 	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
1280 	payload.host_ctx_id = flow->meta.host_ctx_id;
1281 
1282 	/* Lookup MAC index for the pre-tunnel rule egress device.
1283 	 * Note that because the device is always an internal port, it will
1284 	 * have a constant global index so does not need to be tracked.
1285 	 */
1286 	mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
1287 						     internal_dev->dev_addr);
1288 	if (!mac_entry)
1289 		return -ENOENT;
1290 
1291 	payload.port_idx = cpu_to_be16(mac_entry->index);
1292 
1293 	/* Copy mac id and vlan to flow - dev may not exist at delete time. */
1294 	flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
1295 	flow->pre_tun_rule.port_idx = payload.port_idx;
1296 
1297 	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
1298 				       sizeof(struct nfp_tun_pre_tun_rule),
1299 				       (unsigned char *)&payload, GFP_KERNEL);
1300 	if (err)
1301 		return err;
1302 
1303 	app_priv->pre_tun_rule_cnt++;
1304 
1305 	return 0;
1306 }
1307 
1308 int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
1309 				     struct nfp_fl_payload *flow)
1310 {
1311 	struct nfp_flower_priv *app_priv = app->priv;
1312 	struct nfp_tun_pre_tun_rule payload;
1313 	u32 tmp_flags = 0;
1314 	int err;
1315 
1316 	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
1317 
1318 	tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
1319 	payload.flags = cpu_to_be32(tmp_flags);
1320 	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
1321 	payload.port_idx = flow->pre_tun_rule.port_idx;
1322 
1323 	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
1324 				       sizeof(struct nfp_tun_pre_tun_rule),
1325 				       (unsigned char *)&payload, GFP_KERNEL);
1326 	if (err)
1327 		return err;
1328 
1329 	app_priv->pre_tun_rule_cnt--;
1330 
1331 	return 0;
1332 }
1333 
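/* Initialise all tunnel offload state: the offloaded MAC hashtable and IDA,
 * the tunnel endpoint IP lists and the neighbour offload lists, and register
 * the netevent notifier used to track neighbour updates.
 */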
1334 int nfp_tunnel_config_start(struct nfp_app *app)
1335 {
1336 	struct nfp_flower_priv *priv = app->priv;
1337 	int err;
1338 
1339 	/* Initialise rhash for MAC offload tracking. */
1340 	err = rhashtable_init(&priv->tun.offloaded_macs,
1341 			      &offloaded_macs_params);
1342 	if (err)
1343 		return err;
1344 
1345 	ida_init(&priv->tun.mac_off_ids);
1346 
1347 	/* Initialise priv data for IPv4/v6 offloading. */
1348 	mutex_init(&priv->tun.ipv4_off_lock);
1349 	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
1350 	mutex_init(&priv->tun.ipv6_off_lock);
1351 	INIT_LIST_HEAD(&priv->tun.ipv6_off_list);
1352 
1353 	/* Initialise priv data for neighbour offloading. */
1354 	spin_lock_init(&priv->tun.neigh_off_lock_v4);
1355 	INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4);
1356 	spin_lock_init(&priv->tun.neigh_off_lock_v6);
1357 	INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6);
1358 	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
1359 
1360 	err = register_netevent_notifier(&priv->tun.neigh_nb);
1361 	if (err) {
1362 		rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
1363 					    nfp_check_rhashtable_empty, NULL);
1364 		return err;
1365 	}
1366 
1367 	return 0;
1368 }
1369 
1370 void nfp_tunnel_config_stop(struct nfp_app *app)
1371 {
1372 	struct nfp_offloaded_route *route_entry, *temp;
1373 	struct nfp_flower_priv *priv = app->priv;
1374 	struct nfp_ipv4_addr_entry *ip_entry;
1375 	struct nfp_tun_neigh_v6 ipv6_route;
1376 	struct nfp_tun_neigh ipv4_route;
1377 	struct list_head *ptr, *storage;
1378 
1379 	unregister_netevent_notifier(&priv->tun.neigh_nb);
1380 
1381 	ida_destroy(&priv->tun.mac_off_ids);
1382 
1383 	/* Free any memory that may be occupied by ipv4 list. */
1384 	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
1385 		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
1386 		list_del(&ip_entry->list);
1387 		kfree(ip_entry);
1388 	}
1389 
1390 	mutex_destroy(&priv->tun.ipv6_off_lock);
1391 
1392 	/* Free memory in the route list and remove entries from fw cache. */
1393 	list_for_each_entry_safe(route_entry, temp,
1394 				 &priv->tun.neigh_off_list_v4, list) {
1395 		memset(&ipv4_route, 0, sizeof(ipv4_route));
1396 		memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add,
1397 		       sizeof(ipv4_route.dst_ipv4));
1398 		list_del(&route_entry->list);
1399 		kfree(route_entry);
1400 
1401 		nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
1402 					 sizeof(struct nfp_tun_neigh),
1403 					 (unsigned char *)&ipv4_route,
1404 					 GFP_KERNEL);
1405 	}
1406 
1407 	list_for_each_entry_safe(route_entry, temp,
1408 				 &priv->tun.neigh_off_list_v6, list) {
1409 		memset(&ipv6_route, 0, sizeof(ipv6_route));
1410 		memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add,
1411 		       sizeof(ipv6_route.dst_ipv6));
1412 		list_del(&route_entry->list);
1413 		kfree(route_entry);
1414 
1415 		nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6,
					 sizeof(struct nfp_tun_neigh_v6),
1417 					 (unsigned char *)&ipv6_route,
1418 					 GFP_KERNEL);
1419 	}
1420 
1421 	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
1422 	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
1423 				    nfp_check_rhashtable_empty, NULL);
1424 }
1425