// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in the message
 * @flags:		options part of the request
 * @tun_info.ipv4:		dest IPv4 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4:	destination IPv4 address
 * @src_ipv4:	source IPv4 address
 * @dst_addr:	destination MAC address
 * @src_addr:	source MAC address
 * @port_id:	NFP port to output packet on - associated with source IPv4
 */
struct nfp_tun_neigh {
	__be32 dst_ipv4;
	__be32 src_ipv4;
	u8 dst_addr[ETH_ALEN];
	u8 src_addr[ETH_ALEN];
	__be32 port_id;
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:		destination IPv4 address for route
 * @reserved:		reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};

/**
 * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
 * @ipv4_addr:	destination of route
 * @list:	list pointer
 */
struct nfp_ipv4_route_entry {
	__be32 ipv4_addr;
	struct list_head list;
};

#define NFP_FL_IPV4_ADDRS_MAX        32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:	MAC address offload options
 * @count:	number of MAC addresses in the message (should be 1)
 * @index:	index of MAC address in the lookup table
 * @addr:	interface MAC address
 */
struct nfp_tun_mac_addr_offload {
	__be16 flags;
	__be16 count;
	__be16 index;
	u8 addr[ETH_ALEN];
};

enum nfp_flower_mac_offload_cmd {
	NFP_TUNNEL_MAC_OFFLOAD_ADD =		0,
	NFP_TUNNEL_MAC_OFFLOAD_DEL =		1,
	NFP_TUNNEL_MAC_OFFLOAD_MOD =		2,
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:	Hashtable entry
 * @addr:	Offloaded MAC address
 * @index:	Offloaded index for given MAC address
 * @ref_count:	Number of devs using this MAC address
 * @repr_list:	List of reprs sharing this MAC address
 */
struct nfp_tun_offloaded_mac {
	struct rhash_head ht_node;
	u8 addr[ETH_ALEN];
	u16 index;
	int ref_count;
	struct list_head repr_list;
};

static const struct rhashtable_params offloaded_macs_params = {
	.key_offset	= offsetof(struct nfp_tun_offloaded_mac, addr),
	.head_offset	= offsetof(struct nfp_tun_offloaded_mac, ht_node),
	.key_len	= ETH_ALEN,
	.automatic_shrinking	= true,
};

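/* Handle a periodic active-tunnels cmsg from the firmware: for each tunnel
 * destination that passed traffic in the reported period, look up the kernel
 * neighbour entry and refresh its used timestamp so it is not aged out.
 */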
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != sizeof(struct nfp_tun_active_tuns) +
	    sizeof(struct route_ip_info) * count) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

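/* Allocate a control message of the requested type, copy in the payload and
 * transmit it to the firmware over the control vNIC.
 */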
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct sk_buff *skb;
	unsigned char *msg;

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}

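/* Return true if the destination IP is already cached as a route offloaded to
 * the NFP.
 */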
static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->tun.neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->tun.neigh_off_lock);
			return true;
		}
	}
	spin_unlock_bh(&priv->tun.neigh_off_lock);
	return false;
}

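/* Add a destination IP to the cache of routes offloaded to the NFP. Does
 * nothing if the address is already present.
 */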
static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->tun.neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->tun.neigh_off_lock);
			return;
		}
	}
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_bh(&priv->tun.neigh_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
		return;
	}

	entry->ipv4_addr = ipv4_addr;
	list_add_tail(&entry->list, &priv->tun.neigh_off_list);
	spin_unlock_bh(&priv->tun.neigh_off_lock);
}

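/* Remove a destination IP from the cache of routes offloaded to the NFP. */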
static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->tun.neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			list_del(&entry->list);
			kfree(entry);
			break;
		}
	}
	spin_unlock_bh(&priv->tun.neigh_off_lock);
}

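/* Build a TUN_NEIGH control message for the given flow/neighbour pair and send
 * it to the firmware. An invalid or dead neighbour is reported with only the
 * destination IP populated and is dropped from the route cache; a valid
 * neighbour also carries the source IP, MAC addresses and egress port, and its
 * destination is added to the route cache.
 */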
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
	struct nfp_tun_neigh payload;
	u32 port_id;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
	if (!port_id)
		return;

	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
	payload.dst_ipv4 = flow->daddr;

	/* If entry has expired send dst IP with all other fields 0. */
	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		goto send_msg;
	}

	/* Have a valid neighbour so populate rest of entry. */
	payload.src_ipv4 = flow->saddr;
	ether_addr_copy(payload.src_addr, netdev->dev_addr);
	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
	payload.port_id = cpu_to_be32(port_id);
	/* Add destination of new route to NFP cache. */
	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);

send_msg:
	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
				 sizeof(struct nfp_tun_neigh),
				 (unsigned char *)&payload, flag);
}

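/* Netevent notifier: on neighbour updates or redirects that involve an NFP
 * repr and a destination already offloaded to the NFP, redo the route lookup
 * and push the updated neighbour state to the firmware.
 */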
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct nfp_app *app;
	struct rtable *rt;
	int err;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}

	flow.daddr = *(__be32 *)n->primary_key;

	/* Only concerned with route changes for representors. */
	if (!nfp_netdev_is_nfp_repr(n->dev))
		return NOTIFY_DONE;

	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
	app = app_priv->app;

	/* Only concerned with changes to routes already added to NFP. */
	if (!nfp_tun_has_route(app, flow.daddr))
		return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup to populate flow data. */
	rt = ip_route_output_key(dev_net(n->dev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		return NOTIFY_DONE;

	ip_rt_put(rt);
#else
	return NOTIFY_DONE;
#endif

	flow.flowi4_proto = IPPROTO_UDP;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);

	return NOTIFY_OK;
}

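/* Handle a route/neighbour lookup request from the firmware: resolve the
 * requested destination in the namespace of the ingress port and reply with
 * the resulting neighbour information via a TUN_NEIGH message.
 */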
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto route_fail_warning;

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto route_fail_warning;
#else
	goto route_fail_warning;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto route_fail_warning;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
	neigh_release(n);
	return;

route_fail_warning:
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

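/* Send the complete list of offloaded tunnel endpoint IPv4 addresses to the
 * firmware in a single TUN_IPS control message.
 */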
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->tun.ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->tun.ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

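/* Take a reference on a tunnel endpoint IPv4 address, adding it to the cached
 * list on first use, and push the updated list to the firmware.
 */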
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

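/* Send a TUN_MAC control message adding or deleting a single MAC address at
 * the given index in the firmware lookup table.
 */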
static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
	struct nfp_tun_mac_addr_offload payload;

	memset(&payload, 0, sizeof(payload));

	if (del)
		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

	/* FW supports multiple MACs per cmsg but we restrict to a single one. */
	payload.count = cpu_to_be16(1);
	payload.index = cpu_to_be16(idx);
	ether_addr_copy(payload.addr, mac);

	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
					sizeof(struct nfp_tun_mac_addr_offload),
					&payload, GFP_KERNEL);
}

static bool nfp_tunnel_port_is_phy_repr(int port)
{
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
		return true;

	return false;
}

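/* Offloaded MAC indexes encode their owner in the low byte: physical port
 * reprs use NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT with the port id in the upper
 * bits, while shared or non-repr addresses use
 * NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT with an IDA-allocated id in the upper
 * bits.
 */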
static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
	return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
				      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
					   struct net_device *netdev, bool mod)
{
	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;

		/* If modifying the MAC, remove the repr from its old list first. */
		if (mod)
			list_del(&repr_priv->mac_list);

		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
	}

	entry->ref_count++;
}

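/* Offload a netdev MAC address, sharing a single table entry between devices
 * that use the same address. A physical port repr keeps a port-based index
 * while the address is unique to it; non-repr devices, or addresses shared by
 * more than one device, get a global index allocated from the IDA.
 */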
static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  int port, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	int ida_idx = NFP_MAX_MAC_INDEX, err;
	struct nfp_tun_offloaded_mac *entry;
	u16 nfp_mac_idx = 0;

	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
		nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
		return 0;
	}

	/* Assign a global index if non-repr or MAC address is now shared. */
	if (entry || !port) {
		ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
					 NFP_MAX_MAC_INDEX, GFP_KERNEL);
		if (ida_idx < 0)
			return ida_idx;

		nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
	} else {
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
	}

	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			err = -ENOMEM;
			goto err_free_ida;
		}

		ether_addr_copy(entry->addr, netdev->dev_addr);
		INIT_LIST_HEAD(&entry->repr_list);

		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
					   &entry->ht_node,
					   offloaded_macs_params)) {
			err = -ENOMEM;
			goto err_free_entry;
		}
	}

	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
				       nfp_mac_idx, false);
	if (err) {
		/* If not shared then free. */
		if (!entry->ref_count)
			goto err_remove_hash;
		goto err_free_ida;
	}

	entry->index = nfp_mac_idx;
	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

	return 0;

err_remove_hash:
	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
			       offloaded_macs_params);
err_free_entry:
	kfree(entry);
err_free_ida:
	if (ida_idx != NFP_MAX_MAC_INDEX)
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);

	return err;
}

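/* Drop a reference on an offloaded MAC address. When only a single physical
 * port repr is left using the address, the entry is reverted to its port-based
 * index; when the last user goes away, the address is deleted from the
 * firmware table and the entry is freed.
 */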
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  u8 *mac, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_tun_offloaded_mac *entry;
	struct nfp_repr *repr;
	int ida_idx;

	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
	if (!entry)
		return 0;

	entry->ref_count--;
	/* If del is part of a mod then mac_list is still in use elsewhere. */
	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;
		list_del(&repr_priv->mac_list);
	}

	/* If the MAC is now used by a single repr, revert to its port-based index. */
	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
		u16 nfp_mac_idx;
		int port, err;

		repr_priv = list_first_entry(&entry->repr_list,
					     struct nfp_flower_repr_priv,
					     mac_list);
		repr = repr_priv->nfp_repr;
		port = nfp_repr_get_port_id(repr->netdev);
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
		if (err) {
			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
					     netdev_name(netdev));
			return 0;
		}

		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
		entry->index = nfp_mac_idx;
		return 0;
	}

	if (entry->ref_count)
		return 0;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
					    &entry->ht_node,
					    offloaded_macs_params));
	/* If MAC has global ID then extract and free the ida entry. */
	if (nfp_tunnel_is_mac_idx_global(entry->index)) {
		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
	}

	kfree(entry);

	return __nfp_tunnel_offload_mac(app, mac, 0, true);
}

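/* Handle add/delete/modify of a netdev MAC address for tunnel offload. Only
 * physical port reprs and netdevs accepted by nfp_fl_is_netdev_to_offload()
 * are considered; the offloaded state is tracked in the repr or non-repr
 * private data.
 */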
static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
		       enum nfp_flower_mac_offload_cmd cmd)
{
	struct nfp_flower_non_repr_priv *nr_priv = NULL;
	bool non_repr = false, *mac_offloaded;
	u8 *off_mac = NULL;
	int err, port = 0;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return 0;

		repr_priv = repr->app_priv;
		mac_offloaded = &repr_priv->mac_offloaded;
		off_mac = &repr_priv->offloaded_mac_addr[0];
		port = nfp_repr_get_port_id(netdev);
		if (!nfp_tunnel_port_is_phy_repr(port))
			return 0;
	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
		if (!nr_priv)
			return -ENOMEM;

		mac_offloaded = &nr_priv->mac_offloaded;
		off_mac = &nr_priv->offloaded_mac_addr[0];
		non_repr = true;
	} else {
		return 0;
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

	switch (cmd) {
	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
		if (err)
			goto err_put_non_repr_priv;

		if (non_repr)
			__nfp_flower_non_repr_priv_get(nr_priv);

		*mac_offloaded = true;
		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
		/* Only attempt delete if add was successful. */
		if (!*mac_offloaded)
			break;

		if (non_repr)
			__nfp_flower_non_repr_priv_put(nr_priv);

		*mac_offloaded = false;

		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
						false);
		if (err)
			goto err_put_non_repr_priv;

		break;
	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
		/* Ignore if changing to the same address. */
		if (ether_addr_equal(netdev->dev_addr, off_mac))
			break;

		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
		if (err)
			goto err_put_non_repr_priv;

		/* Delete the previous MAC address. */
		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
					     netdev_name(netdev));

		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	default:
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return 0;

err_put_non_repr_priv:
	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return err;
}

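/* Netdev notifier: keep the tunnel MAC offload state in sync with netdev up,
 * down and address-change events.
 */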
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
				 struct net_device *netdev,
				 unsigned long event, void *ptr)
{
	int err;

	if (event == NETDEV_DOWN) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_DEL);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_UP) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_ADD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEADDR) {
		/* Only offload addr change if netdev is already up. */
		if (!(netdev->flags & IFF_UP))
			return NOTIFY_OK;

		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
					     netdev_name(netdev));
	}
	return NOTIFY_OK;
}

int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	/* Initialise rhash for MAC offload tracking. */
	err = rhashtable_init(&priv->tun.offloaded_macs,
			      &offloaded_macs_params);
	if (err)
		return err;

	ida_init(&priv->tun.mac_off_ids);

	/* Initialise priv data for IPv4 offloading. */
	mutex_init(&priv->tun.ipv4_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);

	/* Initialise priv data for neighbour offloading. */
	spin_lock_init(&priv->tun.neigh_off_lock);
	INIT_LIST_HEAD(&priv->tun.neigh_off_list);
	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netevent_notifier(&priv->tun.neigh_nb);
	if (err) {
		rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
					    nfp_check_rhashtable_empty, NULL);
		return err;
	}

	return 0;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *route_entry;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct list_head *ptr, *storage;

	unregister_netevent_notifier(&priv->tun.neigh_nb);

	ida_destroy(&priv->tun.mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	/* Free any memory that may be occupied by the route list. */
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
					 list);
		list_del(&route_entry->list);
		kfree(route_entry);
	}

	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
				    nfp_check_rhashtable_empty, NULL);
}