1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/inetdevice.h>
7 #include <net/inet_dscp.h>
8 #include <net/switchdev.h>
9 #include <linux/rhashtable.h>
10 #include <net/nexthop.h>
11 #include <net/arp.h>
12 #include <linux/if_vlan.h>
13 #include <linux/if_macvlan.h>
14 #include <net/netevent.h>
15 
16 #include "prestera.h"
17 #include "prestera_router_hw.h"
18 
19 struct prestera_kern_neigh_cache_key {
20 	struct prestera_ip_addr addr;
21 	struct net_device *dev;
22 };
23 
struct prestera_kern_neigh_cache {
	struct prestera_kern_neigh_cache_key key;
	struct rhash_head ht_node;
	struct list_head kern_fib_cache_list;
	/* Holds prepared nh_neigh info while the neighbour is in_kernel */
	struct prestera_neigh_info nh_neigh_info;
	/* Indicates whether the neighbour is reachable by a direct route */
	bool reachable;
	/* Pins the cache entry while the neighbour is present in the kernel */
	bool in_kernel;
};
struct prestera_kern_fib_cache_key {
	struct prestera_ip_addr addr;
	u32 prefix_len;
	u32 kern_tb_id; /* tb_id as reported by the kernel (not fixed) */
};

/* Tracks a kernel fib entry and the neighbours it depends on */
struct prestera_kern_fib_cache {
	struct prestera_kern_fib_cache_key key;
	struct {
		struct prestera_fib_key fib_key;
		enum prestera_fib_type fib_type;
		struct prestera_nexthop_group_key nh_grp_key;
	} lpm_info; /* holds prepared lpm info */
	struct rhash_head ht_node; /* node of prestera_router */
	struct prestera_kern_neigh_cache_head {
		struct prestera_kern_fib_cache *this;
		struct list_head head;
		struct prestera_kern_neigh_cache *n_cache;
	} kern_neigh_cache_head[PRESTERA_NHGR_SIZE_MAX];
	union {
		struct fib_notifier_info info; /* points to the v4 or v6 variant */
		struct fib_entry_notifier_info fen4_info;
	};
	/* Indicates that the route is not overlapped by the same prefix
	 * in another table
	 */
	bool reachable;
};
62 
63 static const struct rhashtable_params __prestera_kern_neigh_cache_ht_params = {
64 	.key_offset  = offsetof(struct prestera_kern_neigh_cache, key),
65 	.head_offset = offsetof(struct prestera_kern_neigh_cache, ht_node),
66 	.key_len     = sizeof(struct prestera_kern_neigh_cache_key),
67 	.automatic_shrinking = true,
68 };
69 
70 static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
71 	.key_offset  = offsetof(struct prestera_kern_fib_cache, key),
72 	.head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
73 	.key_len     = sizeof(struct prestera_kern_fib_cache_key),
74 	.automatic_shrinking = true,
75 };
76 
/* Utility to convert kernel table ids that belong to the default VR
 * (UNSPEC/LOCAL/DEFAULT) into the table id used for the hw VR.
 */
static u32 prestera_fix_tb_id(u32 tb_id)
{
	if (tb_id == RT_TABLE_UNSPEC ||
	    tb_id == RT_TABLE_LOCAL ||
	    tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;

	return tb_id;
}
87 
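/* Build an arbiter fib cache key from a kernel IPv4 fib notifier info.
 * kern_tb_id is stored as reported by the kernel (i.e. not fixed).
 */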
88 static void
89 prestera_util_fen_info2fib_cache_key(struct fib_notifier_info *info,
90 				     struct prestera_kern_fib_cache_key *key)
91 {
92 	struct fib_entry_notifier_info *fen_info =
93 		container_of(info, struct fib_entry_notifier_info, info);
94 
95 	memset(key, 0, sizeof(*key));
96 	key->addr.v = PRESTERA_IPV4;
97 	key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
98 	key->prefix_len = fen_info->dst_len;
99 	key->kern_tb_id = fen_info->tb_id;
100 }
101 
102 static int prestera_util_nhc2nc_key(struct prestera_switch *sw,
103 				    struct fib_nh_common *nhc,
104 				    struct prestera_kern_neigh_cache_key *nk)
105 {
106 	memset(nk, 0, sizeof(*nk));
107 	if (nhc->nhc_gw_family == AF_INET) {
108 		nk->addr.v = PRESTERA_IPV4;
109 		nk->addr.u.ipv4 = nhc->nhc_gw.ipv4;
110 	} else {
111 		nk->addr.v = PRESTERA_IPV6;
112 		nk->addr.u.ipv6 = nhc->nhc_gw.ipv6;
113 	}
114 
115 	nk->dev = nhc->nhc_dev;
116 	return 0;
117 }
118 
119 static void
120 prestera_util_nc_key2nh_key(struct prestera_kern_neigh_cache_key *ck,
121 			    struct prestera_nh_neigh_key *nk)
122 {
123 	memset(nk, 0, sizeof(*nk));
124 	nk->addr = ck->addr;
125 	nk->rif = (void *)ck->dev;
126 }
127 
128 static bool
129 prestera_util_nhc_eq_n_cache_key(struct prestera_switch *sw,
130 				 struct fib_nh_common *nhc,
131 				 struct prestera_kern_neigh_cache_key *nk)
132 {
133 	struct prestera_kern_neigh_cache_key tk;
134 	int err;
135 
136 	err = prestera_util_nhc2nc_key(sw, nhc, &tk);
137 	if (err)
138 		return false;
139 
140 	if (memcmp(&tk, nk, sizeof(tk)))
141 		return false;
142 
143 	return true;
144 }
145 
146 static int
147 prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n,
148 			   struct prestera_kern_neigh_cache_key *key)
149 {
150 	memset(key, 0, sizeof(*key));
151 	if (n->tbl->family == AF_INET) {
152 		key->addr.v = PRESTERA_IPV4;
153 		key->addr.u.ipv4 = *(__be32 *)n->primary_key;
154 	} else {
155 		return -ENOENT;
156 	}
157 
158 	key->dev = n->dev;
159 
160 	return 0;
161 }
162 
163 static bool __prestera_fi_is_direct(struct fib_info *fi)
164 {
165 	struct fib_nh *fib_nh;
166 
167 	if (fib_info_num_path(fi) == 1) {
168 		fib_nh = fib_info_nh(fi, 0);
169 		if (fib_nh->fib_nh_gw_family == AF_UNSPEC)
170 			return true;
171 	}
172 
173 	return false;
174 }
175 
176 static bool prestera_fi_is_direct(struct fib_info *fi)
177 {
178 	if (fi->fib_type != RTN_UNICAST)
179 		return false;
180 
181 	return __prestera_fi_is_direct(fi);
182 }
183 
184 static bool prestera_fi_is_nh(struct fib_info *fi)
185 {
186 	if (fi->fib_type != RTN_UNICAST)
187 		return false;
188 
189 	return !__prestera_fi_is_direct(fi);
190 }
191 
192 static bool __prestera_fi6_is_direct(struct fib6_info *fi)
193 {
194 	if (!fi->fib6_nh->nh_common.nhc_gw_family)
195 		return true;
196 
197 	return false;
198 }
199 
200 static bool prestera_fi6_is_direct(struct fib6_info *fi)
201 {
202 	if (fi->fib6_type != RTN_UNICAST)
203 		return false;
204 
205 	return __prestera_fi6_is_direct(fi);
206 }
207 
208 static bool prestera_fi6_is_nh(struct fib6_info *fi)
209 {
210 	if (fi->fib6_type != RTN_UNICAST)
211 		return false;
212 
213 	return !__prestera_fi6_is_direct(fi);
214 }
215 
216 static bool prestera_fib_info_is_direct(struct fib_notifier_info *info)
217 {
218 	struct fib6_entry_notifier_info *fen6_info =
219 		container_of(info, struct fib6_entry_notifier_info, info);
220 	struct fib_entry_notifier_info *fen_info =
221 		container_of(info, struct fib_entry_notifier_info, info);
222 
223 	if (info->family == AF_INET)
224 		return prestera_fi_is_direct(fen_info->fi);
225 	else
226 		return prestera_fi6_is_direct(fen6_info->rt);
227 }
228 
229 static bool prestera_fib_info_is_nh(struct fib_notifier_info *info)
230 {
231 	struct fib6_entry_notifier_info *fen6_info =
232 		container_of(info, struct fib6_entry_notifier_info, info);
233 	struct fib_entry_notifier_info *fen_info =
234 		container_of(info, struct fib_entry_notifier_info, info);
235 
236 	if (info->family == AF_INET)
237 		return prestera_fi_is_nh(fen_info->fi);
238 	else
239 		return prestera_fi6_is_nh(fen6_info->rt);
240 }
241 
242 /* must be called with rcu_read_lock() */
243 static int prestera_util_kern_get_route(struct fib_result *res, u32 tb_id,
244 					__be32 *addr)
245 {
246 	struct flowi4 fl4;
247 
	/* TODO: walk through the appropriate kernel tables to learn whether
	 * the same prefix exists in several tables
	 */
251 	memset(&fl4, 0, sizeof(fl4));
252 	fl4.daddr = *addr;
253 	return fib_lookup(&init_net, &fl4, res, 0 /* FIB_LOOKUP_NOREF */);
254 }
255 
256 static bool
257 __prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
258 				       struct net_device *dev)
259 {
260 	struct fib_nh *fib_nh;
261 	struct fib_result res;
262 	bool reachable;
263 
264 	reachable = false;
265 
266 	if (!prestera_util_kern_get_route(&res, tb_id, addr))
267 		if (prestera_fi_is_direct(res.fi)) {
268 			fib_nh = fib_info_nh(res.fi, 0);
269 			if (dev == fib_nh->fib_nh_dev)
270 				reachable = true;
271 		}
272 
273 	return reachable;
274 }
275 
276 /* Check if neigh route is reachable */
277 static bool
278 prestera_util_kern_n_is_reachable(u32 tb_id,
279 				  struct prestera_ip_addr *addr,
280 				  struct net_device *dev)
281 {
282 	if (addr->v == PRESTERA_IPV4)
283 		return __prestera_util_kern_n_is_reachable_v4(tb_id,
284 							      &addr->u.ipv4,
285 							      dev);
286 	else
287 		return false;
288 }
289 
290 static void prestera_util_kern_set_neigh_offload(struct neighbour *n,
291 						 bool offloaded)
292 {
293 	if (offloaded)
294 		n->flags |= NTF_OFFLOADED;
295 	else
296 		n->flags &= ~NTF_OFFLOADED;
297 }
298 
299 static void
300 prestera_util_kern_set_nh_offload(struct fib_nh_common *nhc, bool offloaded, bool trap)
301 {
	if (offloaded)
		nhc->nhc_flags |= RTNH_F_OFFLOAD;
	else
		nhc->nhc_flags &= ~RTNH_F_OFFLOAD;

	if (trap)
		nhc->nhc_flags |= RTNH_F_TRAP;
	else
		nhc->nhc_flags &= ~RTNH_F_TRAP;
311 }
312 
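/* Return the n-th nexthop of the route described by info. For IPv4 it is
 * taken from fi via fib_info_nh(); for IPv6, index 0 is the route's own
 * fib6_nh and the remaining ones are walked through the fib6_siblings list.
 */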
313 static struct fib_nh_common *
314 prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
315 {
316 	struct fib6_entry_notifier_info *fen6_info;
317 	struct fib_entry_notifier_info *fen4_info;
318 	struct fib6_info *iter;
319 
320 	if (info->family == AF_INET) {
321 		fen4_info = container_of(info, struct fib_entry_notifier_info,
322 					 info);
323 		return &fib_info_nh(fen4_info->fi, n)->nh_common;
324 	} else if (info->family == AF_INET6) {
325 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
326 					 info);
327 		if (!n)
328 			return &fen6_info->rt->fib6_nh->nh_common;
329 
330 		list_for_each_entry(iter, &fen6_info->rt->fib6_siblings,
331 				    fib6_siblings) {
332 			if (!--n)
333 				return &iter->fib6_nh->nh_common;
334 		}
335 	}
336 
	/* If the family is incorrect, the caller has a bug. If the requested
	 * index is not found, that is also a bug: a valid index must be
	 * produced by prestera_kern_fib_info_nhs(), which checks the list
	 * length.
	 */
341 	WARN(1, "Invalid parameters passed to %s n=%d i=%p",
342 	     __func__, n, info);
343 	return NULL;
344 }
345 
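/* Return the number of nexthops of the route described by info:
 * fib_info_num_path() for IPv4, fib6_nsiblings + 1 for IPv6.
 */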
346 static int prestera_kern_fib_info_nhs(struct fib_notifier_info *info)
347 {
348 	struct fib6_entry_notifier_info *fen6_info;
349 	struct fib_entry_notifier_info *fen4_info;
350 
351 	if (info->family == AF_INET) {
352 		fen4_info = container_of(info, struct fib_entry_notifier_info,
353 					 info);
354 		return fib_info_num_path(fen4_info->fi);
355 	} else if (info->family == AF_INET6) {
356 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
357 					 info);
358 		return fen6_info->rt->fib6_nsiblings + 1;
359 	}
360 
361 	return 0;
362 }
363 
364 static unsigned char
365 prestera_kern_fib_info_type(struct fib_notifier_info *info)
366 {
367 	struct fib6_entry_notifier_info *fen6_info;
368 	struct fib_entry_notifier_info *fen4_info;
369 
370 	if (info->family == AF_INET) {
371 		fen4_info = container_of(info, struct fib_entry_notifier_info,
372 					 info);
373 		return fen4_info->fi->fib_type;
374 	} else if (info->family == AF_INET6) {
375 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
376 					 info);
		/* TODO: ECMP in ipv6 is represented by several routes;
		 * every route has a single nh.
		 */
380 		return fen6_info->rt->fib6_type;
381 	}
382 
383 	return RTN_UNSPEC;
384 }
385 
/* A uc_nh route whose single nexthop equals the route key is considered
 * a neighbour route.
 */
387 static bool
388 prestera_fib_node_util_is_neighbour(struct prestera_fib_node *fib_node)
389 {
390 	if (fib_node->info.type != PRESTERA_FIB_TYPE_UC_NH)
391 		return false;
392 
393 	if (fib_node->info.nh_grp->nh_neigh_head[1].neigh)
394 		return false;
395 
396 	if (!fib_node->info.nh_grp->nh_neigh_head[0].neigh)
397 		return false;
398 
399 	if (memcmp(&fib_node->info.nh_grp->nh_neigh_head[0].neigh->key.addr,
400 		   &fib_node->key.addr, sizeof(struct prestera_ip_addr)))
401 		return false;
402 
403 	return true;
404 }
405 
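/* Map a kernel netdev to a prestera interface type: VLAN-over-bridge and
 * bridge devices map to VID_E, LAG masters to LAG_E, macvlan is resolved
 * through its lower device, everything else is treated as a port.
 */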
406 static int prestera_dev_if_type(const struct net_device *dev)
407 {
408 	struct macvlan_dev *vlan;
409 
410 	if (is_vlan_dev(dev) &&
411 	    netif_is_bridge_master(vlan_dev_real_dev(dev))) {
412 		return PRESTERA_IF_VID_E;
413 	} else if (netif_is_bridge_master(dev)) {
414 		return PRESTERA_IF_VID_E;
415 	} else if (netif_is_lag_master(dev)) {
416 		return PRESTERA_IF_LAG_E;
417 	} else if (netif_is_macvlan(dev)) {
418 		vlan = netdev_priv(dev);
419 		return prestera_dev_if_type(vlan->lowerdev);
420 	} else {
421 		return PRESTERA_IF_PORT_E;
422 	}
423 }
424 
425 static int
426 prestera_neigh_iface_init(struct prestera_switch *sw,
427 			  struct prestera_iface *iface,
428 			  struct neighbour *n)
429 {
430 	struct prestera_port *port;
431 
432 	iface->vlan_id = 0; /* TODO: vlan egress */
433 	iface->type = prestera_dev_if_type(n->dev);
434 	if (iface->type != PRESTERA_IF_PORT_E)
435 		return -EINVAL;
436 
437 	if (!prestera_netdev_check(n->dev))
438 		return -EINVAL;
439 
440 	port = netdev_priv(n->dev);
441 	iface->dev_port.hw_dev_num = port->dev_id;
442 	iface->dev_port.port_num = port->hw_id;
443 
444 	return 0;
445 }
446 
447 static struct prestera_kern_neigh_cache *
448 prestera_kern_neigh_cache_find(struct prestera_switch *sw,
449 			       struct prestera_kern_neigh_cache_key *key)
450 {
451 	struct prestera_kern_neigh_cache *n_cache;
452 
453 	n_cache =
454 	 rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key,
455 				__prestera_kern_neigh_cache_ht_params);
456 	return IS_ERR(n_cache) ? NULL : n_cache;
457 }
458 
459 static void
460 __prestera_kern_neigh_cache_destruct(struct prestera_switch *sw,
461 				     struct prestera_kern_neigh_cache *n_cache)
462 {
463 	dev_put(n_cache->key.dev);
464 }
465 
466 static void
467 __prestera_kern_neigh_cache_destroy(struct prestera_switch *sw,
468 				    struct prestera_kern_neigh_cache *n_cache)
469 {
470 	rhashtable_remove_fast(&sw->router->kern_neigh_cache_ht,
471 			       &n_cache->ht_node,
472 			       __prestera_kern_neigh_cache_ht_params);
473 	__prestera_kern_neigh_cache_destruct(sw, n_cache);
474 	kfree(n_cache);
475 }
476 
477 static struct prestera_kern_neigh_cache *
478 __prestera_kern_neigh_cache_create(struct prestera_switch *sw,
479 				   struct prestera_kern_neigh_cache_key *key)
480 {
481 	struct prestera_kern_neigh_cache *n_cache;
482 	int err;
483 
484 	n_cache = kzalloc(sizeof(*n_cache), GFP_KERNEL);
485 	if (!n_cache)
486 		goto err_kzalloc;
487 
488 	memcpy(&n_cache->key, key, sizeof(*key));
489 	dev_hold(n_cache->key.dev);
490 
491 	INIT_LIST_HEAD(&n_cache->kern_fib_cache_list);
492 	err = rhashtable_insert_fast(&sw->router->kern_neigh_cache_ht,
493 				     &n_cache->ht_node,
494 				     __prestera_kern_neigh_cache_ht_params);
495 	if (err)
496 		goto err_ht_insert;
497 
498 	return n_cache;
499 
500 err_ht_insert:
501 	dev_put(n_cache->key.dev);
502 	kfree(n_cache);
503 err_kzalloc:
504 	return NULL;
505 }
506 
507 static struct prestera_kern_neigh_cache *
508 prestera_kern_neigh_cache_get(struct prestera_switch *sw,
509 			      struct prestera_kern_neigh_cache_key *key)
510 {
511 	struct prestera_kern_neigh_cache *n_cache;
512 
513 	n_cache = prestera_kern_neigh_cache_find(sw, key);
514 	if (!n_cache)
515 		n_cache = __prestera_kern_neigh_cache_create(sw, key);
516 
517 	return n_cache;
518 }
519 
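/* Release a neigh cache entry: it is destroyed once it is neither present
 * in the kernel nor referenced by any fib cache, otherwise it is returned
 * unchanged.
 */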
520 static struct prestera_kern_neigh_cache *
521 prestera_kern_neigh_cache_put(struct prestera_switch *sw,
522 			      struct prestera_kern_neigh_cache *n_cache)
523 {
524 	if (!n_cache->in_kernel &&
525 	    list_empty(&n_cache->kern_fib_cache_list)) {
526 		__prestera_kern_neigh_cache_destroy(sw, n_cache);
527 		return NULL;
528 	}
529 
530 	return n_cache;
531 }
532 
533 static struct prestera_kern_fib_cache *
534 prestera_kern_fib_cache_find(struct prestera_switch *sw,
535 			     struct prestera_kern_fib_cache_key *key)
536 {
537 	struct prestera_kern_fib_cache *fib_cache;
538 
539 	fib_cache =
540 	 rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key,
541 				__prestera_kern_fib_cache_ht_params);
542 	return fib_cache;
543 }
544 
545 static void
546 __prestera_kern_fib_cache_destruct(struct prestera_switch *sw,
547 				   struct prestera_kern_fib_cache *fib_cache)
548 {
549 	struct prestera_kern_neigh_cache *n_cache;
550 	int i;
551 
552 	for (i = 0; i < PRESTERA_NHGR_SIZE_MAX; i++) {
553 		n_cache = fib_cache->kern_neigh_cache_head[i].n_cache;
554 		if (n_cache) {
555 			list_del(&fib_cache->kern_neigh_cache_head[i].head);
556 			prestera_kern_neigh_cache_put(sw, n_cache);
557 		}
558 	}
559 
560 	fib_info_put(fib_cache->fen4_info.fi);
561 }
562 
563 static void
564 prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
565 				struct prestera_kern_fib_cache *fib_cache)
566 {
567 	rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
568 			       &fib_cache->ht_node,
569 			       __prestera_kern_fib_cache_ht_params);
570 	__prestera_kern_fib_cache_destruct(sw, fib_cache);
571 	kfree(fib_cache);
572 }
573 
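/* Link a fib cache entry to a neigh cache entry for each of its nexthops.
 * Failures are not fatal: the function returns 0 and the route is then
 * programmed as TRAP instead of a nexthop group.
 */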
574 static int
575 __prestera_kern_fib_cache_create_nhs(struct prestera_switch *sw,
576 				     struct prestera_kern_fib_cache *fc)
577 {
578 	struct prestera_kern_neigh_cache_key nc_key;
579 	struct prestera_kern_neigh_cache *n_cache;
580 	struct fib_nh_common *nhc;
581 	int i, nhs, err;
582 
583 	if (!prestera_fib_info_is_nh(&fc->info))
584 		return 0;
585 
586 	nhs = prestera_kern_fib_info_nhs(&fc->info);
587 	if (nhs > PRESTERA_NHGR_SIZE_MAX)
588 		return 0;
589 
590 	for (i = 0; i < nhs; i++) {
591 		nhc = prestera_kern_fib_info_nhc(&fc->fen4_info.info, i);
592 		err = prestera_util_nhc2nc_key(sw, nhc, &nc_key);
593 		if (err)
594 			return 0;
595 
596 		n_cache = prestera_kern_neigh_cache_get(sw, &nc_key);
597 		if (!n_cache)
598 			return 0;
599 
600 		fc->kern_neigh_cache_head[i].this = fc;
601 		fc->kern_neigh_cache_head[i].n_cache = n_cache;
602 		list_add(&fc->kern_neigh_cache_head[i].head,
603 			 &n_cache->kern_fib_cache_list);
604 	}
605 
606 	return 0;
607 }
608 
/* Operations on fi (offload, etc) must be wrapped in utils.
 * This function just creates the storage.
 */
612 static struct prestera_kern_fib_cache *
613 prestera_kern_fib_cache_create(struct prestera_switch *sw,
614 			       struct prestera_kern_fib_cache_key *key,
615 			       struct fib_notifier_info *info)
616 {
617 	struct fib_entry_notifier_info *fen_info =
618 		container_of(info, struct fib_entry_notifier_info, info);
619 	struct prestera_kern_fib_cache *fib_cache;
620 	int err;
621 
622 	fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL);
623 	if (!fib_cache)
624 		goto err_kzalloc;
625 
626 	memcpy(&fib_cache->key, key, sizeof(*key));
627 	fib_info_hold(fen_info->fi);
628 	memcpy(&fib_cache->fen4_info, fen_info, sizeof(*fen_info));
629 
630 	err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
631 				     &fib_cache->ht_node,
632 				     __prestera_kern_fib_cache_ht_params);
633 	if (err)
634 		goto err_ht_insert;
635 
636 	/* Handle nexthops */
637 	err = __prestera_kern_fib_cache_create_nhs(sw, fib_cache);
638 	if (err)
639 		goto out; /* Not critical */
640 
641 out:
642 	return fib_cache;
643 
644 err_ht_insert:
645 	fib_info_put(fen_info->fi);
646 	kfree(fib_cache);
647 err_kzalloc:
648 	return NULL;
649 }
650 
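/* Set the offload/trap flags on the kernel nexthops of fibc: on all of them
 * when nc is NULL, otherwise only on the nexthop matching nc's key.
 */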
651 static void
652 __prestera_k_arb_fib_nh_offload_set(struct prestera_switch *sw,
653 				    struct prestera_kern_fib_cache *fibc,
654 				    struct prestera_kern_neigh_cache *nc,
655 				    bool offloaded, bool trap)
656 {
657 	struct fib_nh_common *nhc;
658 	int i, nhs;
659 
660 	nhs = prestera_kern_fib_info_nhs(&fibc->info);
661 	for (i = 0; i < nhs; i++) {
662 		nhc = prestera_kern_fib_info_nhc(&fibc->info, i);
663 		if (!nc) {
664 			prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
665 			continue;
666 		}
667 
668 		if (prestera_util_nhc_eq_n_cache_key(sw, nhc, &nc->key)) {
669 			prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
670 			break;
671 		}
672 	}
673 }
674 
675 static void
676 __prestera_k_arb_n_offload_set(struct prestera_switch *sw,
677 			       struct prestera_kern_neigh_cache *nc,
678 			       bool offloaded)
679 {
680 	struct neighbour *n;
681 
682 	n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
683 			 nc->key.dev);
684 	if (!n)
685 		return;
686 
687 	prestera_util_kern_set_neigh_offload(n, offloaded);
688 	neigh_release(n);
689 }
690 
691 static void
692 __prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
693 				     struct prestera_kern_fib_cache *fc,
694 				     bool fail, bool offload, bool trap)
695 {
696 	struct fib_rt_info fri;
697 
698 	switch (fc->key.addr.v) {
699 	case PRESTERA_IPV4:
700 		fri.fi = fc->fen4_info.fi;
701 		fri.tb_id = fc->key.kern_tb_id;
702 		fri.dst = fc->key.addr.u.ipv4;
703 		fri.dst_len = fc->key.prefix_len;
704 		fri.dscp = fc->fen4_info.dscp;
705 		fri.type = fc->fen4_info.type;
706 		/* flags begin */
707 		fri.offload = offload;
708 		fri.trap = trap;
709 		fri.offload_failed = fail;
710 		/* flags end */
711 		fib_alias_hw_flags_set(&init_net, &fri);
712 		return;
713 	case PRESTERA_IPV6:
714 		/* TODO */
715 		return;
716 	}
717 }
718 
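/* Create or remove a host-prefix LPM entry that points directly to the
 * neighbour. An existing entry is removed only if it is a neighbour route,
 * so entries owned by real fib routes are left untouched.
 */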
719 static void
720 __prestera_k_arb_n_lpm_set(struct prestera_switch *sw,
721 			   struct prestera_kern_neigh_cache *n_cache,
722 			   bool enabled)
723 {
724 	struct prestera_nexthop_group_key nh_grp_key;
725 	struct prestera_kern_fib_cache_key fc_key;
726 	struct prestera_kern_fib_cache *fib_cache;
727 	struct prestera_fib_node *fib_node;
728 	struct prestera_fib_key fib_key;
729 
	/* Exception for fc with prefix 32: the LPM entry is already used
	 * by the fib
	 */
	memset(&fc_key, 0, sizeof(fc_key));
	fc_key.addr = n_cache->key.addr;
	fc_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
	/* It would be better to use the tb_id of the route that points to
	 * this neighbour. We take it from the rif instead, although the rif
	 * is inconsistent for this purpose: in_rif and out_rif must be
	 * separated.
	 * Also note: each fib that points to this neigh should get a separate
	 * neigh lpm entry (one per ingress vr).
	 */
740 	fc_key.kern_tb_id = l3mdev_fib_table(n_cache->key.dev);
741 	fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
742 	memset(&fib_key, 0, sizeof(fib_key));
743 	fib_key.addr = n_cache->key.addr;
744 	fib_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
745 	fib_key.tb_id = prestera_fix_tb_id(fc_key.kern_tb_id);
746 	fib_node = prestera_fib_node_find(sw, &fib_key);
747 	if (!fib_cache || !fib_cache->reachable) {
748 		if (!enabled && fib_node) {
749 			if (prestera_fib_node_util_is_neighbour(fib_node))
750 				prestera_fib_node_destroy(sw, fib_node);
751 			return;
752 		}
753 	}
754 
755 	if (enabled && !fib_node) {
756 		memset(&nh_grp_key, 0, sizeof(nh_grp_key));
757 		prestera_util_nc_key2nh_key(&n_cache->key,
758 					    &nh_grp_key.neigh[0]);
759 		fib_node = prestera_fib_node_create(sw, &fib_key,
760 						    PRESTERA_FIB_TYPE_UC_NH,
761 						    &nh_grp_key);
762 		if (!fib_node)
763 			pr_err("%s failed ip=%pI4n", "prestera_fib_node_create",
764 			       &fib_key.addr.u.ipv4);
765 		return;
766 	}
767 }
768 
769 static void
770 __prestera_k_arb_nc_kern_fib_fetch(struct prestera_switch *sw,
771 				   struct prestera_kern_neigh_cache *nc)
772 {
773 	if (prestera_util_kern_n_is_reachable(l3mdev_fib_table(nc->key.dev),
774 					      &nc->key.addr, nc->key.dev))
775 		nc->reachable = true;
776 	else
777 		nc->reachable = false;
778 }
779 
780 /* Kernel neighbour -> neigh_cache info */
781 static void
782 __prestera_k_arb_nc_kern_n_fetch(struct prestera_switch *sw,
783 				 struct prestera_kern_neigh_cache *nc)
784 {
785 	struct neighbour *n;
786 	int err;
787 
788 	memset(&nc->nh_neigh_info, 0, sizeof(nc->nh_neigh_info));
789 	n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev);
790 	if (!n)
791 		goto out;
792 
793 	read_lock_bh(&n->lock);
794 	if (n->nud_state & NUD_VALID && !n->dead) {
795 		err = prestera_neigh_iface_init(sw, &nc->nh_neigh_info.iface,
796 						n);
797 		if (err)
798 			goto n_read_out;
799 
800 		memcpy(&nc->nh_neigh_info.ha[0], &n->ha[0], ETH_ALEN);
801 		nc->nh_neigh_info.connected = true;
802 	}
803 n_read_out:
804 	read_unlock_bh(&n->lock);
805 out:
806 	nc->in_kernel = nc->nh_neigh_info.connected;
807 	if (n)
808 		neigh_release(n);
809 }
810 
811 /* neigh_cache info -> lpm update */
812 static void
813 __prestera_k_arb_nc_apply(struct prestera_switch *sw,
814 			  struct prestera_kern_neigh_cache *nc)
815 {
816 	struct prestera_kern_neigh_cache_head *nhead;
817 	struct prestera_nh_neigh_key nh_key;
818 	struct prestera_nh_neigh *nh_neigh;
819 	int err;
820 
821 	__prestera_k_arb_n_lpm_set(sw, nc, nc->reachable && nc->in_kernel);
822 	__prestera_k_arb_n_offload_set(sw, nc, nc->reachable && nc->in_kernel);
823 
824 	prestera_util_nc_key2nh_key(&nc->key, &nh_key);
825 	nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
826 	if (!nh_neigh)
827 		goto out;
828 
829 	/* Do hw update only if something changed to prevent nh flap */
830 	if (memcmp(&nc->nh_neigh_info, &nh_neigh->info,
831 		   sizeof(nh_neigh->info))) {
832 		memcpy(&nh_neigh->info, &nc->nh_neigh_info,
833 		       sizeof(nh_neigh->info));
834 		err = prestera_nh_neigh_set(sw, nh_neigh);
835 		if (err) {
836 			pr_err("%s failed with err=%d ip=%pI4n mac=%pM",
837 			       "prestera_nh_neigh_set", err,
838 			       &nh_neigh->key.addr.u.ipv4,
839 			       &nh_neigh->info.ha[0]);
840 			goto out;
841 		}
842 	}
843 
844 out:
845 	list_for_each_entry(nhead, &nc->kern_fib_cache_list, head) {
846 		__prestera_k_arb_fib_nh_offload_set(sw, nhead->this, nc,
847 						    nc->in_kernel,
848 						    !nc->in_kernel);
849 	}
850 }
851 
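/* Translate a fib cache entry into lpm_info: the fib key, fib type and
 * nexthop group key that will be used to program the hardware LPM.
 */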
852 static int
853 __prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
854 				     struct prestera_kern_fib_cache *fc)
855 {
856 	struct fib_nh_common *nhc;
857 	int nh_cnt;
858 
859 	memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));
860 
861 	switch (prestera_kern_fib_info_type(&fc->info)) {
862 	case RTN_UNICAST:
863 		if (prestera_fib_info_is_direct(&fc->info) &&
864 		    fc->key.prefix_len ==
865 			PRESTERA_IP_ADDR_PLEN(fc->key.addr.v)) {
			/* This is a special case: when the prefix length is
			 * 32, a direct route conflicts in the lpm - once a
			 * TRAP entry is added, there is no room left for the
			 * neighbour entry. So represent a direct /32 route as
			 * NH, and the neighbour will be resolved as the
			 * nexthop of this route.
			 */
873 			nhc = prestera_kern_fib_info_nhc(&fc->info, 0);
874 			fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_UC_NH;
875 			fc->lpm_info.nh_grp_key.neigh[0].addr =
876 				fc->key.addr;
877 			fc->lpm_info.nh_grp_key.neigh[0].rif =
878 				nhc->nhc_dev;
879 
880 			break;
881 		}
882 
		/* We could also get nh_grp_key from fi. That would be more
		 * correct, because the cache does not always represent what
		 * is actually written to the lpm. But for now we use the nh
		 * cache for this case as well.
		 */
887 		for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
888 			if (!fc->kern_neigh_cache_head[nh_cnt].n_cache)
889 				break;
890 
891 			fc->lpm_info.nh_grp_key.neigh[nh_cnt].addr =
892 				fc->kern_neigh_cache_head[nh_cnt].n_cache->key.addr;
893 			fc->lpm_info.nh_grp_key.neigh[nh_cnt].rif =
894 				fc->kern_neigh_cache_head[nh_cnt].n_cache->key.dev;
895 		}
896 
897 		fc->lpm_info.fib_type = nh_cnt ?
898 					PRESTERA_FIB_TYPE_UC_NH :
899 					PRESTERA_FIB_TYPE_TRAP;
900 		break;
901 	/* Unsupported. Leave it for kernel: */
902 	case RTN_BROADCAST:
903 	case RTN_MULTICAST:
904 	/* Routes we must trap by design: */
905 	case RTN_LOCAL:
906 	case RTN_UNREACHABLE:
907 	case RTN_PROHIBIT:
908 		fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
909 		break;
910 	case RTN_BLACKHOLE:
911 		fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP;
912 		break;
913 	default:
914 		dev_err(sw->dev->dev, "Unsupported fib_type");
915 		return -EOPNOTSUPP;
916 	}
917 
918 	fc->lpm_info.fib_key.addr = fc->key.addr;
919 	fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len;
920 	fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id);
921 
922 	return 0;
923 }
924 
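/* (Re)program the hardware LPM entry for fc: an existing fib node for the
 * key is always destroyed first, and a new one is created only if enabled.
 */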
925 static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
926 				      struct prestera_kern_fib_cache *fc,
927 				      bool enabled)
928 {
929 	struct prestera_fib_node *fib_node;
930 
931 	fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key);
932 	if (fib_node)
933 		prestera_fib_node_destroy(sw, fib_node);
934 
935 	if (!enabled)
936 		return 0;
937 
938 	fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
939 					    fc->lpm_info.fib_type,
940 					    &fc->lpm_info.nh_grp_key);
941 
942 	if (!fib_node) {
943 		dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
944 			&fc->key.addr.u.ipv4, fc->key.prefix_len,
945 			fc->key.kern_tb_id);
946 		return -ENOENT;
947 	}
948 
949 	return 0;
950 }
951 
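/* Recalculate lpm_info for fc, program the hardware accordingly and reflect
 * the result in the kernel offload/trap flags.
 */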
952 static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
953 				     struct prestera_kern_fib_cache *fc)
954 {
955 	int err;
956 
957 	err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc);
958 	if (err)
959 		return err;
960 
961 	err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable);
962 	if (err) {
963 		__prestera_k_arb_fib_lpm_offload_set(sw, fc,
964 						     true, false, false);
965 		return err;
966 	}
967 
968 	switch (fc->lpm_info.fib_type) {
969 	case PRESTERA_FIB_TYPE_UC_NH:
970 		__prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
971 						     fc->reachable, false);
972 		break;
973 	case PRESTERA_FIB_TYPE_TRAP:
974 		__prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
975 						     false, fc->reachable);
976 		break;
977 	case PRESTERA_FIB_TYPE_DROP:
978 		__prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true,
979 						     fc->reachable);
980 		break;
981 	case PRESTERA_FIB_TYPE_INVALID:
982 		break;
983 	}
984 
985 	return 0;
986 }
987 
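/* A prefix in RT_TABLE_LOCAL overlaps the same prefix in RT_TABLE_MAIN (and
 * vice versa), and only one of them may own the hardware LPM entry. The two
 * helpers below look up the overlapped/overlapping counterpart, if any.
 */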
988 static struct prestera_kern_fib_cache *
989 __prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw,
990 				   struct prestera_kern_fib_cache *fc)
991 {
992 	struct prestera_kern_fib_cache_key fc_key;
993 	struct prestera_kern_fib_cache *rfc;
994 
995 	/* TODO: parse kernel rules */
996 	rfc = NULL;
997 	if (fc->key.kern_tb_id == RT_TABLE_LOCAL) {
998 		memcpy(&fc_key, &fc->key, sizeof(fc_key));
999 		fc_key.kern_tb_id = RT_TABLE_MAIN;
1000 		rfc = prestera_kern_fib_cache_find(sw, &fc_key);
1001 	}
1002 
1003 	return rfc;
1004 }
1005 
1006 static struct prestera_kern_fib_cache *
1007 __prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
1008 				     struct prestera_kern_fib_cache *fc)
1009 {
1010 	struct prestera_kern_fib_cache_key fc_key;
1011 	struct prestera_kern_fib_cache *rfc;
1012 
1013 	/* TODO: parse kernel rules */
1014 	rfc = NULL;
1015 	if (fc->key.kern_tb_id == RT_TABLE_MAIN) {
1016 		memcpy(&fc_key, &fc->key, sizeof(fc_key));
1017 		fc_key.kern_tb_id = RT_TABLE_LOCAL;
1018 		rfc = prestera_kern_fib_cache_find(sw, &fc_key);
1019 	}
1020 
1021 	return rfc;
1022 }
1023 
1024 /* Propagate kernel event to hw */
1025 static void prestera_k_arb_n_evt(struct prestera_switch *sw,
1026 				 struct neighbour *n)
1027 {
1028 	struct prestera_kern_neigh_cache_key n_key;
1029 	struct prestera_kern_neigh_cache *n_cache;
1030 	int err;
1031 
1032 	err = prestera_util_neigh2nc_key(sw, n, &n_key);
1033 	if (err)
1034 		return;
1035 
1036 	n_cache = prestera_kern_neigh_cache_find(sw, &n_key);
1037 	if (!n_cache) {
1038 		n_cache = prestera_kern_neigh_cache_get(sw, &n_key);
1039 		if (!n_cache)
1040 			return;
1041 		__prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
1042 	}
1043 
1044 	__prestera_k_arb_nc_kern_n_fetch(sw, n_cache);
1045 	__prestera_k_arb_nc_apply(sw, n_cache);
1046 
1047 	prestera_kern_neigh_cache_put(sw, n_cache);
1048 }
1049 
1050 static void __prestera_k_arb_fib_evt2nc(struct prestera_switch *sw)
1051 {
1052 	struct prestera_kern_neigh_cache *n_cache;
1053 	struct rhashtable_iter iter;
1054 
1055 	rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
1056 	rhashtable_walk_start(&iter);
1057 	while (1) {
1058 		n_cache = rhashtable_walk_next(&iter);
1059 
1060 		if (!n_cache)
1061 			break;
1062 
1063 		if (IS_ERR(n_cache))
1064 			continue;
1065 
1066 		rhashtable_walk_stop(&iter);
1067 		__prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
1068 		__prestera_k_arb_nc_apply(sw, n_cache);
1069 		rhashtable_walk_start(&iter);
1070 	}
1071 	rhashtable_walk_stop(&iter);
1072 	rhashtable_walk_exit(&iter);
1073 }
1074 
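/* Handle a kernel fib replace/del event: drop the old cache entry and promote
 * the route it used to overlap; on replace, create a new cache entry, demote
 * the route it overlaps and program the hardware. Finally re-sync all neigh
 * cache entries, since their reachability may have changed.
 */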
1075 static int
1076 prestera_k_arb_fib_evt(struct prestera_switch *sw,
1077 		       bool replace, /* replace or del */
1078 		       struct fib_notifier_info *info)
1079 {
1080 	struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
1081 	struct prestera_kern_fib_cache_key fc_key;
1082 	struct prestera_kern_fib_cache *fib_cache;
1083 	int err;
1084 
1085 	prestera_util_fen_info2fib_cache_key(info, &fc_key);
1086 	fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
1087 	if (fib_cache) {
1088 		fib_cache->reachable = false;
1089 		err = __prestera_k_arb_fc_apply(sw, fib_cache);
1090 		if (err)
1091 			dev_err(sw->dev->dev,
1092 				"Applying destroyed fib_cache failed");
1093 
1094 		bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
1095 		tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
1096 		if (!tfib_cache && bfib_cache) {
1097 			bfib_cache->reachable = true;
1098 			err = __prestera_k_arb_fc_apply(sw, bfib_cache);
1099 			if (err)
1100 				dev_err(sw->dev->dev,
1101 					"Applying fib_cache btm failed");
1102 		}
1103 
1104 		prestera_kern_fib_cache_destroy(sw, fib_cache);
1105 	}
1106 
1107 	if (replace) {
1108 		fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, info);
1109 		if (!fib_cache) {
1110 			dev_err(sw->dev->dev, "fib_cache == NULL");
1111 			return -ENOENT;
1112 		}
1113 
1114 		bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
1115 		tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
1116 		if (!tfib_cache)
1117 			fib_cache->reachable = true;
1118 
1119 		if (bfib_cache) {
1120 			bfib_cache->reachable = false;
1121 			err = __prestera_k_arb_fc_apply(sw, bfib_cache);
1122 			if (err)
1123 				dev_err(sw->dev->dev,
1124 					"Applying fib_cache btm failed");
1125 		}
1126 
1127 		err = __prestera_k_arb_fc_apply(sw, fib_cache);
1128 		if (err)
1129 			dev_err(sw->dev->dev, "Applying fib_cache failed");
1130 	}
1131 
	/* Update all neighs to resolve overlaps and apply related changes */
1133 	__prestera_k_arb_fib_evt2nc(sw);
1134 
1135 	return 0;
1136 }
1137 
1138 static void __prestera_k_arb_abort_neigh_ht_cb(void *ptr, void *arg)
1139 {
1140 	struct prestera_kern_neigh_cache *n_cache = ptr;
1141 	struct prestera_switch *sw = arg;
1142 
1143 	if (!list_empty(&n_cache->kern_fib_cache_list)) {
1144 		WARN_ON(1); /* BUG */
1145 		return;
1146 	}
1147 	__prestera_k_arb_n_offload_set(sw, n_cache, false);
1148 	n_cache->in_kernel = false;
1149 	/* No need to destroy lpm.
1150 	 * It will be aborted by destroy_ht
1151 	 */
1152 	__prestera_kern_neigh_cache_destruct(sw, n_cache);
1153 	kfree(n_cache);
1154 }
1155 
1156 static void __prestera_k_arb_abort_fib_ht_cb(void *ptr, void *arg)
1157 {
1158 	struct prestera_kern_fib_cache *fib_cache = ptr;
1159 	struct prestera_switch *sw = arg;
1160 
1161 	__prestera_k_arb_fib_lpm_offload_set(sw, fib_cache,
1162 					     false, false,
1163 					     false);
1164 	__prestera_k_arb_fib_nh_offload_set(sw, fib_cache, NULL,
1165 					    false, false);
1166 	/* No need to destroy lpm.
1167 	 * It will be aborted by destroy_ht
1168 	 */
1169 	__prestera_kern_fib_cache_destruct(sw, fib_cache);
1170 	kfree(fib_cache);
1171 }
1172 
1173 static void prestera_k_arb_abort(struct prestera_switch *sw)
1174 {
	/* Function to remove all arbiter entries and related hw objects.
	 * Sequence:
	 *   1) Clear arbiter tables, but don't touch hw
	 *   2) Clear hw
	 * This approach is used because an arbiter object is not directly
	 * mapped to a hw object, so deleting one arbiter object may even lead
	 * to the creation of a hw object (e.g. in case of overlapped routes).
	 */
1183 	rhashtable_free_and_destroy(&sw->router->kern_fib_cache_ht,
1184 				    __prestera_k_arb_abort_fib_ht_cb,
1185 				    sw);
1186 	rhashtable_free_and_destroy(&sw->router->kern_neigh_cache_ht,
1187 				    __prestera_k_arb_abort_neigh_ht_cb,
1188 				    sw);
1189 }
1190 
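/* Create the RIF entry for a port netdev on NETDEV_UP and destroy it on
 * NETDEV_DOWN. Callers guarantee that this is the first/last IPv4 address
 * on the device.
 */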
1191 static int __prestera_inetaddr_port_event(struct net_device *port_dev,
1192 					  unsigned long event,
1193 					  struct netlink_ext_ack *extack)
1194 {
1195 	struct prestera_port *port = netdev_priv(port_dev);
1196 	struct prestera_rif_entry_key re_key = {};
1197 	struct prestera_rif_entry *re;
1198 	u32 kern_tb_id;
1199 	int err;
1200 
1201 	err = prestera_is_valid_mac_addr(port, port_dev->dev_addr);
1202 	if (err) {
1203 		NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix");
1204 		return err;
1205 	}
1206 
1207 	kern_tb_id = l3mdev_fib_table(port_dev);
1208 	re_key.iface.type = PRESTERA_IF_PORT_E;
1209 	re_key.iface.dev_port.hw_dev_num  = port->dev_id;
1210 	re_key.iface.dev_port.port_num  = port->hw_id;
1211 	re = prestera_rif_entry_find(port->sw, &re_key);
1212 
1213 	switch (event) {
1214 	case NETDEV_UP:
1215 		if (re) {
			NL_SET_ERR_MSG_MOD(extack, "RIF already exists");
1217 			return -EEXIST;
1218 		}
1219 		re = prestera_rif_entry_create(port->sw, &re_key,
1220 					       prestera_fix_tb_id(kern_tb_id),
1221 					       port_dev->dev_addr);
1222 		if (!re) {
1223 			NL_SET_ERR_MSG_MOD(extack, "Can't create RIF");
1224 			return -EINVAL;
1225 		}
1226 		dev_hold(port_dev);
1227 		break;
1228 	case NETDEV_DOWN:
1229 		if (!re) {
1230 			NL_SET_ERR_MSG_MOD(extack, "Can't find RIF");
1231 			return -EEXIST;
1232 		}
1233 		prestera_rif_entry_destroy(port->sw, re);
1234 		dev_put(port_dev);
1235 		break;
1236 	}
1237 
1238 	return 0;
1239 }
1240 
1241 static int __prestera_inetaddr_event(struct prestera_switch *sw,
1242 				     struct net_device *dev,
1243 				     unsigned long event,
1244 				     struct netlink_ext_ack *extack)
1245 {
1246 	if (!prestera_netdev_check(dev) || netif_is_any_bridge_port(dev) ||
1247 	    netif_is_lag_port(dev))
1248 		return 0;
1249 
1250 	return __prestera_inetaddr_port_event(dev, event, extack);
1251 }
1252 
1253 static int __prestera_inetaddr_cb(struct notifier_block *nb,
1254 				  unsigned long event, void *ptr)
1255 {
1256 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
1257 	struct net_device *dev = ifa->ifa_dev->dev;
1258 	struct prestera_router *router = container_of(nb,
1259 						      struct prestera_router,
1260 						      inetaddr_nb);
1261 	struct in_device *idev;
1262 	int err = 0;
1263 
1264 	if (event != NETDEV_DOWN)
1265 		goto out;
1266 
	/* Ignore if this is not the last address */
1268 	idev = __in_dev_get_rtnl(dev);
1269 	if (idev && idev->ifa_list)
1270 		goto out;
1271 
1272 	err = __prestera_inetaddr_event(router->sw, dev, event, NULL);
1273 out:
1274 	return notifier_from_errno(err);
1275 }
1276 
1277 static int __prestera_inetaddr_valid_cb(struct notifier_block *nb,
1278 					unsigned long event, void *ptr)
1279 {
1280 	struct in_validator_info *ivi = (struct in_validator_info *)ptr;
1281 	struct net_device *dev = ivi->ivi_dev->dev;
1282 	struct prestera_router *router = container_of(nb,
1283 						      struct prestera_router,
1284 						      inetaddr_valid_nb);
1285 	struct in_device *idev;
1286 	int err = 0;
1287 
1288 	if (event != NETDEV_UP)
1289 		goto out;
1290 
	/* Ignore if this is not the first address */
1292 	idev = __in_dev_get_rtnl(dev);
1293 	if (idev && idev->ifa_list)
1294 		goto out;
1295 
1296 	if (ipv4_is_multicast(ivi->ivi_addr)) {
1297 		NL_SET_ERR_MSG_MOD(ivi->extack,
1298 				   "Multicast addr on RIF is not supported");
1299 		err = -EINVAL;
1300 		goto out;
1301 	}
1302 
1303 	err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack);
1304 out:
1305 	return notifier_from_errno(err);
1306 }
1307 
1308 struct prestera_fib_event_work {
1309 	struct work_struct work;
1310 	struct prestera_switch *sw;
1311 	struct fib_entry_notifier_info fen_info;
1312 	unsigned long event;
1313 };
1314 
1315 static void __prestera_router_fib_event_work(struct work_struct *work)
1316 {
1317 	struct prestera_fib_event_work *fib_work =
1318 			container_of(work, struct prestera_fib_event_work, work);
1319 	struct prestera_switch *sw = fib_work->sw;
1320 	int err;
1321 
1322 	rtnl_lock();
1323 
1324 	switch (fib_work->event) {
1325 	case FIB_EVENT_ENTRY_REPLACE:
1326 		err = prestera_k_arb_fib_evt(sw, true,
1327 					     &fib_work->fen_info.info);
1328 		if (err)
1329 			goto err_out;
1330 
1331 		break;
1332 	case FIB_EVENT_ENTRY_DEL:
1333 		err = prestera_k_arb_fib_evt(sw, false,
1334 					     &fib_work->fen_info.info);
1335 		if (err)
1336 			goto err_out;
1337 
1338 		break;
1339 	}
1340 
1341 	goto out;
1342 
1343 err_out:
1344 	dev_err(sw->dev->dev, "Error when processing %pI4h/%d",
1345 		&fib_work->fen_info.dst,
1346 		fib_work->fen_info.dst_len);
1347 out:
1348 	fib_info_put(fib_work->fen_info.fi);
1349 	rtnl_unlock();
1350 	kfree(fib_work);
1351 }
1352 
1353 /* Called with rcu_read_lock() */
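/* The notifier runs in atomic context, so the event is copied (taking a
 * reference on fi) and processing is deferred to a work item under rtnl_lock.
 */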
1354 static int __prestera_router_fib_event(struct notifier_block *nb,
1355 				       unsigned long event, void *ptr)
1356 {
1357 	struct prestera_fib_event_work *fib_work;
1358 	struct fib_entry_notifier_info *fen_info;
1359 	struct fib_notifier_info *info = ptr;
1360 	struct prestera_router *router;
1361 
1362 	if (info->family != AF_INET)
1363 		return NOTIFY_DONE;
1364 
1365 	router = container_of(nb, struct prestera_router, fib_nb);
1366 
1367 	switch (event) {
1368 	case FIB_EVENT_ENTRY_REPLACE:
1369 	case FIB_EVENT_ENTRY_DEL:
1370 		fen_info = container_of(info, struct fib_entry_notifier_info,
1371 					info);
1372 		if (!fen_info->fi)
1373 			return NOTIFY_DONE;
1374 
1375 		fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
1376 		if (WARN_ON(!fib_work))
1377 			return NOTIFY_BAD;
1378 
1379 		fib_info_hold(fen_info->fi);
1380 		fib_work->fen_info = *fen_info;
1381 		fib_work->event = event;
1382 		fib_work->sw = router->sw;
1383 		INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
1384 		prestera_queue_work(&fib_work->work);
1385 		break;
1386 	default:
1387 		return NOTIFY_DONE;
1388 	}
1389 
1390 	return NOTIFY_DONE;
1391 }
1392 
1393 struct prestera_netevent_work {
1394 	struct work_struct work;
1395 	struct prestera_switch *sw;
1396 	struct neighbour *n;
1397 };
1398 
1399 static void prestera_router_neigh_event_work(struct work_struct *work)
1400 {
1401 	struct prestera_netevent_work *net_work =
1402 		container_of(work, struct prestera_netevent_work, work);
1403 	struct prestera_switch *sw = net_work->sw;
1404 	struct neighbour *n = net_work->n;
1405 
	/* A neigh is not a hw-related object; it is stored only in the
	 * kernel, so it is enough to handle it under rtnl_lock.
	 */
1407 	rtnl_lock();
1408 
1409 	prestera_k_arb_n_evt(sw, n);
1410 
1411 	neigh_release(n);
1412 	rtnl_unlock();
1413 	kfree(net_work);
1414 }
1415 
1416 static int prestera_router_netevent_event(struct notifier_block *nb,
1417 					  unsigned long event, void *ptr)
1418 {
1419 	struct prestera_netevent_work *net_work;
1420 	struct prestera_router *router;
1421 	struct neighbour *n = ptr;
1422 
1423 	router = container_of(nb, struct prestera_router, netevent_nb);
1424 
1425 	switch (event) {
1426 	case NETEVENT_NEIGH_UPDATE:
1427 		if (n->tbl->family != AF_INET)
1428 			return NOTIFY_DONE;
1429 
1430 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
1431 		if (WARN_ON(!net_work))
1432 			return NOTIFY_BAD;
1433 
1434 		neigh_clone(n);
1435 		net_work->n = n;
1436 		net_work->sw = router->sw;
1437 		INIT_WORK(&net_work->work, prestera_router_neigh_event_work);
1438 		prestera_queue_work(&net_work->work);
1439 	}
1440 
1441 	return NOTIFY_DONE;
1442 }
1443 
1444 int prestera_router_init(struct prestera_switch *sw)
1445 {
1446 	struct prestera_router *router;
1447 	int err, nhgrp_cache_bytes;
1448 
1449 	router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
1450 	if (!router)
1451 		return -ENOMEM;
1452 
1453 	sw->router = router;
1454 	router->sw = sw;
1455 
1456 	err = prestera_router_hw_init(sw);
1457 	if (err)
1458 		goto err_router_lib_init;
1459 
1460 	err = rhashtable_init(&router->kern_fib_cache_ht,
1461 			      &__prestera_kern_fib_cache_ht_params);
1462 	if (err)
1463 		goto err_kern_fib_cache_ht_init;
1464 
1465 	err = rhashtable_init(&router->kern_neigh_cache_ht,
1466 			      &__prestera_kern_neigh_cache_ht_params);
1467 	if (err)
1468 		goto err_kern_neigh_cache_ht_init;
1469 
1470 	nhgrp_cache_bytes = sw->size_tbl_router_nexthop / 8 + 1;
1471 	router->nhgrp_hw_state_cache = kzalloc(nhgrp_cache_bytes, GFP_KERNEL);
1472 	if (!router->nhgrp_hw_state_cache) {
1473 		err = -ENOMEM;
1474 		goto err_nh_state_cache_alloc;
1475 	}
1476 
1477 	router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
1478 	err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
1479 	if (err)
1480 		goto err_register_inetaddr_validator_notifier;
1481 
1482 	router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb;
1483 	err = register_inetaddr_notifier(&router->inetaddr_nb);
1484 	if (err)
1485 		goto err_register_inetaddr_notifier;
1486 
1487 	router->netevent_nb.notifier_call = prestera_router_netevent_event;
1488 	err = register_netevent_notifier(&router->netevent_nb);
1489 	if (err)
1490 		goto err_register_netevent_notifier;
1491 
1492 	router->fib_nb.notifier_call = __prestera_router_fib_event;
1493 	err = register_fib_notifier(&init_net, &router->fib_nb,
1494 				    /* TODO: flush fib entries */ NULL, NULL);
1495 	if (err)
1496 		goto err_register_fib_notifier;
1497 
1498 	return 0;
1499 
1500 err_register_fib_notifier:
1501 	unregister_netevent_notifier(&router->netevent_nb);
1502 err_register_netevent_notifier:
1503 	unregister_inetaddr_notifier(&router->inetaddr_nb);
1504 err_register_inetaddr_notifier:
1505 	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
1506 err_register_inetaddr_validator_notifier:
1507 	kfree(router->nhgrp_hw_state_cache);
1508 err_nh_state_cache_alloc:
1509 	rhashtable_destroy(&router->kern_neigh_cache_ht);
1510 err_kern_neigh_cache_ht_init:
1511 	rhashtable_destroy(&router->kern_fib_cache_ht);
1512 err_kern_fib_cache_ht_init:
1513 	prestera_router_hw_fini(sw);
1514 err_router_lib_init:
1515 	kfree(sw->router);
1516 	return err;
1517 }
1518 
1519 void prestera_router_fini(struct prestera_switch *sw)
1520 {
1521 	unregister_fib_notifier(&init_net, &sw->router->fib_nb);
1522 	unregister_netevent_notifier(&sw->router->netevent_nb);
1523 	unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
1524 	unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
1525 	prestera_queue_drain();
1526 
1527 	prestera_k_arb_abort(sw);
1528 
1529 	kfree(sw->router->nhgrp_hw_state_cache);
1530 	rhashtable_destroy(&sw->router->kern_fib_cache_ht);
1531 	prestera_router_hw_fini(sw);
1532 	kfree(sw->router);
1533 	sw->router = NULL;
1534 }
1535