1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
8 #include <linux/in6.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <linux/genalloc.h>
22 #include <net/netevent.h>
23 #include <net/neighbour.h>
24 #include <net/arp.h>
25 #include <net/inet_dscp.h>
26 #include <net/ip_fib.h>
27 #include <net/ip6_fib.h>
28 #include <net/nexthop.h>
29 #include <net/fib_rules.h>
30 #include <net/ip_tunnels.h>
31 #include <net/l3mdev.h>
32 #include <net/addrconf.h>
33 #include <net/ndisc.h>
34 #include <net/ipv6.h>
35 #include <net/fib_notifier.h>
36 #include <net/switchdev.h>
37 
38 #include "spectrum.h"
39 #include "core.h"
40 #include "reg.h"
41 #include "spectrum_cnt.h"
42 #include "spectrum_dpipe.h"
43 #include "spectrum_ipip.h"
44 #include "spectrum_mr.h"
45 #include "spectrum_mr_tcam.h"
46 #include "spectrum_router.h"
47 #include "spectrum_span.h"
48 
49 struct mlxsw_sp_fib;
50 struct mlxsw_sp_vr;
51 struct mlxsw_sp_lpm_tree;
52 struct mlxsw_sp_rif_ops;
53 
54 struct mlxsw_sp_rif {
55 	struct list_head nexthop_list;
56 	struct list_head neigh_list;
57 	struct net_device *dev; /* NULL for underlay RIF */
58 	struct mlxsw_sp_fid *fid;
59 	unsigned char addr[ETH_ALEN];
60 	int mtu;
61 	u16 rif_index;
62 	u8 mac_profile_id;
63 	u8 rif_entries;
64 	u16 vr_id;
65 	const struct mlxsw_sp_rif_ops *ops;
66 	struct mlxsw_sp *mlxsw_sp;
67 
68 	unsigned int counter_ingress;
69 	bool counter_ingress_valid;
70 	unsigned int counter_egress;
71 	bool counter_egress_valid;
72 };
73 
74 struct mlxsw_sp_rif_params {
75 	struct net_device *dev;
76 	union {
77 		u16 system_port;
78 		u16 lag_id;
79 	};
80 	u16 vid;
81 	bool lag;
82 	bool double_entry;
83 };
84 
85 struct mlxsw_sp_rif_subport {
86 	struct mlxsw_sp_rif common;
87 	refcount_t ref_count;
88 	union {
89 		u16 system_port;
90 		u16 lag_id;
91 	};
92 	u16 vid;
93 	bool lag;
94 };
95 
96 struct mlxsw_sp_rif_ipip_lb {
97 	struct mlxsw_sp_rif common;
98 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
99 	u16 ul_vr_id;	/* Spectrum-1. */
100 	u16 ul_rif_id;	/* Spectrum-2+. */
101 };
102 
103 struct mlxsw_sp_rif_params_ipip_lb {
104 	struct mlxsw_sp_rif_params common;
105 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
106 };
107 
108 struct mlxsw_sp_rif_ops {
109 	enum mlxsw_sp_rif_type type;
110 	size_t rif_size;
111 
112 	void (*setup)(struct mlxsw_sp_rif *rif,
113 		      const struct mlxsw_sp_rif_params *params);
114 	int (*configure)(struct mlxsw_sp_rif *rif,
115 			 struct netlink_ext_ack *extack);
116 	void (*deconfigure)(struct mlxsw_sp_rif *rif);
117 	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
118 					 struct netlink_ext_ack *extack);
119 	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
120 };
121 
122 struct mlxsw_sp_rif_mac_profile {
123 	unsigned char mac_prefix[ETH_ALEN];
124 	refcount_t ref_count;
125 	u8 id;
126 };
127 
128 struct mlxsw_sp_router_ops {
129 	int (*init)(struct mlxsw_sp *mlxsw_sp);
130 	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
131 };
132 
133 static struct mlxsw_sp_rif *
134 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
135 			 const struct net_device *dev);
136 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
137 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
138 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
139 				  struct mlxsw_sp_lpm_tree *lpm_tree);
140 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
141 				     const struct mlxsw_sp_fib *fib,
142 				     u8 tree_id);
143 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
144 				       const struct mlxsw_sp_fib *fib);
145 
146 static unsigned int *
147 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
148 			   enum mlxsw_sp_rif_counter_dir dir)
149 {
150 	switch (dir) {
151 	case MLXSW_SP_RIF_COUNTER_EGRESS:
152 		return &rif->counter_egress;
153 	case MLXSW_SP_RIF_COUNTER_INGRESS:
154 		return &rif->counter_ingress;
155 	}
156 	return NULL;
157 }
158 
159 static bool
160 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
161 			       enum mlxsw_sp_rif_counter_dir dir)
162 {
163 	switch (dir) {
164 	case MLXSW_SP_RIF_COUNTER_EGRESS:
165 		return rif->counter_egress_valid;
166 	case MLXSW_SP_RIF_COUNTER_INGRESS:
167 		return rif->counter_ingress_valid;
168 	}
169 	return false;
170 }
171 
172 static void
173 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
174 			       enum mlxsw_sp_rif_counter_dir dir,
175 			       bool valid)
176 {
177 	switch (dir) {
178 	case MLXSW_SP_RIF_COUNTER_EGRESS:
179 		rif->counter_egress_valid = valid;
180 		break;
181 	case MLXSW_SP_RIF_COUNTER_INGRESS:
182 		rif->counter_ingress_valid = valid;
183 		break;
184 	}
185 }
186 
187 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
188 				     unsigned int counter_index, bool enable,
189 				     enum mlxsw_sp_rif_counter_dir dir)
190 {
191 	char ritr_pl[MLXSW_REG_RITR_LEN];
192 	bool is_egress = false;
193 	int err;
194 
195 	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
196 		is_egress = true;
197 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
198 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
199 	if (err)
200 		return err;
201 
202 	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
203 				    is_egress);
204 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
205 }
206 
207 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
208 				   struct mlxsw_sp_rif *rif,
209 				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
210 {
211 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
212 	unsigned int *p_counter_index;
213 	bool valid;
214 	int err;
215 
216 	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
217 	if (!valid)
218 		return -EINVAL;
219 
220 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
221 	if (!p_counter_index)
222 		return -EINVAL;
223 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
224 			     MLXSW_REG_RICNT_OPCODE_NOP);
225 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
226 	if (err)
227 		return err;
228 	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
229 	return 0;
230 }
231 
232 struct mlxsw_sp_rif_counter_set_basic {
233 	u64 good_unicast_packets;
234 	u64 good_multicast_packets;
235 	u64 good_broadcast_packets;
236 	u64 good_unicast_bytes;
237 	u64 good_multicast_bytes;
238 	u64 good_broadcast_bytes;
239 	u64 error_packets;
240 	u64 discard_packets;
241 	u64 error_bytes;
242 	u64 discard_bytes;
243 };
244 
245 static int
246 mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
247 				 enum mlxsw_sp_rif_counter_dir dir,
248 				 struct mlxsw_sp_rif_counter_set_basic *set)
249 {
250 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
251 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
252 	unsigned int *p_counter_index;
253 	int err;
254 
255 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
256 		return -EINVAL;
257 
258 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
259 	if (!p_counter_index)
260 		return -EINVAL;
261 
262 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
263 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
264 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
265 	if (err)
266 		return err;
267 
268 	if (!set)
269 		return 0;
270 
271 #define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME)				\
272 		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))
273 
274 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
275 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
276 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
277 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
278 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
279 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
280 	MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
281 	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
282 	MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
283 	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
284 
285 #undef MLXSW_SP_RIF_COUNTER_EXTRACT
286 
287 	return 0;
288 }
289 
290 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
291 				      unsigned int counter_index)
292 {
293 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
294 
295 	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
296 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
297 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
298 }
299 
300 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
301 			       enum mlxsw_sp_rif_counter_dir dir)
302 {
303 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
304 	unsigned int *p_counter_index;
305 	int err;
306 
307 	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
308 		return 0;
309 
310 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
311 	if (!p_counter_index)
312 		return -EINVAL;
313 
314 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
315 				     p_counter_index);
316 	if (err)
317 		return err;
318 
319 	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
320 	if (err)
321 		goto err_counter_clear;
322 
323 	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
324 					*p_counter_index, true, dir);
325 	if (err)
326 		goto err_counter_edit;
327 	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
328 	return 0;
329 
330 err_counter_edit:
331 err_counter_clear:
332 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
333 			      *p_counter_index);
334 	return err;
335 }
336 
337 void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
338 			       enum mlxsw_sp_rif_counter_dir dir)
339 {
340 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
341 	unsigned int *p_counter_index;
342 
343 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
344 		return;
345 
346 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
347 	if (WARN_ON(!p_counter_index))
348 		return;
349 	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
350 				  *p_counter_index, false, dir);
351 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
352 			      *p_counter_index);
353 	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
354 }
355 
356 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
357 {
358 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
359 	struct devlink *devlink;
360 
361 	devlink = priv_to_devlink(mlxsw_sp->core);
362 	if (!devlink_dpipe_table_counter_enabled(devlink,
363 						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
364 		return;
365 	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
366 }
367 
368 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
369 {
370 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
371 }
372 
373 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
374 
375 struct mlxsw_sp_prefix_usage {
376 	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
377 };
378 
379 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
380 	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
381 
382 static bool
383 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
384 			 struct mlxsw_sp_prefix_usage *prefix_usage2)
385 {
386 	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
387 }
388 
389 static void
390 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
391 			  struct mlxsw_sp_prefix_usage *prefix_usage2)
392 {
393 	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
394 }
395 
396 static void
397 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
398 			  unsigned char prefix_len)
399 {
400 	set_bit(prefix_len, prefix_usage->b);
401 }
402 
403 static void
404 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
405 			    unsigned char prefix_len)
406 {
407 	clear_bit(prefix_len, prefix_usage->b);
408 }
409 
410 struct mlxsw_sp_fib_key {
411 	unsigned char addr[sizeof(struct in6_addr)];
412 	unsigned char prefix_len;
413 };
414 
415 enum mlxsw_sp_fib_entry_type {
416 	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
417 	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
418 	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
419 	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
420 	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
421 
422 	/* This is a special case of local delivery, where a packet should be
423 	 * decapsulated on reception. Note that there is no corresponding ENCAP,
424 	 * because that's a type of next hop, not of FIB entry. (There can be
425 	 * several next hops in a REMOTE entry, and some of them may be
426 	 * encapsulating entries.)
427 	 */
428 	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
429 	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
430 };
431 
432 struct mlxsw_sp_nexthop_group_info;
433 struct mlxsw_sp_nexthop_group;
434 struct mlxsw_sp_fib_entry;
435 
436 struct mlxsw_sp_fib_node {
437 	struct mlxsw_sp_fib_entry *fib_entry;
438 	struct list_head list;
439 	struct rhash_head ht_node;
440 	struct mlxsw_sp_fib *fib;
441 	struct mlxsw_sp_fib_key key;
442 };
443 
444 struct mlxsw_sp_fib_entry_decap {
445 	struct mlxsw_sp_ipip_entry *ipip_entry;
446 	u32 tunnel_index;
447 };
448 
449 struct mlxsw_sp_fib_entry {
450 	struct mlxsw_sp_fib_node *fib_node;
451 	enum mlxsw_sp_fib_entry_type type;
452 	struct list_head nexthop_group_node;
453 	struct mlxsw_sp_nexthop_group *nh_group;
454 	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
455 };
456 
457 struct mlxsw_sp_fib4_entry {
458 	struct mlxsw_sp_fib_entry common;
459 	struct fib_info *fi;
460 	u32 tb_id;
461 	dscp_t dscp;
462 	u8 type;
463 };
464 
465 struct mlxsw_sp_fib6_entry {
466 	struct mlxsw_sp_fib_entry common;
467 	struct list_head rt6_list;
468 	unsigned int nrt6;
469 };
470 
471 struct mlxsw_sp_rt6 {
472 	struct list_head list;
473 	struct fib6_info *rt;
474 };
475 
476 struct mlxsw_sp_lpm_tree {
477 	u8 id; /* tree ID */
478 	unsigned int ref_count;
479 	enum mlxsw_sp_l3proto proto;
480 	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
481 	struct mlxsw_sp_prefix_usage prefix_usage;
482 };
483 
484 struct mlxsw_sp_fib {
485 	struct rhashtable ht;
486 	struct list_head node_list;
487 	struct mlxsw_sp_vr *vr;
488 	struct mlxsw_sp_lpm_tree *lpm_tree;
489 	enum mlxsw_sp_l3proto proto;
490 };
491 
492 struct mlxsw_sp_vr {
493 	u16 id; /* virtual router ID */
494 	u32 tb_id; /* kernel fib table id */
495 	unsigned int rif_count;
496 	struct mlxsw_sp_fib *fib4;
497 	struct mlxsw_sp_fib *fib6;
498 	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
499 	struct mlxsw_sp_rif *ul_rif;
500 	refcount_t ul_rif_refcnt;
501 };
502 
503 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
504 
505 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
506 						struct mlxsw_sp_vr *vr,
507 						enum mlxsw_sp_l3proto proto)
508 {
509 	struct mlxsw_sp_lpm_tree *lpm_tree;
510 	struct mlxsw_sp_fib *fib;
511 	int err;
512 
513 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
514 	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
515 	if (!fib)
516 		return ERR_PTR(-ENOMEM);
517 	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
518 	if (err)
519 		goto err_rhashtable_init;
520 	INIT_LIST_HEAD(&fib->node_list);
521 	fib->proto = proto;
522 	fib->vr = vr;
523 	fib->lpm_tree = lpm_tree;
524 	mlxsw_sp_lpm_tree_hold(lpm_tree);
525 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
526 	if (err)
527 		goto err_lpm_tree_bind;
528 	return fib;
529 
530 err_lpm_tree_bind:
531 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
532 err_rhashtable_init:
533 	kfree(fib);
534 	return ERR_PTR(err);
535 }
536 
537 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
538 				 struct mlxsw_sp_fib *fib)
539 {
540 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
541 	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
542 	WARN_ON(!list_empty(&fib->node_list));
543 	rhashtable_destroy(&fib->ht);
544 	kfree(fib);
545 }
546 
547 static struct mlxsw_sp_lpm_tree *
548 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
549 {
550 	static struct mlxsw_sp_lpm_tree *lpm_tree;
551 	int i;
552 
553 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
554 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
555 		if (lpm_tree->ref_count == 0)
556 			return lpm_tree;
557 	}
558 	return NULL;
559 }
560 
561 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
562 				   struct mlxsw_sp_lpm_tree *lpm_tree)
563 {
564 	char ralta_pl[MLXSW_REG_RALTA_LEN];
565 
566 	mlxsw_reg_ralta_pack(ralta_pl, true,
567 			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
568 			     lpm_tree->id);
569 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
570 }
571 
572 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
573 				   struct mlxsw_sp_lpm_tree *lpm_tree)
574 {
575 	char ralta_pl[MLXSW_REG_RALTA_LEN];
576 
577 	mlxsw_reg_ralta_pack(ralta_pl, false,
578 			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
579 			     lpm_tree->id);
580 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
581 }
582 
583 static int
584 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
585 				  struct mlxsw_sp_prefix_usage *prefix_usage,
586 				  struct mlxsw_sp_lpm_tree *lpm_tree)
587 {
588 	char ralst_pl[MLXSW_REG_RALST_LEN];
589 	u8 root_bin = 0;
590 	u8 prefix;
591 	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
592 
593 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
594 		root_bin = prefix;
595 
596 	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
597 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
598 		if (prefix == 0)
599 			continue;
600 		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
601 					 MLXSW_REG_RALST_BIN_NO_CHILD);
602 		last_prefix = prefix;
603 	}
604 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
605 }
606 
607 static struct mlxsw_sp_lpm_tree *
608 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
609 			 struct mlxsw_sp_prefix_usage *prefix_usage,
610 			 enum mlxsw_sp_l3proto proto)
611 {
612 	struct mlxsw_sp_lpm_tree *lpm_tree;
613 	int err;
614 
615 	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
616 	if (!lpm_tree)
617 		return ERR_PTR(-EBUSY);
618 	lpm_tree->proto = proto;
619 	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
620 	if (err)
621 		return ERR_PTR(err);
622 
623 	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
624 						lpm_tree);
625 	if (err)
626 		goto err_left_struct_set;
627 	memcpy(&lpm_tree->prefix_usage, prefix_usage,
628 	       sizeof(lpm_tree->prefix_usage));
629 	memset(&lpm_tree->prefix_ref_count, 0,
630 	       sizeof(lpm_tree->prefix_ref_count));
631 	lpm_tree->ref_count = 1;
632 	return lpm_tree;
633 
634 err_left_struct_set:
635 	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
636 	return ERR_PTR(err);
637 }
638 
639 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
640 				      struct mlxsw_sp_lpm_tree *lpm_tree)
641 {
642 	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
643 }
644 
645 static struct mlxsw_sp_lpm_tree *
646 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
647 		      struct mlxsw_sp_prefix_usage *prefix_usage,
648 		      enum mlxsw_sp_l3proto proto)
649 {
650 	struct mlxsw_sp_lpm_tree *lpm_tree;
651 	int i;
652 
653 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
654 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
655 		if (lpm_tree->ref_count != 0 &&
656 		    lpm_tree->proto == proto &&
657 		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
658 					     prefix_usage)) {
659 			mlxsw_sp_lpm_tree_hold(lpm_tree);
660 			return lpm_tree;
661 		}
662 	}
663 	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
664 }
665 
666 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
667 {
668 	lpm_tree->ref_count++;
669 }
670 
671 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
672 				  struct mlxsw_sp_lpm_tree *lpm_tree)
673 {
674 	if (--lpm_tree->ref_count == 0)
675 		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
676 }
677 
678 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
679 
680 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
681 {
682 	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
683 	struct mlxsw_sp_lpm_tree *lpm_tree;
684 	u64 max_trees;
685 	int err, i;
686 
687 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
688 		return -EIO;
689 
690 	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
691 	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
692 	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
693 					     sizeof(struct mlxsw_sp_lpm_tree),
694 					     GFP_KERNEL);
695 	if (!mlxsw_sp->router->lpm.trees)
696 		return -ENOMEM;
697 
698 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
699 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
700 		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
701 	}
702 
703 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
704 					 MLXSW_SP_L3_PROTO_IPV4);
705 	if (IS_ERR(lpm_tree)) {
706 		err = PTR_ERR(lpm_tree);
707 		goto err_ipv4_tree_get;
708 	}
709 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
710 
711 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
712 					 MLXSW_SP_L3_PROTO_IPV6);
713 	if (IS_ERR(lpm_tree)) {
714 		err = PTR_ERR(lpm_tree);
715 		goto err_ipv6_tree_get;
716 	}
717 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
718 
719 	return 0;
720 
721 err_ipv6_tree_get:
722 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
723 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
724 err_ipv4_tree_get:
725 	kfree(mlxsw_sp->router->lpm.trees);
726 	return err;
727 }
728 
729 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
730 {
731 	struct mlxsw_sp_lpm_tree *lpm_tree;
732 
733 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
734 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
735 
736 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
737 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
738 
739 	kfree(mlxsw_sp->router->lpm.trees);
740 }
741 
742 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
743 {
744 	return !!vr->fib4 || !!vr->fib6 ||
745 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
746 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
747 }
748 
749 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
750 {
751 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
752 	struct mlxsw_sp_vr *vr;
753 	int i;
754 
755 	for (i = 0; i < max_vrs; i++) {
756 		vr = &mlxsw_sp->router->vrs[i];
757 		if (!mlxsw_sp_vr_is_used(vr))
758 			return vr;
759 	}
760 	return NULL;
761 }
762 
763 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
764 				     const struct mlxsw_sp_fib *fib, u8 tree_id)
765 {
766 	char raltb_pl[MLXSW_REG_RALTB_LEN];
767 
768 	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
769 			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
770 			     tree_id);
771 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
772 }
773 
774 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
775 				       const struct mlxsw_sp_fib *fib)
776 {
777 	char raltb_pl[MLXSW_REG_RALTB_LEN];
778 
779 	/* Bind to tree 0 which is default */
780 	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
781 			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
782 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
783 }
784 
785 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
786 {
787 	/* For our purpose, squash main, default and local tables into one */
788 	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
789 		tb_id = RT_TABLE_MAIN;
790 	return tb_id;
791 }
792 
793 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
794 					    u32 tb_id)
795 {
796 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
797 	struct mlxsw_sp_vr *vr;
798 	int i;
799 
800 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
801 
802 	for (i = 0; i < max_vrs; i++) {
803 		vr = &mlxsw_sp->router->vrs[i];
804 		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
805 			return vr;
806 	}
807 	return NULL;
808 }
809 
810 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
811 				u16 *vr_id)
812 {
813 	struct mlxsw_sp_vr *vr;
814 	int err = 0;
815 
816 	mutex_lock(&mlxsw_sp->router->lock);
817 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
818 	if (!vr) {
819 		err = -ESRCH;
820 		goto out;
821 	}
822 	*vr_id = vr->id;
823 out:
824 	mutex_unlock(&mlxsw_sp->router->lock);
825 	return err;
826 }
827 
828 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
829 					    enum mlxsw_sp_l3proto proto)
830 {
831 	switch (proto) {
832 	case MLXSW_SP_L3_PROTO_IPV4:
833 		return vr->fib4;
834 	case MLXSW_SP_L3_PROTO_IPV6:
835 		return vr->fib6;
836 	}
837 	return NULL;
838 }
839 
840 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
841 					      u32 tb_id,
842 					      struct netlink_ext_ack *extack)
843 {
844 	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
845 	struct mlxsw_sp_fib *fib4;
846 	struct mlxsw_sp_fib *fib6;
847 	struct mlxsw_sp_vr *vr;
848 	int err;
849 
850 	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
851 	if (!vr) {
852 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
853 		return ERR_PTR(-EBUSY);
854 	}
855 	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
856 	if (IS_ERR(fib4))
857 		return ERR_CAST(fib4);
858 	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
859 	if (IS_ERR(fib6)) {
860 		err = PTR_ERR(fib6);
861 		goto err_fib6_create;
862 	}
863 	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
864 					     MLXSW_SP_L3_PROTO_IPV4);
865 	if (IS_ERR(mr4_table)) {
866 		err = PTR_ERR(mr4_table);
867 		goto err_mr4_table_create;
868 	}
869 	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
870 					     MLXSW_SP_L3_PROTO_IPV6);
871 	if (IS_ERR(mr6_table)) {
872 		err = PTR_ERR(mr6_table);
873 		goto err_mr6_table_create;
874 	}
875 
876 	vr->fib4 = fib4;
877 	vr->fib6 = fib6;
878 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
879 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
880 	vr->tb_id = tb_id;
881 	return vr;
882 
883 err_mr6_table_create:
884 	mlxsw_sp_mr_table_destroy(mr4_table);
885 err_mr4_table_create:
886 	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
887 err_fib6_create:
888 	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
889 	return ERR_PTR(err);
890 }
891 
892 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
893 				struct mlxsw_sp_vr *vr)
894 {
895 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
896 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
897 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
898 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
899 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
900 	vr->fib6 = NULL;
901 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
902 	vr->fib4 = NULL;
903 }
904 
905 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
906 					   struct netlink_ext_ack *extack)
907 {
908 	struct mlxsw_sp_vr *vr;
909 
910 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
911 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
912 	if (!vr)
913 		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
914 	return vr;
915 }
916 
917 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
918 {
919 	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
920 	    list_empty(&vr->fib6->node_list) &&
921 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
922 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
923 		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
924 }
925 
926 static bool
927 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
928 				    enum mlxsw_sp_l3proto proto, u8 tree_id)
929 {
930 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
931 
932 	if (!mlxsw_sp_vr_is_used(vr))
933 		return false;
934 	if (fib->lpm_tree->id == tree_id)
935 		return true;
936 	return false;
937 }
938 
939 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
940 					struct mlxsw_sp_fib *fib,
941 					struct mlxsw_sp_lpm_tree *new_tree)
942 {
943 	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
944 	int err;
945 
946 	fib->lpm_tree = new_tree;
947 	mlxsw_sp_lpm_tree_hold(new_tree);
948 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
949 	if (err)
950 		goto err_tree_bind;
951 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
952 	return 0;
953 
954 err_tree_bind:
955 	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
956 	fib->lpm_tree = old_tree;
957 	return err;
958 }
959 
960 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
961 					 struct mlxsw_sp_fib *fib,
962 					 struct mlxsw_sp_lpm_tree *new_tree)
963 {
964 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
965 	enum mlxsw_sp_l3proto proto = fib->proto;
966 	struct mlxsw_sp_lpm_tree *old_tree;
967 	u8 old_id, new_id = new_tree->id;
968 	struct mlxsw_sp_vr *vr;
969 	int i, err;
970 
971 	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
972 	old_id = old_tree->id;
973 
974 	for (i = 0; i < max_vrs; i++) {
975 		vr = &mlxsw_sp->router->vrs[i];
976 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
977 			continue;
978 		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
979 						   mlxsw_sp_vr_fib(vr, proto),
980 						   new_tree);
981 		if (err)
982 			goto err_tree_replace;
983 	}
984 
985 	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
986 	       sizeof(new_tree->prefix_ref_count));
987 	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
988 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
989 
990 	return 0;
991 
992 err_tree_replace:
993 	for (i--; i >= 0; i--) {
994 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
995 			continue;
996 		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
997 					     mlxsw_sp_vr_fib(vr, proto),
998 					     old_tree);
999 	}
1000 	return err;
1001 }
1002 
1003 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
1004 {
1005 	struct mlxsw_sp_vr *vr;
1006 	u64 max_vrs;
1007 	int i;
1008 
1009 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
1010 		return -EIO;
1011 
1012 	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1013 	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
1014 					GFP_KERNEL);
1015 	if (!mlxsw_sp->router->vrs)
1016 		return -ENOMEM;
1017 
1018 	for (i = 0; i < max_vrs; i++) {
1019 		vr = &mlxsw_sp->router->vrs[i];
1020 		vr->id = i;
1021 	}
1022 
1023 	return 0;
1024 }
1025 
1026 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
1027 
1028 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
1029 {
1030 	/* At this stage we're guaranteed not to have new incoming
1031 	 * FIB notifications and the work queue is free from FIBs
1032 	 * sitting on top of mlxsw netdevs. However, we can still
1033 	 * have other FIBs queued. Flush the queue before flushing
1034 	 * the device's tables. No need for locks, as we're the only
1035 	 * writer.
1036 	 */
1037 	mlxsw_core_flush_owq();
1038 	mlxsw_sp_router_fib_flush(mlxsw_sp);
1039 	kfree(mlxsw_sp->router->vrs);
1040 }
1041 
1042 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
1043 {
1044 	struct net_device *d;
1045 	u32 tb_id;
1046 
1047 	rcu_read_lock();
1048 	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1049 	if (d)
1050 		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1051 	else
1052 		tb_id = RT_TABLE_MAIN;
1053 	rcu_read_unlock();
1054 
1055 	return tb_id;
1056 }
1057 
1058 static struct mlxsw_sp_rif *
1059 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1060 		    const struct mlxsw_sp_rif_params *params,
1061 		    struct netlink_ext_ack *extack);
1062 
1063 static struct mlxsw_sp_rif_ipip_lb *
1064 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1065 				enum mlxsw_sp_ipip_type ipipt,
1066 				struct net_device *ol_dev,
1067 				struct netlink_ext_ack *extack)
1068 {
1069 	struct mlxsw_sp_rif_params_ipip_lb lb_params;
1070 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1071 	struct mlxsw_sp_rif *rif;
1072 
1073 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1074 	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1075 		.common.dev = ol_dev,
1076 		.common.lag = false,
1077 		.common.double_entry = ipip_ops->double_rif_entry,
1078 		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1079 	};
1080 
1081 	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1082 	if (IS_ERR(rif))
1083 		return ERR_CAST(rif);
1084 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1085 }
1086 
1087 static struct mlxsw_sp_ipip_entry *
1088 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1089 			  enum mlxsw_sp_ipip_type ipipt,
1090 			  struct net_device *ol_dev)
1091 {
1092 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1093 	struct mlxsw_sp_ipip_entry *ipip_entry;
1094 	struct mlxsw_sp_ipip_entry *ret = NULL;
1095 	int err;
1096 
1097 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1098 	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1099 	if (!ipip_entry)
1100 		return ERR_PTR(-ENOMEM);
1101 
1102 	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1103 							    ol_dev, NULL);
1104 	if (IS_ERR(ipip_entry->ol_lb)) {
1105 		ret = ERR_CAST(ipip_entry->ol_lb);
1106 		goto err_ol_ipip_lb_create;
1107 	}
1108 
1109 	ipip_entry->ipipt = ipipt;
1110 	ipip_entry->ol_dev = ol_dev;
1111 	ipip_entry->parms = ipip_ops->parms_init(ol_dev);
1112 
1113 	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
1114 	if (err) {
1115 		ret = ERR_PTR(err);
1116 		goto err_rem_ip_addr_set;
1117 	}
1118 
1119 	return ipip_entry;
1120 
1121 err_rem_ip_addr_set:
1122 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1123 err_ol_ipip_lb_create:
1124 	kfree(ipip_entry);
1125 	return ret;
1126 }
1127 
1128 static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
1129 					struct mlxsw_sp_ipip_entry *ipip_entry)
1130 {
1131 	const struct mlxsw_sp_ipip_ops *ipip_ops =
1132 		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1133 
1134 	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
1135 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1136 	kfree(ipip_entry);
1137 }
1138 
1139 static bool
1140 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1141 				  const enum mlxsw_sp_l3proto ul_proto,
1142 				  union mlxsw_sp_l3addr saddr,
1143 				  u32 ul_tb_id,
1144 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1145 {
1146 	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1147 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1148 	union mlxsw_sp_l3addr tun_saddr;
1149 
1150 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1151 		return false;
1152 
1153 	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1154 	return tun_ul_tb_id == ul_tb_id &&
1155 	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1156 }
1157 
1158 static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
1159 						 enum mlxsw_sp_ipip_type ipipt)
1160 {
1161 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1162 
1163 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1164 
1165 	/* Not all tunnels require to increase the default pasing depth
1166 	 * (96 bytes).
1167 	 */
1168 	if (ipip_ops->inc_parsing_depth)
1169 		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
1170 
1171 	return 0;
1172 }
1173 
1174 static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
1175 						  enum mlxsw_sp_ipip_type ipipt)
1176 {
1177 	const struct mlxsw_sp_ipip_ops *ipip_ops =
1178 		mlxsw_sp->router->ipip_ops_arr[ipipt];
1179 
1180 	if (ipip_ops->inc_parsing_depth)
1181 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
1182 }
1183 
1184 static int
1185 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1186 			      struct mlxsw_sp_fib_entry *fib_entry,
1187 			      struct mlxsw_sp_ipip_entry *ipip_entry)
1188 {
1189 	u32 tunnel_index;
1190 	int err;
1191 
1192 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1193 				  1, &tunnel_index);
1194 	if (err)
1195 		return err;
1196 
1197 	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
1198 						    ipip_entry->ipipt);
1199 	if (err)
1200 		goto err_parsing_depth_inc;
1201 
1202 	ipip_entry->decap_fib_entry = fib_entry;
1203 	fib_entry->decap.ipip_entry = ipip_entry;
1204 	fib_entry->decap.tunnel_index = tunnel_index;
1205 
1206 	return 0;
1207 
1208 err_parsing_depth_inc:
1209 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
1210 			   fib_entry->decap.tunnel_index);
1211 	return err;
1212 }
1213 
1214 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1215 					  struct mlxsw_sp_fib_entry *fib_entry)
1216 {
1217 	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
1218 
1219 	/* Unlink this node from the IPIP entry that it's the decap entry of. */
1220 	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1221 	fib_entry->decap.ipip_entry = NULL;
1222 	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
1223 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1224 			   1, fib_entry->decap.tunnel_index);
1225 }
1226 
1227 static struct mlxsw_sp_fib_node *
1228 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1229 			 size_t addr_len, unsigned char prefix_len);
1230 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1231 				     struct mlxsw_sp_fib_entry *fib_entry);
1232 
1233 static void
1234 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1235 				 struct mlxsw_sp_ipip_entry *ipip_entry)
1236 {
1237 	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1238 
1239 	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1240 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1241 
1242 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1243 }
1244 
1245 static void
1246 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1247 				  struct mlxsw_sp_ipip_entry *ipip_entry,
1248 				  struct mlxsw_sp_fib_entry *decap_fib_entry)
1249 {
1250 	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1251 					  ipip_entry))
1252 		return;
1253 	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1254 
1255 	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1256 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1257 }
1258 
1259 static struct mlxsw_sp_fib_entry *
1260 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1261 				     enum mlxsw_sp_l3proto proto,
1262 				     const union mlxsw_sp_l3addr *addr,
1263 				     enum mlxsw_sp_fib_entry_type type)
1264 {
1265 	struct mlxsw_sp_fib_node *fib_node;
1266 	unsigned char addr_prefix_len;
1267 	struct mlxsw_sp_fib *fib;
1268 	struct mlxsw_sp_vr *vr;
1269 	const void *addrp;
1270 	size_t addr_len;
1271 	u32 addr4;
1272 
1273 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1274 	if (!vr)
1275 		return NULL;
1276 	fib = mlxsw_sp_vr_fib(vr, proto);
1277 
1278 	switch (proto) {
1279 	case MLXSW_SP_L3_PROTO_IPV4:
1280 		addr4 = be32_to_cpu(addr->addr4);
1281 		addrp = &addr4;
1282 		addr_len = 4;
1283 		addr_prefix_len = 32;
1284 		break;
1285 	case MLXSW_SP_L3_PROTO_IPV6:
1286 		addrp = &addr->addr6;
1287 		addr_len = 16;
1288 		addr_prefix_len = 128;
1289 		break;
1290 	default:
1291 		WARN_ON(1);
1292 		return NULL;
1293 	}
1294 
1295 	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1296 					    addr_prefix_len);
1297 	if (!fib_node || fib_node->fib_entry->type != type)
1298 		return NULL;
1299 
1300 	return fib_node->fib_entry;
1301 }
1302 
1303 /* Given an IPIP entry, find the corresponding decap route. */
1304 static struct mlxsw_sp_fib_entry *
1305 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1306 			       struct mlxsw_sp_ipip_entry *ipip_entry)
1307 {
1308 	static struct mlxsw_sp_fib_node *fib_node;
1309 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1310 	unsigned char saddr_prefix_len;
1311 	union mlxsw_sp_l3addr saddr;
1312 	struct mlxsw_sp_fib *ul_fib;
1313 	struct mlxsw_sp_vr *ul_vr;
1314 	const void *saddrp;
1315 	size_t saddr_len;
1316 	u32 ul_tb_id;
1317 	u32 saddr4;
1318 
1319 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1320 
1321 	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1322 	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1323 	if (!ul_vr)
1324 		return NULL;
1325 
1326 	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1327 	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1328 					   ipip_entry->ol_dev);
1329 
1330 	switch (ipip_ops->ul_proto) {
1331 	case MLXSW_SP_L3_PROTO_IPV4:
1332 		saddr4 = be32_to_cpu(saddr.addr4);
1333 		saddrp = &saddr4;
1334 		saddr_len = 4;
1335 		saddr_prefix_len = 32;
1336 		break;
1337 	case MLXSW_SP_L3_PROTO_IPV6:
1338 		saddrp = &saddr.addr6;
1339 		saddr_len = 16;
1340 		saddr_prefix_len = 128;
1341 		break;
1342 	default:
1343 		WARN_ON(1);
1344 		return NULL;
1345 	}
1346 
1347 	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1348 					    saddr_prefix_len);
1349 	if (!fib_node ||
1350 	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1351 		return NULL;
1352 
1353 	return fib_node->fib_entry;
1354 }
1355 
1356 static struct mlxsw_sp_ipip_entry *
1357 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1358 			   enum mlxsw_sp_ipip_type ipipt,
1359 			   struct net_device *ol_dev)
1360 {
1361 	struct mlxsw_sp_ipip_entry *ipip_entry;
1362 
1363 	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1364 	if (IS_ERR(ipip_entry))
1365 		return ipip_entry;
1366 
1367 	list_add_tail(&ipip_entry->ipip_list_node,
1368 		      &mlxsw_sp->router->ipip_list);
1369 
1370 	return ipip_entry;
1371 }
1372 
1373 static void
1374 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1375 			    struct mlxsw_sp_ipip_entry *ipip_entry)
1376 {
1377 	list_del(&ipip_entry->ipip_list_node);
1378 	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
1379 }
1380 
1381 static bool
1382 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1383 				  const struct net_device *ul_dev,
1384 				  enum mlxsw_sp_l3proto ul_proto,
1385 				  union mlxsw_sp_l3addr ul_dip,
1386 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1387 {
1388 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1389 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1390 
1391 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1392 		return false;
1393 
1394 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1395 						 ul_tb_id, ipip_entry);
1396 }
1397 
1398 /* Given decap parameters, find the corresponding IPIP entry. */
1399 static struct mlxsw_sp_ipip_entry *
1400 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
1401 				  enum mlxsw_sp_l3proto ul_proto,
1402 				  union mlxsw_sp_l3addr ul_dip)
1403 {
1404 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1405 	struct net_device *ul_dev;
1406 
1407 	rcu_read_lock();
1408 
1409 	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
1410 	if (!ul_dev)
1411 		goto out_unlock;
1412 
1413 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1414 			    ipip_list_node)
1415 		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1416 						      ul_proto, ul_dip,
1417 						      ipip_entry))
1418 			goto out_unlock;
1419 
1420 	rcu_read_unlock();
1421 
1422 	return NULL;
1423 
1424 out_unlock:
1425 	rcu_read_unlock();
1426 	return ipip_entry;
1427 }
1428 
1429 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1430 				      const struct net_device *dev,
1431 				      enum mlxsw_sp_ipip_type *p_type)
1432 {
1433 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1434 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1435 	enum mlxsw_sp_ipip_type ipipt;
1436 
1437 	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1438 		ipip_ops = router->ipip_ops_arr[ipipt];
1439 		if (dev->type == ipip_ops->dev_type) {
1440 			if (p_type)
1441 				*p_type = ipipt;
1442 			return true;
1443 		}
1444 	}
1445 	return false;
1446 }
1447 
1448 static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1449 				       const struct net_device *dev)
1450 {
1451 	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1452 }
1453 
1454 static struct mlxsw_sp_ipip_entry *
1455 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1456 				   const struct net_device *ol_dev)
1457 {
1458 	struct mlxsw_sp_ipip_entry *ipip_entry;
1459 
1460 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1461 			    ipip_list_node)
1462 		if (ipip_entry->ol_dev == ol_dev)
1463 			return ipip_entry;
1464 
1465 	return NULL;
1466 }
1467 
1468 static struct mlxsw_sp_ipip_entry *
1469 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1470 				   const struct net_device *ul_dev,
1471 				   struct mlxsw_sp_ipip_entry *start)
1472 {
1473 	struct mlxsw_sp_ipip_entry *ipip_entry;
1474 
1475 	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1476 					ipip_list_node);
1477 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1478 				     ipip_list_node) {
1479 		struct net_device *ol_dev = ipip_entry->ol_dev;
1480 		struct net_device *ipip_ul_dev;
1481 
1482 		rcu_read_lock();
1483 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1484 		rcu_read_unlock();
1485 
1486 		if (ipip_ul_dev == ul_dev)
1487 			return ipip_entry;
1488 	}
1489 
1490 	return NULL;
1491 }
1492 
1493 static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1494 				       const struct net_device *dev)
1495 {
1496 	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1497 }
1498 
1499 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1500 						const struct net_device *ol_dev,
1501 						enum mlxsw_sp_ipip_type ipipt)
1502 {
1503 	const struct mlxsw_sp_ipip_ops *ops
1504 		= mlxsw_sp->router->ipip_ops_arr[ipipt];
1505 
1506 	return ops->can_offload(mlxsw_sp, ol_dev);
1507 }
1508 
1509 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1510 						struct net_device *ol_dev)
1511 {
1512 	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1513 	struct mlxsw_sp_ipip_entry *ipip_entry;
1514 	enum mlxsw_sp_l3proto ul_proto;
1515 	union mlxsw_sp_l3addr saddr;
1516 	u32 ul_tb_id;
1517 
1518 	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1519 	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1520 		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1521 		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1522 		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1523 		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1524 							  saddr, ul_tb_id,
1525 							  NULL)) {
1526 			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1527 								ol_dev);
1528 			if (IS_ERR(ipip_entry))
1529 				return PTR_ERR(ipip_entry);
1530 		}
1531 	}
1532 
1533 	return 0;
1534 }
1535 
1536 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1537 						   struct net_device *ol_dev)
1538 {
1539 	struct mlxsw_sp_ipip_entry *ipip_entry;
1540 
1541 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1542 	if (ipip_entry)
1543 		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1544 }
1545 
1546 static void
1547 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1548 				struct mlxsw_sp_ipip_entry *ipip_entry)
1549 {
1550 	struct mlxsw_sp_fib_entry *decap_fib_entry;
1551 
1552 	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1553 	if (decap_fib_entry)
1554 		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1555 						  decap_fib_entry);
1556 }
1557 
1558 static int
1559 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1560 			u16 ul_rif_id, bool enable)
1561 {
1562 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1563 	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
1564 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1565 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1566 	char ritr_pl[MLXSW_REG_RITR_LEN];
1567 	struct in6_addr *saddr6;
1568 	u32 saddr4;
1569 
1570 	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
1571 	switch (lb_cf.ul_protocol) {
1572 	case MLXSW_SP_L3_PROTO_IPV4:
1573 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1574 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1575 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1576 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1577 						   ipip_options, ul_vr_id,
1578 						   ul_rif_id, saddr4,
1579 						   lb_cf.okey);
1580 		break;
1581 
1582 	case MLXSW_SP_L3_PROTO_IPV6:
1583 		saddr6 = &lb_cf.saddr.addr6;
1584 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1585 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1586 		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
1587 						   ipip_options, ul_vr_id,
1588 						   ul_rif_id, saddr6,
1589 						   lb_cf.okey);
1590 		break;
1591 	}
1592 
1593 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1594 }
1595 
1596 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1597 						 struct net_device *ol_dev)
1598 {
1599 	struct mlxsw_sp_ipip_entry *ipip_entry;
1600 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1601 	int err = 0;
1602 
1603 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1604 	if (ipip_entry) {
1605 		lb_rif = ipip_entry->ol_lb;
1606 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1607 					      lb_rif->ul_rif_id, true);
1608 		if (err)
1609 			goto out;
1610 		lb_rif->common.mtu = ol_dev->mtu;
1611 	}
1612 
1613 out:
1614 	return err;
1615 }
1616 
1617 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1618 						struct net_device *ol_dev)
1619 {
1620 	struct mlxsw_sp_ipip_entry *ipip_entry;
1621 
1622 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1623 	if (ipip_entry)
1624 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1625 }
1626 
1627 static void
1628 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1629 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1630 {
1631 	if (ipip_entry->decap_fib_entry)
1632 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1633 }
1634 
1635 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1636 						  struct net_device *ol_dev)
1637 {
1638 	struct mlxsw_sp_ipip_entry *ipip_entry;
1639 
1640 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1641 	if (ipip_entry)
1642 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1643 }
1644 
1645 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1646 					 struct mlxsw_sp_rif *old_rif,
1647 					 struct mlxsw_sp_rif *new_rif);
1648 static int
1649 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1650 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1651 				 bool keep_encap,
1652 				 struct netlink_ext_ack *extack)
1653 {
1654 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1655 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1656 
1657 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1658 						     ipip_entry->ipipt,
1659 						     ipip_entry->ol_dev,
1660 						     extack);
1661 	if (IS_ERR(new_lb_rif))
1662 		return PTR_ERR(new_lb_rif);
1663 	ipip_entry->ol_lb = new_lb_rif;
1664 
1665 	if (keep_encap)
1666 		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1667 					     &new_lb_rif->common);
1668 
1669 	mlxsw_sp_rif_destroy(&old_lb_rif->common);
1670 
1671 	return 0;
1672 }
1673 
1674 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1675 					struct mlxsw_sp_rif *rif);
1676 
1677 /**
1678  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1679  * @mlxsw_sp: mlxsw_sp.
1680  * @ipip_entry: IPIP entry.
1681  * @recreate_loopback: Recreates the associated loopback RIF.
1682  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1683  *              relevant when recreate_loopback is true.
1684  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1685  *                   is only relevant when recreate_loopback is false.
1686  * @extack: extack.
1687  *
1688  * Return: Non-zero value on failure.
1689  */
1690 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1691 					struct mlxsw_sp_ipip_entry *ipip_entry,
1692 					bool recreate_loopback,
1693 					bool keep_encap,
1694 					bool update_nexthops,
1695 					struct netlink_ext_ack *extack)
1696 {
1697 	int err;
1698 
1699 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1700 	 * recreate it. That creates a window of opportunity where RALUE and
1701 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1702 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1703 	 * of RALUE, demote the decap route back.
1704 	 */
1705 	if (ipip_entry->decap_fib_entry)
1706 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1707 
1708 	if (recreate_loopback) {
1709 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1710 						       keep_encap, extack);
1711 		if (err)
1712 			return err;
1713 	} else if (update_nexthops) {
1714 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1715 					    &ipip_entry->ol_lb->common);
1716 	}
1717 
1718 	if (ipip_entry->ol_dev->flags & IFF_UP)
1719 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1720 
1721 	return 0;
1722 }
1723 
1724 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1725 						struct net_device *ol_dev,
1726 						struct netlink_ext_ack *extack)
1727 {
1728 	struct mlxsw_sp_ipip_entry *ipip_entry =
1729 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1730 
1731 	if (!ipip_entry)
1732 		return 0;
1733 
1734 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1735 						   true, false, false, extack);
1736 }
1737 
1738 static int
1739 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1740 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1741 				     struct net_device *ul_dev,
1742 				     bool *demote_this,
1743 				     struct netlink_ext_ack *extack)
1744 {
1745 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1746 	enum mlxsw_sp_l3proto ul_proto;
1747 	union mlxsw_sp_l3addr saddr;
1748 
1749 	/* Moving underlay to a different VRF might cause local address
1750 	 * conflict, and the conflicting tunnels need to be demoted.
1751 	 */
1752 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1753 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1754 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1755 						 saddr, ul_tb_id,
1756 						 ipip_entry)) {
1757 		*demote_this = true;
1758 		return 0;
1759 	}
1760 
1761 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1762 						   true, true, false, extack);
1763 }
1764 
1765 static int
1766 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1767 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1768 				    struct net_device *ul_dev)
1769 {
1770 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1771 						   false, false, true, NULL);
1772 }
1773 
1774 static int
1775 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1776 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1777 				      struct net_device *ul_dev)
1778 {
1779 	/* A down underlay device causes encapsulated packets to not be
1780 	 * forwarded, but decap still works. So refresh next hops without
1781 	 * touching anything else.
1782 	 */
1783 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1784 						   false, false, true, NULL);
1785 }
1786 
1787 static int
1788 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1789 					struct net_device *ol_dev,
1790 					struct netlink_ext_ack *extack)
1791 {
1792 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1793 	struct mlxsw_sp_ipip_entry *ipip_entry;
1794 	int err;
1795 
1796 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1797 	if (!ipip_entry)
1798 		/* A change might make a tunnel eligible for offloading, but
1799 		 * that is currently not implemented. What falls to slow path
1800 		 * stays there.
1801 		 */
1802 		return 0;
1803 
1804 	/* A change might make a tunnel not eligible for offloading. */
1805 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1806 						 ipip_entry->ipipt)) {
1807 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1808 		return 0;
1809 	}
1810 
1811 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1812 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1813 	return err;
1814 }
1815 
1816 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1817 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1818 {
1819 	struct net_device *ol_dev = ipip_entry->ol_dev;
1820 
1821 	if (ol_dev->flags & IFF_UP)
1822 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1823 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1824 }
1825 
1826 /* The configuration where several tunnels have the same local address in the
1827  * same underlay table needs special treatment in the HW. That is currently not
1828  * implemented in the driver. This function finds and demotes the first tunnel
1829  * with a given source address, except the one passed in the argument
1830  * `except'.
1831  */
1832 bool
1833 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1834 				     enum mlxsw_sp_l3proto ul_proto,
1835 				     union mlxsw_sp_l3addr saddr,
1836 				     u32 ul_tb_id,
1837 				     const struct mlxsw_sp_ipip_entry *except)
1838 {
1839 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1840 
1841 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1842 				 ipip_list_node) {
1843 		if (ipip_entry != except &&
1844 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1845 						      ul_tb_id, ipip_entry)) {
1846 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1847 			return true;
1848 		}
1849 	}
1850 
1851 	return false;
1852 }
1853 
1854 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1855 						     struct net_device *ul_dev)
1856 {
1857 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1858 
1859 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1860 				 ipip_list_node) {
1861 		struct net_device *ol_dev = ipip_entry->ol_dev;
1862 		struct net_device *ipip_ul_dev;
1863 
1864 		rcu_read_lock();
1865 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1866 		rcu_read_unlock();
1867 		if (ipip_ul_dev == ul_dev)
1868 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1869 	}
1870 }
1871 
1872 static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1873 					    struct net_device *ol_dev,
1874 					    unsigned long event,
1875 					    struct netdev_notifier_info *info)
1876 {
1877 	struct netdev_notifier_changeupper_info *chup;
1878 	struct netlink_ext_ack *extack;
1879 	int err = 0;
1880 
1881 	switch (event) {
1882 	case NETDEV_REGISTER:
1883 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1884 		break;
1885 	case NETDEV_UNREGISTER:
1886 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1887 		break;
1888 	case NETDEV_UP:
1889 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1890 		break;
1891 	case NETDEV_DOWN:
1892 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1893 		break;
1894 	case NETDEV_CHANGEUPPER:
1895 		chup = container_of(info, typeof(*chup), info);
1896 		extack = info->extack;
1897 		if (netif_is_l3_master(chup->upper_dev))
1898 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1899 								   ol_dev,
1900 								   extack);
1901 		break;
1902 	case NETDEV_CHANGE:
1903 		extack = info->extack;
1904 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1905 							      ol_dev, extack);
1906 		break;
1907 	case NETDEV_CHANGEMTU:
1908 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1909 		break;
1910 	}
1911 	return err;
1912 }
1913 
1914 static int
1915 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1916 				   struct mlxsw_sp_ipip_entry *ipip_entry,
1917 				   struct net_device *ul_dev,
1918 				   bool *demote_this,
1919 				   unsigned long event,
1920 				   struct netdev_notifier_info *info)
1921 {
1922 	struct netdev_notifier_changeupper_info *chup;
1923 	struct netlink_ext_ack *extack;
1924 
1925 	switch (event) {
1926 	case NETDEV_CHANGEUPPER:
1927 		chup = container_of(info, typeof(*chup), info);
1928 		extack = info->extack;
1929 		if (netif_is_l3_master(chup->upper_dev))
1930 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1931 								    ipip_entry,
1932 								    ul_dev,
1933 								    demote_this,
1934 								    extack);
1935 		break;
1936 
1937 	case NETDEV_UP:
1938 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1939 							   ul_dev);
1940 	case NETDEV_DOWN:
1941 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1942 							     ipip_entry,
1943 							     ul_dev);
1944 	}
1945 	return 0;
1946 }
1947 
1948 static int
1949 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1950 				 struct net_device *ul_dev,
1951 				 unsigned long event,
1952 				 struct netdev_notifier_info *info)
1953 {
1954 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1955 	int err;
1956 
1957 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1958 								ul_dev,
1959 								ipip_entry))) {
1960 		struct mlxsw_sp_ipip_entry *prev;
1961 		bool demote_this = false;
1962 
1963 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1964 							 ul_dev, &demote_this,
1965 							 event, info);
1966 		if (err) {
1967 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1968 								 ul_dev);
1969 			return err;
1970 		}
1971 
1972 		if (demote_this) {
1973 			if (list_is_first(&ipip_entry->ipip_list_node,
1974 					  &mlxsw_sp->router->ipip_list))
1975 				prev = NULL;
1976 			else
1977 				/* This can't be cached from previous iteration,
1978 				 * because that entry could be gone now.
1979 				 */
1980 				prev = list_prev_entry(ipip_entry,
1981 						       ipip_list_node);
1982 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1983 			ipip_entry = prev;
1984 		}
1985 	}
1986 
1987 	return 0;
1988 }
1989 
1990 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1991 				      enum mlxsw_sp_l3proto ul_proto,
1992 				      const union mlxsw_sp_l3addr *ul_sip,
1993 				      u32 tunnel_index)
1994 {
1995 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1996 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1997 	struct mlxsw_sp_fib_entry *fib_entry;
1998 	int err = 0;
1999 
2000 	mutex_lock(&mlxsw_sp->router->lock);
2001 
2002 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
2003 		err = -EINVAL;
2004 		goto out;
2005 	}
2006 
2007 	router->nve_decap_config.ul_tb_id = ul_tb_id;
2008 	router->nve_decap_config.tunnel_index = tunnel_index;
2009 	router->nve_decap_config.ul_proto = ul_proto;
2010 	router->nve_decap_config.ul_sip = *ul_sip;
2011 	router->nve_decap_config.valid = true;
2012 
2013 	/* It is valid to create a tunnel with a local IP and only later
2014 	 * assign this IP address to a local interface
2015 	 */
2016 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2017 							 ul_proto, ul_sip,
2018 							 type);
2019 	if (!fib_entry)
2020 		goto out;
2021 
2022 	fib_entry->decap.tunnel_index = tunnel_index;
2023 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2024 
2025 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2026 	if (err)
2027 		goto err_fib_entry_update;
2028 
2029 	goto out;
2030 
2031 err_fib_entry_update:
2032 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2033 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2034 out:
2035 	mutex_unlock(&mlxsw_sp->router->lock);
2036 	return err;
2037 }
2038 
2039 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2040 				      enum mlxsw_sp_l3proto ul_proto,
2041 				      const union mlxsw_sp_l3addr *ul_sip)
2042 {
2043 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2044 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2045 	struct mlxsw_sp_fib_entry *fib_entry;
2046 
2047 	mutex_lock(&mlxsw_sp->router->lock);
2048 
2049 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2050 		goto out;
2051 
2052 	router->nve_decap_config.valid = false;
2053 
2054 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2055 							 ul_proto, ul_sip,
2056 							 type);
2057 	if (!fib_entry)
2058 		goto out;
2059 
2060 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2061 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2062 out:
2063 	mutex_unlock(&mlxsw_sp->router->lock);
2064 }
2065 
2066 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2067 					 u32 ul_tb_id,
2068 					 enum mlxsw_sp_l3proto ul_proto,
2069 					 const union mlxsw_sp_l3addr *ul_sip)
2070 {
2071 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2072 
2073 	return router->nve_decap_config.valid &&
2074 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
2075 	       router->nve_decap_config.ul_proto == ul_proto &&
2076 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2077 		       sizeof(*ul_sip));
2078 }
2079 
2080 struct mlxsw_sp_neigh_key {
2081 	struct neighbour *n;
2082 };
2083 
2084 struct mlxsw_sp_neigh_entry {
2085 	struct list_head rif_list_node;
2086 	struct rhash_head ht_node;
2087 	struct mlxsw_sp_neigh_key key;
2088 	u16 rif;
2089 	bool connected;
2090 	unsigned char ha[ETH_ALEN];
2091 	struct list_head nexthop_list; /* list of nexthops using
2092 					* this neigh entry
2093 					*/
2094 	struct list_head nexthop_neighs_list_node;
2095 	unsigned int counter_index;
2096 	bool counter_valid;
2097 };
2098 
2099 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2100 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2101 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2102 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
2103 };
2104 
2105 struct mlxsw_sp_neigh_entry *
2106 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2107 			struct mlxsw_sp_neigh_entry *neigh_entry)
2108 {
2109 	if (!neigh_entry) {
2110 		if (list_empty(&rif->neigh_list))
2111 			return NULL;
2112 		else
2113 			return list_first_entry(&rif->neigh_list,
2114 						typeof(*neigh_entry),
2115 						rif_list_node);
2116 	}
2117 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2118 		return NULL;
2119 	return list_next_entry(neigh_entry, rif_list_node);
2120 }
2121 
2122 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2123 {
2124 	return neigh_entry->key.n->tbl->family;
2125 }
2126 
2127 unsigned char *
2128 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2129 {
2130 	return neigh_entry->ha;
2131 }
2132 
2133 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2134 {
2135 	struct neighbour *n;
2136 
2137 	n = neigh_entry->key.n;
2138 	return ntohl(*((__be32 *) n->primary_key));
2139 }
2140 
2141 struct in6_addr *
2142 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2143 {
2144 	struct neighbour *n;
2145 
2146 	n = neigh_entry->key.n;
2147 	return (struct in6_addr *) &n->primary_key;
2148 }
2149 
2150 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2151 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2152 			       u64 *p_counter)
2153 {
2154 	if (!neigh_entry->counter_valid)
2155 		return -EINVAL;
2156 
2157 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2158 					 p_counter, NULL);
2159 }
2160 
2161 static struct mlxsw_sp_neigh_entry *
2162 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2163 			   u16 rif)
2164 {
2165 	struct mlxsw_sp_neigh_entry *neigh_entry;
2166 
2167 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2168 	if (!neigh_entry)
2169 		return NULL;
2170 
2171 	neigh_entry->key.n = n;
2172 	neigh_entry->rif = rif;
2173 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2174 
2175 	return neigh_entry;
2176 }
2177 
2178 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2179 {
2180 	kfree(neigh_entry);
2181 }
2182 
2183 static int
2184 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2185 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2186 {
2187 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2188 				      &neigh_entry->ht_node,
2189 				      mlxsw_sp_neigh_ht_params);
2190 }
2191 
2192 static void
2193 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2194 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2195 {
2196 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2197 			       &neigh_entry->ht_node,
2198 			       mlxsw_sp_neigh_ht_params);
2199 }
2200 
2201 static bool
2202 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2203 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2204 {
2205 	struct devlink *devlink;
2206 	const char *table_name;
2207 
2208 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2209 	case AF_INET:
2210 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2211 		break;
2212 	case AF_INET6:
2213 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2214 		break;
2215 	default:
2216 		WARN_ON(1);
2217 		return false;
2218 	}
2219 
2220 	devlink = priv_to_devlink(mlxsw_sp->core);
2221 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2222 }
2223 
2224 static void
2225 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2226 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2227 {
2228 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2229 		return;
2230 
2231 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2232 		return;
2233 
2234 	neigh_entry->counter_valid = true;
2235 }
2236 
2237 static void
2238 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2239 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2240 {
2241 	if (!neigh_entry->counter_valid)
2242 		return;
2243 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2244 				   neigh_entry->counter_index);
2245 	neigh_entry->counter_valid = false;
2246 }
2247 
2248 static struct mlxsw_sp_neigh_entry *
2249 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2250 {
2251 	struct mlxsw_sp_neigh_entry *neigh_entry;
2252 	struct mlxsw_sp_rif *rif;
2253 	int err;
2254 
2255 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2256 	if (!rif)
2257 		return ERR_PTR(-EINVAL);
2258 
2259 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2260 	if (!neigh_entry)
2261 		return ERR_PTR(-ENOMEM);
2262 
2263 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2264 	if (err)
2265 		goto err_neigh_entry_insert;
2266 
2267 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2268 	atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
2269 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2270 
2271 	return neigh_entry;
2272 
2273 err_neigh_entry_insert:
2274 	mlxsw_sp_neigh_entry_free(neigh_entry);
2275 	return ERR_PTR(err);
2276 }
2277 
2278 static void
2279 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2280 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2281 {
2282 	list_del(&neigh_entry->rif_list_node);
2283 	atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
2284 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2285 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2286 	mlxsw_sp_neigh_entry_free(neigh_entry);
2287 }
2288 
2289 static struct mlxsw_sp_neigh_entry *
2290 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2291 {
2292 	struct mlxsw_sp_neigh_key key;
2293 
2294 	key.n = n;
2295 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2296 				      &key, mlxsw_sp_neigh_ht_params);
2297 }
2298 
2299 static void
2300 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2301 {
2302 	unsigned long interval;
2303 
2304 #if IS_ENABLED(CONFIG_IPV6)
2305 	interval = min_t(unsigned long,
2306 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2307 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2308 #else
2309 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2310 #endif
2311 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2312 }
2313 
2314 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2315 						   char *rauhtd_pl,
2316 						   int ent_index)
2317 {
2318 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2319 	struct net_device *dev;
2320 	struct neighbour *n;
2321 	__be32 dipn;
2322 	u32 dip;
2323 	u16 rif;
2324 
2325 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2326 
2327 	if (WARN_ON_ONCE(rif >= max_rifs))
2328 		return;
2329 	if (!mlxsw_sp->router->rifs[rif]) {
2330 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2331 		return;
2332 	}
2333 
2334 	dipn = htonl(dip);
2335 	dev = mlxsw_sp->router->rifs[rif]->dev;
2336 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2337 	if (!n)
2338 		return;
2339 
2340 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2341 	neigh_event_send(n, NULL);
2342 	neigh_release(n);
2343 }
2344 
2345 #if IS_ENABLED(CONFIG_IPV6)
2346 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2347 						   char *rauhtd_pl,
2348 						   int rec_index)
2349 {
2350 	struct net_device *dev;
2351 	struct neighbour *n;
2352 	struct in6_addr dip;
2353 	u16 rif;
2354 
2355 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2356 					 (char *) &dip);
2357 
2358 	if (!mlxsw_sp->router->rifs[rif]) {
2359 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2360 		return;
2361 	}
2362 
2363 	dev = mlxsw_sp->router->rifs[rif]->dev;
2364 	n = neigh_lookup(&nd_tbl, &dip, dev);
2365 	if (!n)
2366 		return;
2367 
2368 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2369 	neigh_event_send(n, NULL);
2370 	neigh_release(n);
2371 }
2372 #else
2373 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2374 						   char *rauhtd_pl,
2375 						   int rec_index)
2376 {
2377 }
2378 #endif
2379 
2380 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2381 						   char *rauhtd_pl,
2382 						   int rec_index)
2383 {
2384 	u8 num_entries;
2385 	int i;
2386 
2387 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2388 								rec_index);
2389 	/* Hardware starts counting at 0, so add 1. */
2390 	num_entries++;
2391 
2392 	/* Each record consists of several neighbour entries. */
2393 	for (i = 0; i < num_entries; i++) {
2394 		int ent_index;
2395 
2396 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2397 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2398 						       ent_index);
2399 	}
2400 
2401 }
2402 
2403 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2404 						   char *rauhtd_pl,
2405 						   int rec_index)
2406 {
2407 	/* One record contains one entry. */
2408 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2409 					       rec_index);
2410 }
2411 
2412 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2413 					      char *rauhtd_pl, int rec_index)
2414 {
2415 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2416 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2417 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2418 						       rec_index);
2419 		break;
2420 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2421 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2422 						       rec_index);
2423 		break;
2424 	}
2425 }
2426 
2427 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2428 {
2429 	u8 num_rec, last_rec_index, num_entries;
2430 
2431 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2432 	last_rec_index = num_rec - 1;
2433 
2434 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2435 		return false;
2436 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2437 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2438 		return true;
2439 
2440 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2441 								last_rec_index);
2442 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2443 		return true;
2444 	return false;
2445 }
2446 
2447 static int
2448 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2449 				       char *rauhtd_pl,
2450 				       enum mlxsw_reg_rauhtd_type type)
2451 {
2452 	int i, num_rec;
2453 	int err;
2454 
2455 	/* Ensure the RIF we read from the device does not change mid-dump. */
2456 	mutex_lock(&mlxsw_sp->router->lock);
2457 	do {
2458 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2459 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2460 				      rauhtd_pl);
2461 		if (err) {
2462 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2463 			break;
2464 		}
2465 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2466 		for (i = 0; i < num_rec; i++)
2467 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2468 							  i);
2469 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2470 	mutex_unlock(&mlxsw_sp->router->lock);
2471 
2472 	return err;
2473 }
2474 
2475 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2476 {
2477 	enum mlxsw_reg_rauhtd_type type;
2478 	char *rauhtd_pl;
2479 	int err;
2480 
2481 	if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
2482 		return 0;
2483 
2484 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2485 	if (!rauhtd_pl)
2486 		return -ENOMEM;
2487 
2488 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2489 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2490 	if (err)
2491 		goto out;
2492 
2493 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2494 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2495 out:
2496 	kfree(rauhtd_pl);
2497 	return err;
2498 }
2499 
2500 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2501 {
2502 	struct mlxsw_sp_neigh_entry *neigh_entry;
2503 
2504 	mutex_lock(&mlxsw_sp->router->lock);
2505 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2506 			    nexthop_neighs_list_node)
2507 		/* If this neigh have nexthops, make the kernel think this neigh
2508 		 * is active regardless of the traffic.
2509 		 */
2510 		neigh_event_send(neigh_entry->key.n, NULL);
2511 	mutex_unlock(&mlxsw_sp->router->lock);
2512 }
2513 
2514 static void
2515 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2516 {
2517 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2518 
2519 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2520 			       msecs_to_jiffies(interval));
2521 }
2522 
2523 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2524 {
2525 	struct mlxsw_sp_router *router;
2526 	int err;
2527 
2528 	router = container_of(work, struct mlxsw_sp_router,
2529 			      neighs_update.dw.work);
2530 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2531 	if (err)
2532 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2533 
2534 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2535 
2536 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2537 }
2538 
2539 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2540 {
2541 	struct mlxsw_sp_neigh_entry *neigh_entry;
2542 	struct mlxsw_sp_router *router;
2543 
2544 	router = container_of(work, struct mlxsw_sp_router,
2545 			      nexthop_probe_dw.work);
2546 	/* Iterate over nexthop neighbours, find those who are unresolved and
2547 	 * send arp on them. This solves the chicken-egg problem when
2548 	 * the nexthop wouldn't get offloaded until the neighbor is resolved
2549 	 * but it wouldn't get resolved ever in case traffic is flowing in HW
2550 	 * using different nexthop.
2551 	 */
2552 	mutex_lock(&router->lock);
2553 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2554 			    nexthop_neighs_list_node)
2555 		if (!neigh_entry->connected)
2556 			neigh_event_send(neigh_entry->key.n, NULL);
2557 	mutex_unlock(&router->lock);
2558 
2559 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2560 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2561 }
2562 
2563 static void
2564 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2565 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2566 			      bool removing, bool dead);
2567 
2568 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2569 {
2570 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2571 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2572 }
2573 
2574 static int
2575 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2576 				struct mlxsw_sp_neigh_entry *neigh_entry,
2577 				enum mlxsw_reg_rauht_op op)
2578 {
2579 	struct neighbour *n = neigh_entry->key.n;
2580 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2581 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2582 
2583 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2584 			      dip);
2585 	if (neigh_entry->counter_valid)
2586 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2587 					     neigh_entry->counter_index);
2588 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2589 }
2590 
2591 static int
2592 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2593 				struct mlxsw_sp_neigh_entry *neigh_entry,
2594 				enum mlxsw_reg_rauht_op op)
2595 {
2596 	struct neighbour *n = neigh_entry->key.n;
2597 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2598 	const char *dip = n->primary_key;
2599 
2600 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2601 			      dip);
2602 	if (neigh_entry->counter_valid)
2603 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2604 					     neigh_entry->counter_index);
2605 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2606 }
2607 
2608 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2609 {
2610 	struct neighbour *n = neigh_entry->key.n;
2611 
2612 	/* Packets with a link-local destination address are trapped
2613 	 * after LPM lookup and never reach the neighbour table, so
2614 	 * there is no need to program such neighbours to the device.
2615 	 */
2616 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2617 	    IPV6_ADDR_LINKLOCAL)
2618 		return true;
2619 	return false;
2620 }
2621 
2622 static void
2623 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2624 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2625 			    bool adding)
2626 {
2627 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2628 	int err;
2629 
2630 	if (!adding && !neigh_entry->connected)
2631 		return;
2632 	neigh_entry->connected = adding;
2633 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2634 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2635 						      op);
2636 		if (err)
2637 			return;
2638 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2639 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2640 			return;
2641 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2642 						      op);
2643 		if (err)
2644 			return;
2645 	} else {
2646 		WARN_ON_ONCE(1);
2647 		return;
2648 	}
2649 
2650 	if (adding)
2651 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2652 	else
2653 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2654 }
2655 
2656 void
2657 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2658 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2659 				    bool adding)
2660 {
2661 	if (adding)
2662 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2663 	else
2664 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2665 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2666 }
2667 
2668 struct mlxsw_sp_netevent_work {
2669 	struct work_struct work;
2670 	struct mlxsw_sp *mlxsw_sp;
2671 	struct neighbour *n;
2672 };
2673 
2674 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2675 {
2676 	struct mlxsw_sp_netevent_work *net_work =
2677 		container_of(work, struct mlxsw_sp_netevent_work, work);
2678 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2679 	struct mlxsw_sp_neigh_entry *neigh_entry;
2680 	struct neighbour *n = net_work->n;
2681 	unsigned char ha[ETH_ALEN];
2682 	bool entry_connected;
2683 	u8 nud_state, dead;
2684 
2685 	/* If these parameters are changed after we release the lock,
2686 	 * then we are guaranteed to receive another event letting us
2687 	 * know about it.
2688 	 */
2689 	read_lock_bh(&n->lock);
2690 	memcpy(ha, n->ha, ETH_ALEN);
2691 	nud_state = n->nud_state;
2692 	dead = n->dead;
2693 	read_unlock_bh(&n->lock);
2694 
2695 	mutex_lock(&mlxsw_sp->router->lock);
2696 	mlxsw_sp_span_respin(mlxsw_sp);
2697 
2698 	entry_connected = nud_state & NUD_VALID && !dead;
2699 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2700 	if (!entry_connected && !neigh_entry)
2701 		goto out;
2702 	if (!neigh_entry) {
2703 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2704 		if (IS_ERR(neigh_entry))
2705 			goto out;
2706 	}
2707 
2708 	if (neigh_entry->connected && entry_connected &&
2709 	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2710 		goto out;
2711 
2712 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2713 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2714 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2715 				      dead);
2716 
2717 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2718 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2719 
2720 out:
2721 	mutex_unlock(&mlxsw_sp->router->lock);
2722 	neigh_release(n);
2723 	kfree(net_work);
2724 }
2725 
2726 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2727 
2728 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2729 {
2730 	struct mlxsw_sp_netevent_work *net_work =
2731 		container_of(work, struct mlxsw_sp_netevent_work, work);
2732 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2733 
2734 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2735 	kfree(net_work);
2736 }
2737 
2738 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2739 
2740 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2741 {
2742 	struct mlxsw_sp_netevent_work *net_work =
2743 		container_of(work, struct mlxsw_sp_netevent_work, work);
2744 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2745 
2746 	__mlxsw_sp_router_init(mlxsw_sp);
2747 	kfree(net_work);
2748 }
2749 
2750 static int mlxsw_sp_router_schedule_work(struct net *net,
2751 					 struct notifier_block *nb,
2752 					 void (*cb)(struct work_struct *))
2753 {
2754 	struct mlxsw_sp_netevent_work *net_work;
2755 	struct mlxsw_sp_router *router;
2756 
2757 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2758 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2759 		return NOTIFY_DONE;
2760 
2761 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2762 	if (!net_work)
2763 		return NOTIFY_BAD;
2764 
2765 	INIT_WORK(&net_work->work, cb);
2766 	net_work->mlxsw_sp = router->mlxsw_sp;
2767 	mlxsw_core_schedule_work(&net_work->work);
2768 	return NOTIFY_DONE;
2769 }
2770 
2771 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2772 					  unsigned long event, void *ptr)
2773 {
2774 	struct mlxsw_sp_netevent_work *net_work;
2775 	struct mlxsw_sp_port *mlxsw_sp_port;
2776 	struct mlxsw_sp *mlxsw_sp;
2777 	unsigned long interval;
2778 	struct neigh_parms *p;
2779 	struct neighbour *n;
2780 
2781 	switch (event) {
2782 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2783 		p = ptr;
2784 
2785 		/* We don't care about changes in the default table. */
2786 		if (!p->dev || (p->tbl->family != AF_INET &&
2787 				p->tbl->family != AF_INET6))
2788 			return NOTIFY_DONE;
2789 
2790 		/* We are in atomic context and can't take RTNL mutex,
2791 		 * so use RCU variant to walk the device chain.
2792 		 */
2793 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2794 		if (!mlxsw_sp_port)
2795 			return NOTIFY_DONE;
2796 
2797 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2798 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2799 		mlxsw_sp->router->neighs_update.interval = interval;
2800 
2801 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2802 		break;
2803 	case NETEVENT_NEIGH_UPDATE:
2804 		n = ptr;
2805 
2806 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2807 			return NOTIFY_DONE;
2808 
2809 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2810 		if (!mlxsw_sp_port)
2811 			return NOTIFY_DONE;
2812 
2813 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2814 		if (!net_work) {
2815 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
2816 			return NOTIFY_BAD;
2817 		}
2818 
2819 		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2820 		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2821 		net_work->n = n;
2822 
2823 		/* Take a reference to ensure the neighbour won't be
2824 		 * destructed until we drop the reference in delayed
2825 		 * work.
2826 		 */
2827 		neigh_clone(n);
2828 		mlxsw_core_schedule_work(&net_work->work);
2829 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2830 		break;
2831 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2832 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2833 		return mlxsw_sp_router_schedule_work(ptr, nb,
2834 				mlxsw_sp_router_mp_hash_event_work);
2835 
2836 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2837 		return mlxsw_sp_router_schedule_work(ptr, nb,
2838 				mlxsw_sp_router_update_priority_work);
2839 	}
2840 
2841 	return NOTIFY_DONE;
2842 }
2843 
2844 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2845 {
2846 	int err;
2847 
2848 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2849 			      &mlxsw_sp_neigh_ht_params);
2850 	if (err)
2851 		return err;
2852 
2853 	/* Initialize the polling interval according to the default
2854 	 * table.
2855 	 */
2856 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2857 
2858 	/* Create the delayed works for the activity_update */
2859 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2860 			  mlxsw_sp_router_neighs_update_work);
2861 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2862 			  mlxsw_sp_router_probe_unresolved_nexthops);
2863 	atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
2864 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2865 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2866 	return 0;
2867 }
2868 
2869 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2870 {
2871 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2872 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2873 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2874 }
2875 
2876 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2877 					 struct mlxsw_sp_rif *rif)
2878 {
2879 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2880 
2881 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2882 				 rif_list_node) {
2883 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2884 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2885 	}
2886 }
2887 
2888 enum mlxsw_sp_nexthop_type {
2889 	MLXSW_SP_NEXTHOP_TYPE_ETH,
2890 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
2891 };
2892 
2893 enum mlxsw_sp_nexthop_action {
2894 	/* Nexthop forwards packets to an egress RIF */
2895 	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
2896 	/* Nexthop discards packets */
2897 	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
2898 	/* Nexthop traps packets */
2899 	MLXSW_SP_NEXTHOP_ACTION_TRAP,
2900 };
2901 
2902 struct mlxsw_sp_nexthop_key {
2903 	struct fib_nh *fib_nh;
2904 };
2905 
2906 struct mlxsw_sp_nexthop {
2907 	struct list_head neigh_list_node; /* member of neigh entry list */
2908 	struct list_head rif_list_node;
2909 	struct list_head router_list_node;
2910 	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
2911 						   * this nexthop belongs to
2912 						   */
2913 	struct rhash_head ht_node;
2914 	struct neigh_table *neigh_tbl;
2915 	struct mlxsw_sp_nexthop_key key;
2916 	unsigned char gw_addr[sizeof(struct in6_addr)];
2917 	int ifindex;
2918 	int nh_weight;
2919 	int norm_nh_weight;
2920 	int num_adj_entries;
2921 	struct mlxsw_sp_rif *rif;
2922 	u8 should_offload:1, /* set indicates this nexthop should be written
2923 			      * to the adjacency table.
2924 			      */
2925 	   offloaded:1, /* set indicates this nexthop was written to the
2926 			 * adjacency table.
2927 			 */
2928 	   update:1; /* set indicates this nexthop should be updated in the
2929 		      * adjacency table (f.e., its MAC changed).
2930 		      */
2931 	enum mlxsw_sp_nexthop_action action;
2932 	enum mlxsw_sp_nexthop_type type;
2933 	union {
2934 		struct mlxsw_sp_neigh_entry *neigh_entry;
2935 		struct mlxsw_sp_ipip_entry *ipip_entry;
2936 	};
2937 	unsigned int counter_index;
2938 	bool counter_valid;
2939 };
2940 
2941 enum mlxsw_sp_nexthop_group_type {
2942 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
2943 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
2944 	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
2945 };
2946 
2947 struct mlxsw_sp_nexthop_group_info {
2948 	struct mlxsw_sp_nexthop_group *nh_grp;
2949 	u32 adj_index;
2950 	u16 ecmp_size;
2951 	u16 count;
2952 	int sum_norm_weight;
2953 	u8 adj_index_valid:1,
2954 	   gateway:1, /* routes using the group use a gateway */
2955 	   is_resilient:1;
2956 	struct list_head list; /* member in nh_res_grp_list */
2957 	struct mlxsw_sp_nexthop nexthops[];
2958 #define nh_rif	nexthops[0].rif
2959 };
2960 
2961 struct mlxsw_sp_nexthop_group_vr_key {
2962 	u16 vr_id;
2963 	enum mlxsw_sp_l3proto proto;
2964 };
2965 
2966 struct mlxsw_sp_nexthop_group_vr_entry {
2967 	struct list_head list; /* member in vr_list */
2968 	struct rhash_head ht_node; /* member in vr_ht */
2969 	refcount_t ref_count;
2970 	struct mlxsw_sp_nexthop_group_vr_key key;
2971 };
2972 
2973 struct mlxsw_sp_nexthop_group {
2974 	struct rhash_head ht_node;
2975 	struct list_head fib_list; /* list of fib entries that use this group */
2976 	union {
2977 		struct {
2978 			struct fib_info *fi;
2979 		} ipv4;
2980 		struct {
2981 			u32 id;
2982 		} obj;
2983 	};
2984 	struct mlxsw_sp_nexthop_group_info *nhgi;
2985 	struct list_head vr_list;
2986 	struct rhashtable vr_ht;
2987 	enum mlxsw_sp_nexthop_group_type type;
2988 	bool can_destroy;
2989 };
2990 
2991 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2992 				    struct mlxsw_sp_nexthop *nh)
2993 {
2994 	struct devlink *devlink;
2995 
2996 	devlink = priv_to_devlink(mlxsw_sp->core);
2997 	if (!devlink_dpipe_table_counter_enabled(devlink,
2998 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2999 		return;
3000 
3001 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3002 		return;
3003 
3004 	nh->counter_valid = true;
3005 }
3006 
3007 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3008 				   struct mlxsw_sp_nexthop *nh)
3009 {
3010 	if (!nh->counter_valid)
3011 		return;
3012 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3013 	nh->counter_valid = false;
3014 }
3015 
3016 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3017 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3018 {
3019 	if (!nh->counter_valid)
3020 		return -EINVAL;
3021 
3022 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3023 					 p_counter, NULL);
3024 }
3025 
3026 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3027 					       struct mlxsw_sp_nexthop *nh)
3028 {
3029 	if (!nh) {
3030 		if (list_empty(&router->nexthop_list))
3031 			return NULL;
3032 		else
3033 			return list_first_entry(&router->nexthop_list,
3034 						typeof(*nh), router_list_node);
3035 	}
3036 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3037 		return NULL;
3038 	return list_next_entry(nh, router_list_node);
3039 }
3040 
3041 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3042 {
3043 	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3044 }
3045 
3046 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3047 {
3048 	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3049 	    !mlxsw_sp_nexthop_is_forward(nh))
3050 		return NULL;
3051 	return nh->neigh_entry->ha;
3052 }
3053 
3054 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3055 			     u32 *p_adj_size, u32 *p_adj_hash_index)
3056 {
3057 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3058 	u32 adj_hash_index = 0;
3059 	int i;
3060 
3061 	if (!nh->offloaded || !nhgi->adj_index_valid)
3062 		return -EINVAL;
3063 
3064 	*p_adj_index = nhgi->adj_index;
3065 	*p_adj_size = nhgi->ecmp_size;
3066 
3067 	for (i = 0; i < nhgi->count; i++) {
3068 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3069 
3070 		if (nh_iter == nh)
3071 			break;
3072 		if (nh_iter->offloaded)
3073 			adj_hash_index += nh_iter->num_adj_entries;
3074 	}
3075 
3076 	*p_adj_hash_index = adj_hash_index;
3077 	return 0;
3078 }
3079 
3080 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3081 {
3082 	return nh->rif;
3083 }
3084 
3085 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3086 {
3087 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3088 	int i;
3089 
3090 	for (i = 0; i < nhgi->count; i++) {
3091 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3092 
3093 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3094 			return true;
3095 	}
3096 	return false;
3097 }
3098 
3099 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3100 	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3101 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3102 	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3103 	.automatic_shrinking = true,
3104 };
3105 
3106 static struct mlxsw_sp_nexthop_group_vr_entry *
3107 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3108 				       const struct mlxsw_sp_fib *fib)
3109 {
3110 	struct mlxsw_sp_nexthop_group_vr_key key;
3111 
3112 	memset(&key, 0, sizeof(key));
3113 	key.vr_id = fib->vr->id;
3114 	key.proto = fib->proto;
3115 	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3116 				      mlxsw_sp_nexthop_group_vr_ht_params);
3117 }
3118 
3119 static int
3120 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3121 				       const struct mlxsw_sp_fib *fib)
3122 {
3123 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3124 	int err;
3125 
3126 	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3127 	if (!vr_entry)
3128 		return -ENOMEM;
3129 
3130 	vr_entry->key.vr_id = fib->vr->id;
3131 	vr_entry->key.proto = fib->proto;
3132 	refcount_set(&vr_entry->ref_count, 1);
3133 
3134 	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3135 				     mlxsw_sp_nexthop_group_vr_ht_params);
3136 	if (err)
3137 		goto err_hashtable_insert;
3138 
3139 	list_add(&vr_entry->list, &nh_grp->vr_list);
3140 
3141 	return 0;
3142 
3143 err_hashtable_insert:
3144 	kfree(vr_entry);
3145 	return err;
3146 }
3147 
3148 static void
3149 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3150 					struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3151 {
3152 	list_del(&vr_entry->list);
3153 	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3154 			       mlxsw_sp_nexthop_group_vr_ht_params);
3155 	kfree(vr_entry);
3156 }
3157 
3158 static int
3159 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3160 			       const struct mlxsw_sp_fib *fib)
3161 {
3162 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3163 
3164 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3165 	if (vr_entry) {
3166 		refcount_inc(&vr_entry->ref_count);
3167 		return 0;
3168 	}
3169 
3170 	return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3171 }
3172 
3173 static void
3174 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3175 				 const struct mlxsw_sp_fib *fib)
3176 {
3177 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3178 
3179 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3180 	if (WARN_ON_ONCE(!vr_entry))
3181 		return;
3182 
3183 	if (!refcount_dec_and_test(&vr_entry->ref_count))
3184 		return;
3185 
3186 	mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3187 }
3188 
3189 struct mlxsw_sp_nexthop_group_cmp_arg {
3190 	enum mlxsw_sp_nexthop_group_type type;
3191 	union {
3192 		struct fib_info *fi;
3193 		struct mlxsw_sp_fib6_entry *fib6_entry;
3194 		u32 id;
3195 	};
3196 };
3197 
3198 static bool
3199 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3200 				    const struct in6_addr *gw, int ifindex,
3201 				    int weight)
3202 {
3203 	int i;
3204 
3205 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3206 		const struct mlxsw_sp_nexthop *nh;
3207 
3208 		nh = &nh_grp->nhgi->nexthops[i];
3209 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3210 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3211 			return true;
3212 	}
3213 
3214 	return false;
3215 }
3216 
3217 static bool
3218 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3219 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
3220 {
3221 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3222 
3223 	if (nh_grp->nhgi->count != fib6_entry->nrt6)
3224 		return false;
3225 
3226 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3227 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3228 		struct in6_addr *gw;
3229 		int ifindex, weight;
3230 
3231 		ifindex = fib6_nh->fib_nh_dev->ifindex;
3232 		weight = fib6_nh->fib_nh_weight;
3233 		gw = &fib6_nh->fib_nh_gw6;
3234 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3235 							 weight))
3236 			return false;
3237 	}
3238 
3239 	return true;
3240 }
3241 
3242 static int
3243 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3244 {
3245 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3246 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3247 
3248 	if (nh_grp->type != cmp_arg->type)
3249 		return 1;
3250 
3251 	switch (cmp_arg->type) {
3252 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3253 		return cmp_arg->fi != nh_grp->ipv4.fi;
3254 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3255 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3256 						    cmp_arg->fib6_entry);
3257 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3258 		return cmp_arg->id != nh_grp->obj.id;
3259 	default:
3260 		WARN_ON(1);
3261 		return 1;
3262 	}
3263 }
3264 
3265 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3266 {
3267 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
3268 	const struct mlxsw_sp_nexthop *nh;
3269 	struct fib_info *fi;
3270 	unsigned int val;
3271 	int i;
3272 
3273 	switch (nh_grp->type) {
3274 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3275 		fi = nh_grp->ipv4.fi;
3276 		return jhash(&fi, sizeof(fi), seed);
3277 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3278 		val = nh_grp->nhgi->count;
3279 		for (i = 0; i < nh_grp->nhgi->count; i++) {
3280 			nh = &nh_grp->nhgi->nexthops[i];
3281 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3282 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3283 		}
3284 		return jhash(&val, sizeof(val), seed);
3285 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3286 		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3287 	default:
3288 		WARN_ON(1);
3289 		return 0;
3290 	}
3291 }
3292 
3293 static u32
3294 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3295 {
3296 	unsigned int val = fib6_entry->nrt6;
3297 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3298 
3299 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3300 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3301 		struct net_device *dev = fib6_nh->fib_nh_dev;
3302 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3303 
3304 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3305 		val ^= jhash(gw, sizeof(*gw), seed);
3306 	}
3307 
3308 	return jhash(&val, sizeof(val), seed);
3309 }
3310 
3311 static u32
3312 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3313 {
3314 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3315 
3316 	switch (cmp_arg->type) {
3317 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3318 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3319 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3320 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3321 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3322 		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3323 	default:
3324 		WARN_ON(1);
3325 		return 0;
3326 	}
3327 }
3328 
3329 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3330 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3331 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3332 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3333 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3334 };
3335 
3336 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3337 					 struct mlxsw_sp_nexthop_group *nh_grp)
3338 {
3339 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3340 	    !nh_grp->nhgi->gateway)
3341 		return 0;
3342 
3343 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3344 				      &nh_grp->ht_node,
3345 				      mlxsw_sp_nexthop_group_ht_params);
3346 }
3347 
3348 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3349 					  struct mlxsw_sp_nexthop_group *nh_grp)
3350 {
3351 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3352 	    !nh_grp->nhgi->gateway)
3353 		return;
3354 
3355 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3356 			       &nh_grp->ht_node,
3357 			       mlxsw_sp_nexthop_group_ht_params);
3358 }
3359 
3360 static struct mlxsw_sp_nexthop_group *
3361 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3362 			       struct fib_info *fi)
3363 {
3364 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3365 
3366 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3367 	cmp_arg.fi = fi;
3368 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3369 				      &cmp_arg,
3370 				      mlxsw_sp_nexthop_group_ht_params);
3371 }
3372 
3373 static struct mlxsw_sp_nexthop_group *
3374 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3375 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3376 {
3377 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3378 
3379 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3380 	cmp_arg.fib6_entry = fib6_entry;
3381 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3382 				      &cmp_arg,
3383 				      mlxsw_sp_nexthop_group_ht_params);
3384 }
3385 
3386 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3387 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3388 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3389 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3390 };
3391 
3392 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3393 				   struct mlxsw_sp_nexthop *nh)
3394 {
3395 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3396 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3397 }
3398 
3399 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3400 				    struct mlxsw_sp_nexthop *nh)
3401 {
3402 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3403 			       mlxsw_sp_nexthop_ht_params);
3404 }
3405 
3406 static struct mlxsw_sp_nexthop *
3407 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3408 			struct mlxsw_sp_nexthop_key key)
3409 {
3410 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3411 				      mlxsw_sp_nexthop_ht_params);
3412 }
3413 
3414 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3415 					     enum mlxsw_sp_l3proto proto,
3416 					     u16 vr_id,
3417 					     u32 adj_index, u16 ecmp_size,
3418 					     u32 new_adj_index,
3419 					     u16 new_ecmp_size)
3420 {
3421 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3422 
3423 	mlxsw_reg_raleu_pack(raleu_pl,
3424 			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3425 			     adj_index, ecmp_size, new_adj_index,
3426 			     new_ecmp_size);
3427 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3428 }
3429 
3430 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3431 					  struct mlxsw_sp_nexthop_group *nh_grp,
3432 					  u32 old_adj_index, u16 old_ecmp_size)
3433 {
3434 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3435 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3436 	int err;
3437 
3438 	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3439 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3440 							vr_entry->key.proto,
3441 							vr_entry->key.vr_id,
3442 							old_adj_index,
3443 							old_ecmp_size,
3444 							nhgi->adj_index,
3445 							nhgi->ecmp_size);
3446 		if (err)
3447 			goto err_mass_update_vr;
3448 	}
3449 	return 0;
3450 
3451 err_mass_update_vr:
3452 	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3453 		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3454 						  vr_entry->key.vr_id,
3455 						  nhgi->adj_index,
3456 						  nhgi->ecmp_size,
3457 						  old_adj_index, old_ecmp_size);
3458 	return err;
3459 }
3460 
3461 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3462 					 u32 adj_index,
3463 					 struct mlxsw_sp_nexthop *nh,
3464 					 bool force, char *ratr_pl)
3465 {
3466 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3467 	enum mlxsw_reg_ratr_op op;
3468 	u16 rif_index;
3469 
3470 	rif_index = nh->rif ? nh->rif->rif_index :
3471 			      mlxsw_sp->router->lb_rif_index;
3472 	op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3473 		     MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3474 	mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3475 			    adj_index, rif_index);
3476 	switch (nh->action) {
3477 	case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3478 		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3479 		break;
3480 	case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3481 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3482 					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3483 		break;
3484 	case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3485 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3486 					       MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3487 		mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3488 		break;
3489 	default:
3490 		WARN_ON_ONCE(1);
3491 		return -EINVAL;
3492 	}
3493 	if (nh->counter_valid)
3494 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3495 	else
3496 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3497 
3498 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3499 }
3500 
3501 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3502 				struct mlxsw_sp_nexthop *nh, bool force,
3503 				char *ratr_pl)
3504 {
3505 	int i;
3506 
3507 	for (i = 0; i < nh->num_adj_entries; i++) {
3508 		int err;
3509 
3510 		err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3511 						    nh, force, ratr_pl);
3512 		if (err)
3513 			return err;
3514 	}
3515 
3516 	return 0;
3517 }
3518 
3519 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3520 					  u32 adj_index,
3521 					  struct mlxsw_sp_nexthop *nh,
3522 					  bool force, char *ratr_pl)
3523 {
3524 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3525 
3526 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3527 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3528 					force, ratr_pl);
3529 }
3530 
3531 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3532 					u32 adj_index,
3533 					struct mlxsw_sp_nexthop *nh, bool force,
3534 					char *ratr_pl)
3535 {
3536 	int i;
3537 
3538 	for (i = 0; i < nh->num_adj_entries; i++) {
3539 		int err;
3540 
3541 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3542 						     nh, force, ratr_pl);
3543 		if (err)
3544 			return err;
3545 	}
3546 
3547 	return 0;
3548 }
3549 
3550 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3551 				   struct mlxsw_sp_nexthop *nh, bool force,
3552 				   char *ratr_pl)
3553 {
3554 	/* When action is discard or trap, the nexthop must be
3555 	 * programmed as an Ethernet nexthop.
3556 	 */
3557 	if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3558 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3559 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3560 		return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3561 						   force, ratr_pl);
3562 	else
3563 		return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3564 						    force, ratr_pl);
3565 }
3566 
3567 static int
3568 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3569 			      struct mlxsw_sp_nexthop_group_info *nhgi,
3570 			      bool reallocate)
3571 {
3572 	char ratr_pl[MLXSW_REG_RATR_LEN];
3573 	u32 adj_index = nhgi->adj_index; /* base */
3574 	struct mlxsw_sp_nexthop *nh;
3575 	int i;
3576 
3577 	for (i = 0; i < nhgi->count; i++) {
3578 		nh = &nhgi->nexthops[i];
3579 
3580 		if (!nh->should_offload) {
3581 			nh->offloaded = 0;
3582 			continue;
3583 		}
3584 
3585 		if (nh->update || reallocate) {
3586 			int err = 0;
3587 
3588 			err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3589 						      true, ratr_pl);
3590 			if (err)
3591 				return err;
3592 			nh->update = 0;
3593 			nh->offloaded = 1;
3594 		}
3595 		adj_index += nh->num_adj_entries;
3596 	}
3597 	return 0;
3598 }
3599 
3600 static int
3601 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3602 				    struct mlxsw_sp_nexthop_group *nh_grp)
3603 {
3604 	struct mlxsw_sp_fib_entry *fib_entry;
3605 	int err;
3606 
3607 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3608 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3609 		if (err)
3610 			return err;
3611 	}
3612 	return 0;
3613 }
3614 
3615 struct mlxsw_sp_adj_grp_size_range {
3616 	u16 start; /* Inclusive */
3617 	u16 end; /* Inclusive */
3618 };
3619 
3620 /* Ordered by range start value */
3621 static const struct mlxsw_sp_adj_grp_size_range
3622 mlxsw_sp1_adj_grp_size_ranges[] = {
3623 	{ .start = 1, .end = 64 },
3624 	{ .start = 512, .end = 512 },
3625 	{ .start = 1024, .end = 1024 },
3626 	{ .start = 2048, .end = 2048 },
3627 	{ .start = 4096, .end = 4096 },
3628 };
3629 
3630 /* Ordered by range start value */
3631 static const struct mlxsw_sp_adj_grp_size_range
3632 mlxsw_sp2_adj_grp_size_ranges[] = {
3633 	{ .start = 1, .end = 128 },
3634 	{ .start = 256, .end = 256 },
3635 	{ .start = 512, .end = 512 },
3636 	{ .start = 1024, .end = 1024 },
3637 	{ .start = 2048, .end = 2048 },
3638 	{ .start = 4096, .end = 4096 },
3639 };
3640 
3641 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3642 					   u16 *p_adj_grp_size)
3643 {
3644 	int i;
3645 
3646 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3647 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3648 
3649 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3650 
3651 		if (*p_adj_grp_size >= size_range->start &&
3652 		    *p_adj_grp_size <= size_range->end)
3653 			return;
3654 
3655 		if (*p_adj_grp_size <= size_range->end) {
3656 			*p_adj_grp_size = size_range->end;
3657 			return;
3658 		}
3659 	}
3660 }
3661 
3662 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3663 					     u16 *p_adj_grp_size,
3664 					     unsigned int alloc_size)
3665 {
3666 	int i;
3667 
3668 	for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3669 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3670 
3671 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3672 
3673 		if (alloc_size >= size_range->end) {
3674 			*p_adj_grp_size = size_range->end;
3675 			return;
3676 		}
3677 	}
3678 }
3679 
3680 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3681 				     u16 *p_adj_grp_size)
3682 {
3683 	unsigned int alloc_size;
3684 	int err;
3685 
3686 	/* Round up the requested group size to the next size supported
3687 	 * by the device and make sure the request can be satisfied.
3688 	 */
3689 	mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3690 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3691 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3692 					      *p_adj_grp_size, &alloc_size);
3693 	if (err)
3694 		return err;
3695 	/* It is possible the allocation results in more allocated
3696 	 * entries than requested. Try to use as much of them as
3697 	 * possible.
3698 	 */
3699 	mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3700 
3701 	return 0;
3702 }
3703 
3704 static void
3705 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3706 {
3707 	int i, g = 0, sum_norm_weight = 0;
3708 	struct mlxsw_sp_nexthop *nh;
3709 
3710 	for (i = 0; i < nhgi->count; i++) {
3711 		nh = &nhgi->nexthops[i];
3712 
3713 		if (!nh->should_offload)
3714 			continue;
3715 		if (g > 0)
3716 			g = gcd(nh->nh_weight, g);
3717 		else
3718 			g = nh->nh_weight;
3719 	}
3720 
3721 	for (i = 0; i < nhgi->count; i++) {
3722 		nh = &nhgi->nexthops[i];
3723 
3724 		if (!nh->should_offload)
3725 			continue;
3726 		nh->norm_nh_weight = nh->nh_weight / g;
3727 		sum_norm_weight += nh->norm_nh_weight;
3728 	}
3729 
3730 	nhgi->sum_norm_weight = sum_norm_weight;
3731 }
3732 
3733 static void
3734 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3735 {
3736 	int i, weight = 0, lower_bound = 0;
3737 	int total = nhgi->sum_norm_weight;
3738 	u16 ecmp_size = nhgi->ecmp_size;
3739 
3740 	for (i = 0; i < nhgi->count; i++) {
3741 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3742 		int upper_bound;
3743 
3744 		if (!nh->should_offload)
3745 			continue;
3746 		weight += nh->norm_nh_weight;
3747 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3748 		nh->num_adj_entries = upper_bound - lower_bound;
3749 		lower_bound = upper_bound;
3750 	}
3751 }
3752 
3753 static struct mlxsw_sp_nexthop *
3754 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3755 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3756 
3757 static void
3758 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3759 					struct mlxsw_sp_nexthop_group *nh_grp)
3760 {
3761 	int i;
3762 
3763 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3764 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3765 
3766 		if (nh->offloaded)
3767 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3768 		else
3769 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3770 	}
3771 }
3772 
3773 static void
3774 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3775 					  struct mlxsw_sp_fib6_entry *fib6_entry)
3776 {
3777 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3778 
3779 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3780 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3781 		struct mlxsw_sp_nexthop *nh;
3782 
3783 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3784 		if (nh && nh->offloaded)
3785 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3786 		else
3787 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3788 	}
3789 }
3790 
3791 static void
3792 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3793 					struct mlxsw_sp_nexthop_group *nh_grp)
3794 {
3795 	struct mlxsw_sp_fib6_entry *fib6_entry;
3796 
3797 	/* Unfortunately, in IPv6 the route and the nexthop are described by
3798 	 * the same struct, so we need to iterate over all the routes using the
3799 	 * nexthop group and set / clear the offload indication for them.
3800 	 */
3801 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3802 			    common.nexthop_group_node)
3803 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3804 }
3805 
3806 static void
3807 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3808 					const struct mlxsw_sp_nexthop *nh,
3809 					u16 bucket_index)
3810 {
3811 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3812 	bool offload = false, trap = false;
3813 
3814 	if (nh->offloaded) {
3815 		if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3816 			trap = true;
3817 		else
3818 			offload = true;
3819 	}
3820 	nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3821 				    bucket_index, offload, trap);
3822 }
3823 
3824 static void
3825 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3826 					   struct mlxsw_sp_nexthop_group *nh_grp)
3827 {
3828 	int i;
3829 
3830 	/* Do not update the flags if the nexthop group is being destroyed
3831 	 * since:
3832 	 * 1. The nexthop objects is being deleted, in which case the flags are
3833 	 * irrelevant.
3834 	 * 2. The nexthop group was replaced by a newer group, in which case
3835 	 * the flags of the nexthop object were already updated based on the
3836 	 * new group.
3837 	 */
3838 	if (nh_grp->can_destroy)
3839 		return;
3840 
3841 	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3842 			     nh_grp->nhgi->adj_index_valid, false);
3843 
3844 	/* Update flags of individual nexthop buckets in case of a resilient
3845 	 * nexthop group.
3846 	 */
3847 	if (!nh_grp->nhgi->is_resilient)
3848 		return;
3849 
3850 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3851 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3852 
3853 		mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3854 	}
3855 }
3856 
3857 static void
3858 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3859 				       struct mlxsw_sp_nexthop_group *nh_grp)
3860 {
3861 	switch (nh_grp->type) {
3862 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3863 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3864 		break;
3865 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3866 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3867 		break;
3868 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3869 		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3870 		break;
3871 	}
3872 }
3873 
3874 static int
3875 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3876 			       struct mlxsw_sp_nexthop_group *nh_grp)
3877 {
3878 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3879 	u16 ecmp_size, old_ecmp_size;
3880 	struct mlxsw_sp_nexthop *nh;
3881 	bool offload_change = false;
3882 	u32 adj_index;
3883 	bool old_adj_index_valid;
3884 	u32 old_adj_index;
3885 	int i, err2, err;
3886 
3887 	if (!nhgi->gateway)
3888 		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3889 
3890 	for (i = 0; i < nhgi->count; i++) {
3891 		nh = &nhgi->nexthops[i];
3892 
3893 		if (nh->should_offload != nh->offloaded) {
3894 			offload_change = true;
3895 			if (nh->should_offload)
3896 				nh->update = 1;
3897 		}
3898 	}
3899 	if (!offload_change) {
3900 		/* Nothing was added or removed, so no need to reallocate. Just
3901 		 * update MAC on existing adjacency indexes.
3902 		 */
3903 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3904 		if (err) {
3905 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3906 			goto set_trap;
3907 		}
3908 		/* Flags of individual nexthop buckets might need to be
3909 		 * updated.
3910 		 */
3911 		mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3912 		return 0;
3913 	}
3914 	mlxsw_sp_nexthop_group_normalize(nhgi);
3915 	if (!nhgi->sum_norm_weight) {
3916 		/* No neigh of this group is connected so we just set
3917 		 * the trap and let everthing flow through kernel.
3918 		 */
3919 		err = 0;
3920 		goto set_trap;
3921 	}
3922 
3923 	ecmp_size = nhgi->sum_norm_weight;
3924 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3925 	if (err)
3926 		/* No valid allocation size available. */
3927 		goto set_trap;
3928 
3929 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3930 				  ecmp_size, &adj_index);
3931 	if (err) {
3932 		/* We ran out of KVD linear space, just set the
3933 		 * trap and let everything flow through kernel.
3934 		 */
3935 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3936 		goto set_trap;
3937 	}
3938 	old_adj_index_valid = nhgi->adj_index_valid;
3939 	old_adj_index = nhgi->adj_index;
3940 	old_ecmp_size = nhgi->ecmp_size;
3941 	nhgi->adj_index_valid = 1;
3942 	nhgi->adj_index = adj_index;
3943 	nhgi->ecmp_size = ecmp_size;
3944 	mlxsw_sp_nexthop_group_rebalance(nhgi);
3945 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
3946 	if (err) {
3947 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3948 		goto set_trap;
3949 	}
3950 
3951 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3952 
3953 	if (!old_adj_index_valid) {
3954 		/* The trap was set for fib entries, so we have to call
3955 		 * fib entry update to unset it and use adjacency index.
3956 		 */
3957 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3958 		if (err) {
3959 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3960 			goto set_trap;
3961 		}
3962 		return 0;
3963 	}
3964 
3965 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3966 					     old_adj_index, old_ecmp_size);
3967 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3968 			   old_ecmp_size, old_adj_index);
3969 	if (err) {
3970 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3971 		goto set_trap;
3972 	}
3973 
3974 	return 0;
3975 
3976 set_trap:
3977 	old_adj_index_valid = nhgi->adj_index_valid;
3978 	nhgi->adj_index_valid = 0;
3979 	for (i = 0; i < nhgi->count; i++) {
3980 		nh = &nhgi->nexthops[i];
3981 		nh->offloaded = 0;
3982 	}
3983 	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3984 	if (err2)
3985 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3986 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3987 	if (old_adj_index_valid)
3988 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3989 				   nhgi->ecmp_size, nhgi->adj_index);
3990 	return err;
3991 }
3992 
3993 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3994 					    bool removing)
3995 {
3996 	if (!removing) {
3997 		nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3998 		nh->should_offload = 1;
3999 	} else if (nh->nhgi->is_resilient) {
4000 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4001 		nh->should_offload = 1;
4002 	} else {
4003 		nh->should_offload = 0;
4004 	}
4005 	nh->update = 1;
4006 }
4007 
4008 static int
4009 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4010 				    struct mlxsw_sp_neigh_entry *neigh_entry)
4011 {
4012 	struct neighbour *n, *old_n = neigh_entry->key.n;
4013 	struct mlxsw_sp_nexthop *nh;
4014 	bool entry_connected;
4015 	u8 nud_state, dead;
4016 	int err;
4017 
4018 	nh = list_first_entry(&neigh_entry->nexthop_list,
4019 			      struct mlxsw_sp_nexthop, neigh_list_node);
4020 
4021 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4022 	if (!n) {
4023 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4024 		if (IS_ERR(n))
4025 			return PTR_ERR(n);
4026 		neigh_event_send(n, NULL);
4027 	}
4028 
4029 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4030 	neigh_entry->key.n = n;
4031 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4032 	if (err)
4033 		goto err_neigh_entry_insert;
4034 
4035 	read_lock_bh(&n->lock);
4036 	nud_state = n->nud_state;
4037 	dead = n->dead;
4038 	read_unlock_bh(&n->lock);
4039 	entry_connected = nud_state & NUD_VALID && !dead;
4040 
4041 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4042 			    neigh_list_node) {
4043 		neigh_release(old_n);
4044 		neigh_clone(n);
4045 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4046 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4047 	}
4048 
4049 	neigh_release(n);
4050 
4051 	return 0;
4052 
4053 err_neigh_entry_insert:
4054 	neigh_entry->key.n = old_n;
4055 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4056 	neigh_release(n);
4057 	return err;
4058 }
4059 
4060 static void
4061 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4062 			      struct mlxsw_sp_neigh_entry *neigh_entry,
4063 			      bool removing, bool dead)
4064 {
4065 	struct mlxsw_sp_nexthop *nh;
4066 
4067 	if (list_empty(&neigh_entry->nexthop_list))
4068 		return;
4069 
4070 	if (dead) {
4071 		int err;
4072 
4073 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4074 							  neigh_entry);
4075 		if (err)
4076 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4077 		return;
4078 	}
4079 
4080 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4081 			    neigh_list_node) {
4082 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4083 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4084 	}
4085 }
4086 
4087 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
4088 				      struct mlxsw_sp_rif *rif)
4089 {
4090 	if (nh->rif)
4091 		return;
4092 
4093 	nh->rif = rif;
4094 	list_add(&nh->rif_list_node, &rif->nexthop_list);
4095 }
4096 
4097 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
4098 {
4099 	if (!nh->rif)
4100 		return;
4101 
4102 	list_del(&nh->rif_list_node);
4103 	nh->rif = NULL;
4104 }
4105 
4106 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4107 				       struct mlxsw_sp_nexthop *nh)
4108 {
4109 	struct mlxsw_sp_neigh_entry *neigh_entry;
4110 	struct neighbour *n;
4111 	u8 nud_state, dead;
4112 	int err;
4113 
4114 	if (!nh->nhgi->gateway || nh->neigh_entry)
4115 		return 0;
4116 
4117 	/* Take a reference of neigh here ensuring that neigh would
4118 	 * not be destructed before the nexthop entry is finished.
4119 	 * The reference is taken either in neigh_lookup() or
4120 	 * in neigh_create() in case n is not found.
4121 	 */
4122 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4123 	if (!n) {
4124 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4125 		if (IS_ERR(n))
4126 			return PTR_ERR(n);
4127 		neigh_event_send(n, NULL);
4128 	}
4129 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4130 	if (!neigh_entry) {
4131 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4132 		if (IS_ERR(neigh_entry)) {
4133 			err = -EINVAL;
4134 			goto err_neigh_entry_create;
4135 		}
4136 	}
4137 
4138 	/* If that is the first nexthop connected to that neigh, add to
4139 	 * nexthop_neighs_list
4140 	 */
4141 	if (list_empty(&neigh_entry->nexthop_list))
4142 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4143 			      &mlxsw_sp->router->nexthop_neighs_list);
4144 
4145 	nh->neigh_entry = neigh_entry;
4146 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4147 	read_lock_bh(&n->lock);
4148 	nud_state = n->nud_state;
4149 	dead = n->dead;
4150 	read_unlock_bh(&n->lock);
4151 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4152 
4153 	return 0;
4154 
4155 err_neigh_entry_create:
4156 	neigh_release(n);
4157 	return err;
4158 }
4159 
4160 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4161 					struct mlxsw_sp_nexthop *nh)
4162 {
4163 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4164 	struct neighbour *n;
4165 
4166 	if (!neigh_entry)
4167 		return;
4168 	n = neigh_entry->key.n;
4169 
4170 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4171 	list_del(&nh->neigh_list_node);
4172 	nh->neigh_entry = NULL;
4173 
4174 	/* If that is the last nexthop connected to that neigh, remove from
4175 	 * nexthop_neighs_list
4176 	 */
4177 	if (list_empty(&neigh_entry->nexthop_list))
4178 		list_del(&neigh_entry->nexthop_neighs_list_node);
4179 
4180 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4181 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4182 
4183 	neigh_release(n);
4184 }
4185 
4186 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4187 {
4188 	struct net_device *ul_dev;
4189 	bool is_up;
4190 
4191 	rcu_read_lock();
4192 	ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4193 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4194 	rcu_read_unlock();
4195 
4196 	return is_up;
4197 }
4198 
4199 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4200 				       struct mlxsw_sp_nexthop *nh,
4201 				       struct mlxsw_sp_ipip_entry *ipip_entry)
4202 {
4203 	bool removing;
4204 
4205 	if (!nh->nhgi->gateway || nh->ipip_entry)
4206 		return;
4207 
4208 	nh->ipip_entry = ipip_entry;
4209 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4210 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
4211 	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4212 }
4213 
4214 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4215 				       struct mlxsw_sp_nexthop *nh)
4216 {
4217 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4218 
4219 	if (!ipip_entry)
4220 		return;
4221 
4222 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4223 	nh->ipip_entry = NULL;
4224 }
4225 
4226 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4227 					const struct fib_nh *fib_nh,
4228 					enum mlxsw_sp_ipip_type *p_ipipt)
4229 {
4230 	struct net_device *dev = fib_nh->fib_nh_dev;
4231 
4232 	return dev &&
4233 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4234 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4235 }
4236 
4237 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4238 				      struct mlxsw_sp_nexthop *nh,
4239 				      const struct net_device *dev)
4240 {
4241 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4242 	struct mlxsw_sp_ipip_entry *ipip_entry;
4243 	struct mlxsw_sp_rif *rif;
4244 	int err;
4245 
4246 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4247 	if (ipip_entry) {
4248 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4249 		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4250 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4251 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4252 			return 0;
4253 		}
4254 	}
4255 
4256 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4257 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4258 	if (!rif)
4259 		return 0;
4260 
4261 	mlxsw_sp_nexthop_rif_init(nh, rif);
4262 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4263 	if (err)
4264 		goto err_neigh_init;
4265 
4266 	return 0;
4267 
4268 err_neigh_init:
4269 	mlxsw_sp_nexthop_rif_fini(nh);
4270 	return err;
4271 }
4272 
4273 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4274 				       struct mlxsw_sp_nexthop *nh)
4275 {
4276 	switch (nh->type) {
4277 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4278 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4279 		mlxsw_sp_nexthop_rif_fini(nh);
4280 		break;
4281 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4282 		mlxsw_sp_nexthop_rif_fini(nh);
4283 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4284 		break;
4285 	}
4286 }
4287 
4288 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4289 				  struct mlxsw_sp_nexthop_group *nh_grp,
4290 				  struct mlxsw_sp_nexthop *nh,
4291 				  struct fib_nh *fib_nh)
4292 {
4293 	struct net_device *dev = fib_nh->fib_nh_dev;
4294 	struct in_device *in_dev;
4295 	int err;
4296 
4297 	nh->nhgi = nh_grp->nhgi;
4298 	nh->key.fib_nh = fib_nh;
4299 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4300 	nh->nh_weight = fib_nh->fib_nh_weight;
4301 #else
4302 	nh->nh_weight = 1;
4303 #endif
4304 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4305 	nh->neigh_tbl = &arp_tbl;
4306 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4307 	if (err)
4308 		return err;
4309 
4310 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4311 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4312 
4313 	if (!dev)
4314 		return 0;
4315 	nh->ifindex = dev->ifindex;
4316 
4317 	rcu_read_lock();
4318 	in_dev = __in_dev_get_rcu(dev);
4319 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4320 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4321 		rcu_read_unlock();
4322 		return 0;
4323 	}
4324 	rcu_read_unlock();
4325 
4326 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4327 	if (err)
4328 		goto err_nexthop_neigh_init;
4329 
4330 	return 0;
4331 
4332 err_nexthop_neigh_init:
4333 	list_del(&nh->router_list_node);
4334 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4335 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4336 	return err;
4337 }
4338 
4339 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4340 				   struct mlxsw_sp_nexthop *nh)
4341 {
4342 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4343 	list_del(&nh->router_list_node);
4344 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4345 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4346 }
4347 
4348 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4349 				    unsigned long event, struct fib_nh *fib_nh)
4350 {
4351 	struct mlxsw_sp_nexthop_key key;
4352 	struct mlxsw_sp_nexthop *nh;
4353 
4354 	key.fib_nh = fib_nh;
4355 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4356 	if (!nh)
4357 		return;
4358 
4359 	switch (event) {
4360 	case FIB_EVENT_NH_ADD:
4361 		mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4362 		break;
4363 	case FIB_EVENT_NH_DEL:
4364 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4365 		break;
4366 	}
4367 
4368 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4369 }
4370 
4371 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4372 					struct mlxsw_sp_rif *rif)
4373 {
4374 	struct mlxsw_sp_nexthop *nh;
4375 	bool removing;
4376 
4377 	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4378 		switch (nh->type) {
4379 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
4380 			removing = false;
4381 			break;
4382 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4383 			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4384 			break;
4385 		default:
4386 			WARN_ON(1);
4387 			continue;
4388 		}
4389 
4390 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4391 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4392 	}
4393 }
4394 
4395 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4396 					 struct mlxsw_sp_rif *old_rif,
4397 					 struct mlxsw_sp_rif *new_rif)
4398 {
4399 	struct mlxsw_sp_nexthop *nh;
4400 
4401 	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4402 	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4403 		nh->rif = new_rif;
4404 	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4405 }
4406 
4407 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4408 					   struct mlxsw_sp_rif *rif)
4409 {
4410 	struct mlxsw_sp_nexthop *nh, *tmp;
4411 
4412 	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4413 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4414 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4415 	}
4416 }
4417 
4418 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4419 {
4420 	enum mlxsw_reg_ratr_trap_action trap_action;
4421 	char ratr_pl[MLXSW_REG_RATR_LEN];
4422 	int err;
4423 
4424 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4425 				  &mlxsw_sp->router->adj_trap_index);
4426 	if (err)
4427 		return err;
4428 
4429 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4430 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4431 			    MLXSW_REG_RATR_TYPE_ETHERNET,
4432 			    mlxsw_sp->router->adj_trap_index,
4433 			    mlxsw_sp->router->lb_rif_index);
4434 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4435 	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4436 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4437 	if (err)
4438 		goto err_ratr_write;
4439 
4440 	return 0;
4441 
4442 err_ratr_write:
4443 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4444 			   mlxsw_sp->router->adj_trap_index);
4445 	return err;
4446 }
4447 
4448 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4449 {
4450 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4451 			   mlxsw_sp->router->adj_trap_index);
4452 }
4453 
4454 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4455 {
4456 	int err;
4457 
4458 	if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4459 		return 0;
4460 
4461 	err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4462 	if (err)
4463 		return err;
4464 
4465 	refcount_set(&mlxsw_sp->router->num_groups, 1);
4466 
4467 	return 0;
4468 }
4469 
4470 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4471 {
4472 	if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4473 		return;
4474 
4475 	mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4476 }
4477 
4478 static void
4479 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4480 			     const struct mlxsw_sp_nexthop_group *nh_grp,
4481 			     unsigned long *activity)
4482 {
4483 	char *ratrad_pl;
4484 	int i, err;
4485 
4486 	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4487 	if (!ratrad_pl)
4488 		return;
4489 
4490 	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4491 			      nh_grp->nhgi->count);
4492 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4493 	if (err)
4494 		goto out;
4495 
4496 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4497 		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4498 			continue;
4499 		bitmap_set(activity, i, 1);
4500 	}
4501 
4502 out:
4503 	kfree(ratrad_pl);
4504 }
4505 
4506 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4507 
4508 static void
4509 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4510 				const struct mlxsw_sp_nexthop_group *nh_grp)
4511 {
4512 	unsigned long *activity;
4513 
4514 	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4515 	if (!activity)
4516 		return;
4517 
4518 	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4519 	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4520 					nh_grp->nhgi->count, activity);
4521 
4522 	bitmap_free(activity);
4523 }
4524 
4525 static void
4526 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4527 {
4528 	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4529 
4530 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4531 			       msecs_to_jiffies(interval));
4532 }
4533 
4534 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4535 {
4536 	struct mlxsw_sp_nexthop_group_info *nhgi;
4537 	struct mlxsw_sp_router *router;
4538 	bool reschedule = false;
4539 
4540 	router = container_of(work, struct mlxsw_sp_router,
4541 			      nh_grp_activity_dw.work);
4542 
4543 	mutex_lock(&router->lock);
4544 
4545 	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4546 		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4547 		reschedule = true;
4548 	}
4549 
4550 	mutex_unlock(&router->lock);
4551 
4552 	if (!reschedule)
4553 		return;
4554 	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4555 }
4556 
4557 static int
4558 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4559 				     const struct nh_notifier_single_info *nh,
4560 				     struct netlink_ext_ack *extack)
4561 {
4562 	int err = -EINVAL;
4563 
4564 	if (nh->is_fdb)
4565 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4566 	else if (nh->has_encap)
4567 		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4568 	else
4569 		err = 0;
4570 
4571 	return err;
4572 }
4573 
4574 static int
4575 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4576 					  const struct nh_notifier_single_info *nh,
4577 					  struct netlink_ext_ack *extack)
4578 {
4579 	int err;
4580 
4581 	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4582 	if (err)
4583 		return err;
4584 
4585 	/* Device only nexthops with an IPIP device are programmed as
4586 	 * encapsulating adjacency entries.
4587 	 */
4588 	if (!nh->gw_family && !nh->is_reject &&
4589 	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4590 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4591 		return -EINVAL;
4592 	}
4593 
4594 	return 0;
4595 }
4596 
4597 static int
4598 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4599 				    const struct nh_notifier_grp_info *nh_grp,
4600 				    struct netlink_ext_ack *extack)
4601 {
4602 	int i;
4603 
4604 	if (nh_grp->is_fdb) {
4605 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4606 		return -EINVAL;
4607 	}
4608 
4609 	for (i = 0; i < nh_grp->num_nh; i++) {
4610 		const struct nh_notifier_single_info *nh;
4611 		int err;
4612 
4613 		nh = &nh_grp->nh_entries[i].nh;
4614 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4615 								extack);
4616 		if (err)
4617 			return err;
4618 	}
4619 
4620 	return 0;
4621 }
4622 
4623 static int
4624 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4625 					     const struct nh_notifier_res_table_info *nh_res_table,
4626 					     struct netlink_ext_ack *extack)
4627 {
4628 	unsigned int alloc_size;
4629 	bool valid_size = false;
4630 	int err, i;
4631 
4632 	if (nh_res_table->num_nh_buckets < 32) {
4633 		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4634 		return -EINVAL;
4635 	}
4636 
4637 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4638 		const struct mlxsw_sp_adj_grp_size_range *size_range;
4639 
4640 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4641 
4642 		if (nh_res_table->num_nh_buckets >= size_range->start &&
4643 		    nh_res_table->num_nh_buckets <= size_range->end) {
4644 			valid_size = true;
4645 			break;
4646 		}
4647 	}
4648 
4649 	if (!valid_size) {
4650 		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4651 		return -EINVAL;
4652 	}
4653 
4654 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4655 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4656 					      nh_res_table->num_nh_buckets,
4657 					      &alloc_size);
4658 	if (err || nh_res_table->num_nh_buckets != alloc_size) {
4659 		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4660 		return -EINVAL;
4661 	}
4662 
4663 	return 0;
4664 }
4665 
4666 static int
4667 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4668 					const struct nh_notifier_res_table_info *nh_res_table,
4669 					struct netlink_ext_ack *extack)
4670 {
4671 	int err;
4672 	u16 i;
4673 
4674 	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4675 							   nh_res_table,
4676 							   extack);
4677 	if (err)
4678 		return err;
4679 
4680 	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4681 		const struct nh_notifier_single_info *nh;
4682 		int err;
4683 
4684 		nh = &nh_res_table->nhs[i];
4685 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4686 								extack);
4687 		if (err)
4688 			return err;
4689 	}
4690 
4691 	return 0;
4692 }
4693 
4694 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4695 					 unsigned long event,
4696 					 struct nh_notifier_info *info)
4697 {
4698 	struct nh_notifier_single_info *nh;
4699 
4700 	if (event != NEXTHOP_EVENT_REPLACE &&
4701 	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4702 	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
4703 		return 0;
4704 
4705 	switch (info->type) {
4706 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4707 		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4708 							    info->extack);
4709 	case NH_NOTIFIER_INFO_TYPE_GRP:
4710 		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4711 							   info->nh_grp,
4712 							   info->extack);
4713 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4714 		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4715 							       info->nh_res_table,
4716 							       info->extack);
4717 	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4718 		nh = &info->nh_res_bucket->new_nh;
4719 		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4720 								 info->extack);
4721 	default:
4722 		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4723 		return -EOPNOTSUPP;
4724 	}
4725 }
4726 
4727 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4728 					    const struct nh_notifier_info *info)
4729 {
4730 	const struct net_device *dev;
4731 
4732 	switch (info->type) {
4733 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4734 		dev = info->nh->dev;
4735 		return info->nh->gw_family || info->nh->is_reject ||
4736 		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4737 	case NH_NOTIFIER_INFO_TYPE_GRP:
4738 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4739 		/* Already validated earlier. */
4740 		return true;
4741 	default:
4742 		return false;
4743 	}
4744 }
4745 
4746 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4747 						struct mlxsw_sp_nexthop *nh)
4748 {
4749 	u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
4750 
4751 	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4752 	nh->should_offload = 1;
4753 	/* While nexthops that discard packets do not forward packets
4754 	 * via an egress RIF, they still need to be programmed using a
4755 	 * valid RIF, so use the loopback RIF created during init.
4756 	 */
4757 	nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
4758 }
4759 
4760 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4761 						struct mlxsw_sp_nexthop *nh)
4762 {
4763 	nh->rif = NULL;
4764 	nh->should_offload = 0;
4765 }
4766 
4767 static int
4768 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4769 			  struct mlxsw_sp_nexthop_group *nh_grp,
4770 			  struct mlxsw_sp_nexthop *nh,
4771 			  struct nh_notifier_single_info *nh_obj, int weight)
4772 {
4773 	struct net_device *dev = nh_obj->dev;
4774 	int err;
4775 
4776 	nh->nhgi = nh_grp->nhgi;
4777 	nh->nh_weight = weight;
4778 
4779 	switch (nh_obj->gw_family) {
4780 	case AF_INET:
4781 		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4782 		nh->neigh_tbl = &arp_tbl;
4783 		break;
4784 	case AF_INET6:
4785 		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
4786 #if IS_ENABLED(CONFIG_IPV6)
4787 		nh->neigh_tbl = &nd_tbl;
4788 #endif
4789 		break;
4790 	}
4791 
4792 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4793 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4794 	nh->ifindex = dev->ifindex;
4795 
4796 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4797 	if (err)
4798 		goto err_type_init;
4799 
4800 	if (nh_obj->is_reject)
4801 		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
4802 
4803 	/* In a resilient nexthop group, all the nexthops must be written to
4804 	 * the adjacency table. Even if they do not have a valid neighbour or
4805 	 * RIF.
4806 	 */
4807 	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
4808 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4809 		nh->should_offload = 1;
4810 	}
4811 
4812 	return 0;
4813 
4814 err_type_init:
4815 	list_del(&nh->router_list_node);
4816 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4817 	return err;
4818 }
4819 
4820 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
4821 				      struct mlxsw_sp_nexthop *nh)
4822 {
4823 	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
4824 		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
4825 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4826 	list_del(&nh->router_list_node);
4827 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4828 	nh->should_offload = 0;
4829 }
4830 
4831 static int
4832 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
4833 				     struct mlxsw_sp_nexthop_group *nh_grp,
4834 				     struct nh_notifier_info *info)
4835 {
4836 	struct mlxsw_sp_nexthop_group_info *nhgi;
4837 	struct mlxsw_sp_nexthop *nh;
4838 	bool is_resilient = false;
4839 	unsigned int nhs;
4840 	int err, i;
4841 
4842 	switch (info->type) {
4843 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4844 		nhs = 1;
4845 		break;
4846 	case NH_NOTIFIER_INFO_TYPE_GRP:
4847 		nhs = info->nh_grp->num_nh;
4848 		break;
4849 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4850 		nhs = info->nh_res_table->num_nh_buckets;
4851 		is_resilient = true;
4852 		break;
4853 	default:
4854 		return -EINVAL;
4855 	}
4856 
4857 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4858 	if (!nhgi)
4859 		return -ENOMEM;
4860 	nh_grp->nhgi = nhgi;
4861 	nhgi->nh_grp = nh_grp;
4862 	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
4863 	nhgi->is_resilient = is_resilient;
4864 	nhgi->count = nhs;
4865 	for (i = 0; i < nhgi->count; i++) {
4866 		struct nh_notifier_single_info *nh_obj;
4867 		int weight;
4868 
4869 		nh = &nhgi->nexthops[i];
4870 		switch (info->type) {
4871 		case NH_NOTIFIER_INFO_TYPE_SINGLE:
4872 			nh_obj = info->nh;
4873 			weight = 1;
4874 			break;
4875 		case NH_NOTIFIER_INFO_TYPE_GRP:
4876 			nh_obj = &info->nh_grp->nh_entries[i].nh;
4877 			weight = info->nh_grp->nh_entries[i].weight;
4878 			break;
4879 		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4880 			nh_obj = &info->nh_res_table->nhs[i];
4881 			weight = 1;
4882 			break;
4883 		default:
4884 			err = -EINVAL;
4885 			goto err_nexthop_obj_init;
4886 		}
4887 		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
4888 						weight);
4889 		if (err)
4890 			goto err_nexthop_obj_init;
4891 	}
4892 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
4893 	if (err)
4894 		goto err_group_inc;
4895 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4896 	if (err) {
4897 		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
4898 		goto err_group_refresh;
4899 	}
4900 
4901 	/* Add resilient nexthop groups to a list so that the activity of their
4902 	 * nexthop buckets will be periodically queried and cleared.
4903 	 */
4904 	if (nhgi->is_resilient) {
4905 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4906 			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
4907 		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
4908 	}
4909 
4910 	return 0;
4911 
4912 err_group_refresh:
4913 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
4914 err_group_inc:
4915 	i = nhgi->count;
4916 err_nexthop_obj_init:
4917 	for (i--; i >= 0; i--) {
4918 		nh = &nhgi->nexthops[i];
4919 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4920 	}
4921 	kfree(nhgi);
4922 	return err;
4923 }
4924 
4925 static void
4926 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
4927 				     struct mlxsw_sp_nexthop_group *nh_grp)
4928 {
4929 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4930 	struct mlxsw_sp_router *router = mlxsw_sp->router;
4931 	int i;
4932 
4933 	if (nhgi->is_resilient) {
4934 		list_del(&nhgi->list);
4935 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4936 			cancel_delayed_work(&router->nh_grp_activity_dw);
4937 	}
4938 
4939 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
4940 	for (i = nhgi->count - 1; i >= 0; i--) {
4941 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4942 
4943 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4944 	}
4945 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4946 	WARN_ON_ONCE(nhgi->adj_index_valid);
4947 	kfree(nhgi);
4948 }
4949 
4950 static struct mlxsw_sp_nexthop_group *
4951 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
4952 				  struct nh_notifier_info *info)
4953 {
4954 	struct mlxsw_sp_nexthop_group *nh_grp;
4955 	int err;
4956 
4957 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
4958 	if (!nh_grp)
4959 		return ERR_PTR(-ENOMEM);
4960 	INIT_LIST_HEAD(&nh_grp->vr_list);
4961 	err = rhashtable_init(&nh_grp->vr_ht,
4962 			      &mlxsw_sp_nexthop_group_vr_ht_params);
4963 	if (err)
4964 		goto err_nexthop_group_vr_ht_init;
4965 	INIT_LIST_HEAD(&nh_grp->fib_list);
4966 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
4967 	nh_grp->obj.id = info->id;
4968 
4969 	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
4970 	if (err)
4971 		goto err_nexthop_group_info_init;
4972 
4973 	nh_grp->can_destroy = false;
4974 
4975 	return nh_grp;
4976 
4977 err_nexthop_group_info_init:
4978 	rhashtable_destroy(&nh_grp->vr_ht);
4979 err_nexthop_group_vr_ht_init:
4980 	kfree(nh_grp);
4981 	return ERR_PTR(err);
4982 }
4983 
4984 static void
4985 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
4986 				   struct mlxsw_sp_nexthop_group *nh_grp)
4987 {
4988 	if (!nh_grp->can_destroy)
4989 		return;
4990 	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
4991 	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
4992 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
4993 	rhashtable_destroy(&nh_grp->vr_ht);
4994 	kfree(nh_grp);
4995 }
4996 
4997 static struct mlxsw_sp_nexthop_group *
4998 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
4999 {
5000 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5001 
5002 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5003 	cmp_arg.id = id;
5004 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5005 				      &cmp_arg,
5006 				      mlxsw_sp_nexthop_group_ht_params);
5007 }
5008 
5009 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5010 					  struct mlxsw_sp_nexthop_group *nh_grp)
5011 {
5012 	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5013 }
5014 
5015 static int
5016 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5017 				   struct mlxsw_sp_nexthop_group *nh_grp,
5018 				   struct mlxsw_sp_nexthop_group *old_nh_grp,
5019 				   struct netlink_ext_ack *extack)
5020 {
5021 	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5022 	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5023 	int err;
5024 
5025 	old_nh_grp->nhgi = new_nhgi;
5026 	new_nhgi->nh_grp = old_nh_grp;
5027 	nh_grp->nhgi = old_nhgi;
5028 	old_nhgi->nh_grp = nh_grp;
5029 
5030 	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5031 		/* Both the old adjacency index and the new one are valid.
5032 		 * Routes are currently using the old one. Tell the device to
5033 		 * replace the old adjacency index with the new one.
5034 		 */
5035 		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5036 						     old_nhgi->adj_index,
5037 						     old_nhgi->ecmp_size);
5038 		if (err) {
5039 			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5040 			goto err_out;
5041 		}
5042 	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5043 		/* The old adjacency index is valid, while the new one is not.
5044 		 * Iterate over all the routes using the group and change them
5045 		 * to trap packets to the CPU.
5046 		 */
5047 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5048 		if (err) {
5049 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5050 			goto err_out;
5051 		}
5052 	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5053 		/* The old adjacency index is invalid, while the new one is.
5054 		 * Iterate over all the routes using the group and change them
5055 		 * to forward packets using the new valid index.
5056 		 */
5057 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5058 		if (err) {
5059 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5060 			goto err_out;
5061 		}
5062 	}
5063 
5064 	/* Make sure the flags are set / cleared based on the new nexthop group
5065 	 * information.
5066 	 */
5067 	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5068 
5069 	/* At this point 'nh_grp' is just a shell that is not used by anyone
5070 	 * and its nexthop group info is the old info that was just replaced
5071 	 * with the new one. Remove it.
5072 	 */
5073 	nh_grp->can_destroy = true;
5074 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5075 
5076 	return 0;
5077 
5078 err_out:
5079 	old_nhgi->nh_grp = old_nh_grp;
5080 	nh_grp->nhgi = new_nhgi;
5081 	new_nhgi->nh_grp = nh_grp;
5082 	old_nh_grp->nhgi = old_nhgi;
5083 	return err;
5084 }
5085 
5086 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5087 				    struct nh_notifier_info *info)
5088 {
5089 	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5090 	struct netlink_ext_ack *extack = info->extack;
5091 	int err;
5092 
5093 	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5094 	if (IS_ERR(nh_grp))
5095 		return PTR_ERR(nh_grp);
5096 
5097 	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5098 	if (!old_nh_grp)
5099 		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5100 	else
5101 		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5102 							 old_nh_grp, extack);
5103 
5104 	if (err) {
5105 		nh_grp->can_destroy = true;
5106 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5107 	}
5108 
5109 	return err;
5110 }
5111 
5112 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5113 				     struct nh_notifier_info *info)
5114 {
5115 	struct mlxsw_sp_nexthop_group *nh_grp;
5116 
5117 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5118 	if (!nh_grp)
5119 		return;
5120 
5121 	nh_grp->can_destroy = true;
5122 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5123 
5124 	/* If the group still has routes using it, then defer the delete
5125 	 * operation until the last route using it is deleted.
5126 	 */
5127 	if (!list_empty(&nh_grp->fib_list))
5128 		return;
5129 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5130 }
5131 
5132 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5133 					     u32 adj_index, char *ratr_pl)
5134 {
5135 	MLXSW_REG_ZERO(ratr, ratr_pl);
5136 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5137 	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5138 	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5139 
5140 	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5141 }
5142 
5143 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5144 {
5145 	/* Clear the opcode and activity on both the old and new payload as
5146 	 * they are irrelevant for the comparison.
5147 	 */
5148 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5149 	mlxsw_reg_ratr_a_set(ratr_pl, 0);
5150 	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5151 	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5152 
5153 	/* If the contents of the adjacency entry are consistent with the
5154 	 * replacement request, then replacement was successful.
5155 	 */
5156 	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5157 		return 0;
5158 
5159 	return -EINVAL;
5160 }
5161 
5162 static int
5163 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5164 				       struct mlxsw_sp_nexthop *nh,
5165 				       struct nh_notifier_info *info)
5166 {
5167 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5168 	struct netlink_ext_ack *extack = info->extack;
5169 	bool force = info->nh_res_bucket->force;
5170 	char ratr_pl_new[MLXSW_REG_RATR_LEN];
5171 	char ratr_pl[MLXSW_REG_RATR_LEN];
5172 	u32 adj_index;
5173 	int err;
5174 
5175 	/* No point in trying an atomic replacement if the idle timer interval
5176 	 * is smaller than the interval in which we query and clear activity.
5177 	 */
5178 	if (!force && info->nh_res_bucket->idle_timer_ms <
5179 	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5180 		force = true;
5181 
5182 	adj_index = nh->nhgi->adj_index + bucket_index;
5183 	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5184 	if (err) {
5185 		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5186 		return err;
5187 	}
5188 
5189 	if (!force) {
5190 		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5191 							ratr_pl_new);
5192 		if (err) {
5193 			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5194 			return err;
5195 		}
5196 
5197 		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5198 		if (err) {
5199 			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5200 			return err;
5201 		}
5202 	}
5203 
5204 	nh->update = 0;
5205 	nh->offloaded = 1;
5206 	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5207 
5208 	return 0;
5209 }
5210 
5211 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5212 					       struct nh_notifier_info *info)
5213 {
5214 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5215 	struct netlink_ext_ack *extack = info->extack;
5216 	struct mlxsw_sp_nexthop_group_info *nhgi;
5217 	struct nh_notifier_single_info *nh_obj;
5218 	struct mlxsw_sp_nexthop_group *nh_grp;
5219 	struct mlxsw_sp_nexthop *nh;
5220 	int err;
5221 
5222 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5223 	if (!nh_grp) {
5224 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5225 		return -EINVAL;
5226 	}
5227 
5228 	nhgi = nh_grp->nhgi;
5229 
5230 	if (bucket_index >= nhgi->count) {
5231 		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5232 		return -EINVAL;
5233 	}
5234 
5235 	nh = &nhgi->nexthops[bucket_index];
5236 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5237 
5238 	nh_obj = &info->nh_res_bucket->new_nh;
5239 	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5240 	if (err) {
5241 		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5242 		goto err_nexthop_obj_init;
5243 	}
5244 
5245 	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5246 	if (err)
5247 		goto err_nexthop_obj_bucket_adj_update;
5248 
5249 	return 0;
5250 
5251 err_nexthop_obj_bucket_adj_update:
5252 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5253 err_nexthop_obj_init:
5254 	nh_obj = &info->nh_res_bucket->old_nh;
5255 	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5256 	/* The old adjacency entry was not overwritten */
5257 	nh->update = 0;
5258 	nh->offloaded = 1;
5259 	return err;
5260 }
5261 
5262 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5263 				      unsigned long event, void *ptr)
5264 {
5265 	struct nh_notifier_info *info = ptr;
5266 	struct mlxsw_sp_router *router;
5267 	int err = 0;
5268 
5269 	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5270 	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5271 	if (err)
5272 		goto out;
5273 
5274 	mutex_lock(&router->lock);
5275 
5276 	switch (event) {
5277 	case NEXTHOP_EVENT_REPLACE:
5278 		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5279 		break;
5280 	case NEXTHOP_EVENT_DEL:
5281 		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5282 		break;
5283 	case NEXTHOP_EVENT_BUCKET_REPLACE:
5284 		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5285 							  info);
5286 		break;
5287 	default:
5288 		break;
5289 	}
5290 
5291 	mutex_unlock(&router->lock);
5292 
5293 out:
5294 	return notifier_from_errno(err);
5295 }
5296 
5297 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5298 				   struct fib_info *fi)
5299 {
5300 	const struct fib_nh *nh = fib_info_nh(fi, 0);
5301 
5302 	return nh->fib_nh_gw_family ||
5303 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5304 }
5305 
5306 static int
5307 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5308 				  struct mlxsw_sp_nexthop_group *nh_grp)
5309 {
5310 	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5311 	struct mlxsw_sp_nexthop_group_info *nhgi;
5312 	struct mlxsw_sp_nexthop *nh;
5313 	int err, i;
5314 
5315 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5316 	if (!nhgi)
5317 		return -ENOMEM;
5318 	nh_grp->nhgi = nhgi;
5319 	nhgi->nh_grp = nh_grp;
5320 	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5321 	nhgi->count = nhs;
5322 	for (i = 0; i < nhgi->count; i++) {
5323 		struct fib_nh *fib_nh;
5324 
5325 		nh = &nhgi->nexthops[i];
5326 		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5327 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5328 		if (err)
5329 			goto err_nexthop4_init;
5330 	}
5331 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5332 	if (err)
5333 		goto err_group_inc;
5334 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5335 	if (err)
5336 		goto err_group_refresh;
5337 
5338 	return 0;
5339 
5340 err_group_refresh:
5341 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5342 err_group_inc:
5343 	i = nhgi->count;
5344 err_nexthop4_init:
5345 	for (i--; i >= 0; i--) {
5346 		nh = &nhgi->nexthops[i];
5347 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5348 	}
5349 	kfree(nhgi);
5350 	return err;
5351 }
5352 
5353 static void
5354 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5355 				  struct mlxsw_sp_nexthop_group *nh_grp)
5356 {
5357 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5358 	int i;
5359 
5360 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5361 	for (i = nhgi->count - 1; i >= 0; i--) {
5362 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5363 
5364 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5365 	}
5366 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5367 	WARN_ON_ONCE(nhgi->adj_index_valid);
5368 	kfree(nhgi);
5369 }
5370 
5371 static struct mlxsw_sp_nexthop_group *
5372 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5373 {
5374 	struct mlxsw_sp_nexthop_group *nh_grp;
5375 	int err;
5376 
5377 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5378 	if (!nh_grp)
5379 		return ERR_PTR(-ENOMEM);
5380 	INIT_LIST_HEAD(&nh_grp->vr_list);
5381 	err = rhashtable_init(&nh_grp->vr_ht,
5382 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5383 	if (err)
5384 		goto err_nexthop_group_vr_ht_init;
5385 	INIT_LIST_HEAD(&nh_grp->fib_list);
5386 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5387 	nh_grp->ipv4.fi = fi;
5388 	fib_info_hold(fi);
5389 
5390 	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5391 	if (err)
5392 		goto err_nexthop_group_info_init;
5393 
5394 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5395 	if (err)
5396 		goto err_nexthop_group_insert;
5397 
5398 	nh_grp->can_destroy = true;
5399 
5400 	return nh_grp;
5401 
5402 err_nexthop_group_insert:
5403 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5404 err_nexthop_group_info_init:
5405 	fib_info_put(fi);
5406 	rhashtable_destroy(&nh_grp->vr_ht);
5407 err_nexthop_group_vr_ht_init:
5408 	kfree(nh_grp);
5409 	return ERR_PTR(err);
5410 }
5411 
5412 static void
5413 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5414 				struct mlxsw_sp_nexthop_group *nh_grp)
5415 {
5416 	if (!nh_grp->can_destroy)
5417 		return;
5418 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5419 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5420 	fib_info_put(nh_grp->ipv4.fi);
5421 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5422 	rhashtable_destroy(&nh_grp->vr_ht);
5423 	kfree(nh_grp);
5424 }
5425 
5426 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5427 				       struct mlxsw_sp_fib_entry *fib_entry,
5428 				       struct fib_info *fi)
5429 {
5430 	struct mlxsw_sp_nexthop_group *nh_grp;
5431 
5432 	if (fi->nh) {
5433 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5434 							   fi->nh->id);
5435 		if (WARN_ON_ONCE(!nh_grp))
5436 			return -EINVAL;
5437 		goto out;
5438 	}
5439 
5440 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5441 	if (!nh_grp) {
5442 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5443 		if (IS_ERR(nh_grp))
5444 			return PTR_ERR(nh_grp);
5445 	}
5446 out:
5447 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5448 	fib_entry->nh_group = nh_grp;
5449 	return 0;
5450 }
5451 
5452 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5453 					struct mlxsw_sp_fib_entry *fib_entry)
5454 {
5455 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5456 
5457 	list_del(&fib_entry->nexthop_group_node);
5458 	if (!list_empty(&nh_grp->fib_list))
5459 		return;
5460 
5461 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5462 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5463 		return;
5464 	}
5465 
5466 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5467 }
5468 
5469 static bool
5470 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5471 {
5472 	struct mlxsw_sp_fib4_entry *fib4_entry;
5473 
5474 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5475 				  common);
5476 	return !fib4_entry->dscp;
5477 }
5478 
5479 static bool
5480 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5481 {
5482 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5483 
5484 	switch (fib_entry->fib_node->fib->proto) {
5485 	case MLXSW_SP_L3_PROTO_IPV4:
5486 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5487 			return false;
5488 		break;
5489 	case MLXSW_SP_L3_PROTO_IPV6:
5490 		break;
5491 	}
5492 
5493 	switch (fib_entry->type) {
5494 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5495 		return !!nh_group->nhgi->adj_index_valid;
5496 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5497 		return !!nh_group->nhgi->nh_rif;
5498 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5499 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5500 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5501 		return true;
5502 	default:
5503 		return false;
5504 	}
5505 }
5506 
5507 static struct mlxsw_sp_nexthop *
5508 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5509 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5510 {
5511 	int i;
5512 
5513 	for (i = 0; i < nh_grp->nhgi->count; i++) {
5514 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5515 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5516 
5517 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
5518 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5519 				    &rt->fib6_nh->fib_nh_gw6))
5520 			return nh;
5521 	}
5522 
5523 	return NULL;
5524 }
5525 
5526 static void
5527 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5528 				      struct fib_entry_notifier_info *fen_info)
5529 {
5530 	u32 *p_dst = (u32 *) &fen_info->dst;
5531 	struct fib_rt_info fri;
5532 
5533 	fri.fi = fen_info->fi;
5534 	fri.tb_id = fen_info->tb_id;
5535 	fri.dst = cpu_to_be32(*p_dst);
5536 	fri.dst_len = fen_info->dst_len;
5537 	fri.dscp = fen_info->dscp;
5538 	fri.type = fen_info->type;
5539 	fri.offload = false;
5540 	fri.trap = false;
5541 	fri.offload_failed = true;
5542 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5543 }
5544 
5545 static void
5546 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5547 				 struct mlxsw_sp_fib_entry *fib_entry)
5548 {
5549 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5550 	int dst_len = fib_entry->fib_node->key.prefix_len;
5551 	struct mlxsw_sp_fib4_entry *fib4_entry;
5552 	struct fib_rt_info fri;
5553 	bool should_offload;
5554 
5555 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5556 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5557 				  common);
5558 	fri.fi = fib4_entry->fi;
5559 	fri.tb_id = fib4_entry->tb_id;
5560 	fri.dst = cpu_to_be32(*p_dst);
5561 	fri.dst_len = dst_len;
5562 	fri.dscp = fib4_entry->dscp;
5563 	fri.type = fib4_entry->type;
5564 	fri.offload = should_offload;
5565 	fri.trap = !should_offload;
5566 	fri.offload_failed = false;
5567 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5568 }
5569 
5570 static void
5571 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5572 				   struct mlxsw_sp_fib_entry *fib_entry)
5573 {
5574 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5575 	int dst_len = fib_entry->fib_node->key.prefix_len;
5576 	struct mlxsw_sp_fib4_entry *fib4_entry;
5577 	struct fib_rt_info fri;
5578 
5579 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5580 				  common);
5581 	fri.fi = fib4_entry->fi;
5582 	fri.tb_id = fib4_entry->tb_id;
5583 	fri.dst = cpu_to_be32(*p_dst);
5584 	fri.dst_len = dst_len;
5585 	fri.dscp = fib4_entry->dscp;
5586 	fri.type = fib4_entry->type;
5587 	fri.offload = false;
5588 	fri.trap = false;
5589 	fri.offload_failed = false;
5590 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5591 }
5592 
5593 #if IS_ENABLED(CONFIG_IPV6)
5594 static void
5595 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5596 				      struct fib6_info **rt_arr,
5597 				      unsigned int nrt6)
5598 {
5599 	int i;
5600 
5601 	/* In IPv6 a multipath route is represented using multiple routes, so
5602 	 * we need to set the flags on all of them.
5603 	 */
5604 	for (i = 0; i < nrt6; i++)
5605 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5606 				       false, false, true);
5607 }
5608 #else
5609 static void
5610 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5611 				      struct fib6_info **rt_arr,
5612 				      unsigned int nrt6)
5613 {
5614 }
5615 #endif
5616 
5617 #if IS_ENABLED(CONFIG_IPV6)
5618 static void
5619 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5620 				 struct mlxsw_sp_fib_entry *fib_entry)
5621 {
5622 	struct mlxsw_sp_fib6_entry *fib6_entry;
5623 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5624 	bool should_offload;
5625 
5626 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5627 
5628 	/* In IPv6 a multipath route is represented using multiple routes, so
5629 	 * we need to set the flags on all of them.
5630 	 */
5631 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5632 				  common);
5633 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5634 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5635 				       should_offload, !should_offload, false);
5636 }
5637 #else
5638 static void
5639 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5640 				 struct mlxsw_sp_fib_entry *fib_entry)
5641 {
5642 }
5643 #endif
5644 
5645 #if IS_ENABLED(CONFIG_IPV6)
5646 static void
5647 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5648 				   struct mlxsw_sp_fib_entry *fib_entry)
5649 {
5650 	struct mlxsw_sp_fib6_entry *fib6_entry;
5651 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5652 
5653 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5654 				  common);
5655 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5656 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5657 				       false, false, false);
5658 }
5659 #else
5660 static void
5661 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5662 				   struct mlxsw_sp_fib_entry *fib_entry)
5663 {
5664 }
5665 #endif
5666 
5667 static void
5668 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5669 				struct mlxsw_sp_fib_entry *fib_entry)
5670 {
5671 	switch (fib_entry->fib_node->fib->proto) {
5672 	case MLXSW_SP_L3_PROTO_IPV4:
5673 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5674 		break;
5675 	case MLXSW_SP_L3_PROTO_IPV6:
5676 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5677 		break;
5678 	}
5679 }
5680 
5681 static void
5682 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5683 				  struct mlxsw_sp_fib_entry *fib_entry)
5684 {
5685 	switch (fib_entry->fib_node->fib->proto) {
5686 	case MLXSW_SP_L3_PROTO_IPV4:
5687 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5688 		break;
5689 	case MLXSW_SP_L3_PROTO_IPV6:
5690 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5691 		break;
5692 	}
5693 }
5694 
5695 static void
5696 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5697 				    struct mlxsw_sp_fib_entry *fib_entry,
5698 				    enum mlxsw_reg_ralue_op op)
5699 {
5700 	switch (op) {
5701 	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
5702 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5703 		break;
5704 	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
5705 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5706 		break;
5707 	default:
5708 		break;
5709 	}
5710 }
5711 
5712 static void
5713 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
5714 			      const struct mlxsw_sp_fib_entry *fib_entry,
5715 			      enum mlxsw_reg_ralue_op op)
5716 {
5717 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5718 	enum mlxsw_reg_ralxx_protocol proto;
5719 	u32 *p_dip;
5720 
5721 	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
5722 
5723 	switch (fib->proto) {
5724 	case MLXSW_SP_L3_PROTO_IPV4:
5725 		p_dip = (u32 *) fib_entry->fib_node->key.addr;
5726 		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
5727 				      fib_entry->fib_node->key.prefix_len,
5728 				      *p_dip);
5729 		break;
5730 	case MLXSW_SP_L3_PROTO_IPV6:
5731 		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
5732 				      fib_entry->fib_node->key.prefix_len,
5733 				      fib_entry->fib_node->key.addr);
5734 		break;
5735 	}
5736 }
5737 
5738 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5739 					struct mlxsw_sp_fib_entry *fib_entry,
5740 					enum mlxsw_reg_ralue_op op)
5741 {
5742 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5743 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5744 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5745 	enum mlxsw_reg_ralue_trap_action trap_action;
5746 	u16 trap_id = 0;
5747 	u32 adjacency_index = 0;
5748 	u16 ecmp_size = 0;
5749 
5750 	/* In case the nexthop group adjacency index is valid, use it
5751 	 * with provided ECMP size. Otherwise, setup trap and pass
5752 	 * traffic to kernel.
5753 	 */
5754 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5755 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5756 		adjacency_index = nhgi->adj_index;
5757 		ecmp_size = nhgi->ecmp_size;
5758 	} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
5759 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5760 		adjacency_index = mlxsw_sp->router->adj_trap_index;
5761 		ecmp_size = 1;
5762 	} else {
5763 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5764 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5765 	}
5766 
5767 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5768 	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
5769 					adjacency_index, ecmp_size);
5770 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5771 }
5772 
5773 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5774 				       struct mlxsw_sp_fib_entry *fib_entry,
5775 				       enum mlxsw_reg_ralue_op op)
5776 {
5777 	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
5778 	enum mlxsw_reg_ralue_trap_action trap_action;
5779 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5780 	u16 trap_id = 0;
5781 	u16 rif_index = 0;
5782 
5783 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5784 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5785 		rif_index = rif->rif_index;
5786 	} else {
5787 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5788 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5789 	}
5790 
5791 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5792 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
5793 				       rif_index);
5794 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5795 }
5796 
5797 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
5798 				      struct mlxsw_sp_fib_entry *fib_entry,
5799 				      enum mlxsw_reg_ralue_op op)
5800 {
5801 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5802 
5803 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5804 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5805 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5806 }
5807 
5808 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
5809 					   struct mlxsw_sp_fib_entry *fib_entry,
5810 					   enum mlxsw_reg_ralue_op op)
5811 {
5812 	enum mlxsw_reg_ralue_trap_action trap_action;
5813 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5814 
5815 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
5816 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5817 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
5818 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5819 }
5820 
5821 static int
5822 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
5823 				  struct mlxsw_sp_fib_entry *fib_entry,
5824 				  enum mlxsw_reg_ralue_op op)
5825 {
5826 	enum mlxsw_reg_ralue_trap_action trap_action;
5827 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5828 	u16 trap_id;
5829 
5830 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5831 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
5832 
5833 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5834 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
5835 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5836 }
5837 
5838 static int
5839 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
5840 				 struct mlxsw_sp_fib_entry *fib_entry,
5841 				 enum mlxsw_reg_ralue_op op)
5842 {
5843 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
5844 	const struct mlxsw_sp_ipip_ops *ipip_ops;
5845 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5846 	int err;
5847 
5848 	if (WARN_ON(!ipip_entry))
5849 		return -EINVAL;
5850 
5851 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5852 	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
5853 				     fib_entry->decap.tunnel_index);
5854 	if (err)
5855 		return err;
5856 
5857 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5858 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
5859 					   fib_entry->decap.tunnel_index);
5860 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5861 }
5862 
5863 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
5864 					   struct mlxsw_sp_fib_entry *fib_entry,
5865 					   enum mlxsw_reg_ralue_op op)
5866 {
5867 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5868 
5869 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5870 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
5871 					   fib_entry->decap.tunnel_index);
5872 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5873 }
5874 
5875 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
5876 				   struct mlxsw_sp_fib_entry *fib_entry,
5877 				   enum mlxsw_reg_ralue_op op)
5878 {
5879 	switch (fib_entry->type) {
5880 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5881 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
5882 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5883 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
5884 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
5885 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
5886 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5887 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
5888 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
5889 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
5890 							 op);
5891 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5892 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
5893 							fib_entry, op);
5894 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5895 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
5896 	}
5897 	return -EINVAL;
5898 }
5899 
5900 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
5901 				 struct mlxsw_sp_fib_entry *fib_entry,
5902 				 enum mlxsw_reg_ralue_op op)
5903 {
5904 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
5905 
5906 	if (err)
5907 		return err;
5908 
5909 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
5910 
5911 	return err;
5912 }
5913 
5914 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
5915 				     struct mlxsw_sp_fib_entry *fib_entry)
5916 {
5917 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
5918 				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
5919 }
5920 
5921 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
5922 				  struct mlxsw_sp_fib_entry *fib_entry)
5923 {
5924 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
5925 				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
5926 }
5927 
5928 static int
5929 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5930 			     const struct fib_entry_notifier_info *fen_info,
5931 			     struct mlxsw_sp_fib_entry *fib_entry)
5932 {
5933 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
5934 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
5935 	struct mlxsw_sp_router *router = mlxsw_sp->router;
5936 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
5937 	int ifindex = nhgi->nexthops[0].ifindex;
5938 	struct mlxsw_sp_ipip_entry *ipip_entry;
5939 
5940 	switch (fen_info->type) {
5941 	case RTN_LOCAL:
5942 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
5943 							       MLXSW_SP_L3_PROTO_IPV4, dip);
5944 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
5945 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
5946 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
5947 							     fib_entry,
5948 							     ipip_entry);
5949 		}
5950 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
5951 						 MLXSW_SP_L3_PROTO_IPV4,
5952 						 &dip)) {
5953 			u32 tunnel_index;
5954 
5955 			tunnel_index = router->nve_decap_config.tunnel_index;
5956 			fib_entry->decap.tunnel_index = tunnel_index;
5957 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
5958 			return 0;
5959 		}
5960 		fallthrough;
5961 	case RTN_BROADCAST:
5962 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5963 		return 0;
5964 	case RTN_BLACKHOLE:
5965 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
5966 		return 0;
5967 	case RTN_UNREACHABLE:
5968 	case RTN_PROHIBIT:
5969 		/* Packets hitting these routes need to be trapped, but
5970 		 * can do so with a lower priority than packets directed
5971 		 * at the host, so use action type local instead of trap.
5972 		 */
5973 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
5974 		return 0;
5975 	case RTN_UNICAST:
5976 		if (nhgi->gateway)
5977 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5978 		else
5979 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5980 		return 0;
5981 	default:
5982 		return -EINVAL;
5983 	}
5984 }
5985 
5986 static void
5987 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
5988 			      struct mlxsw_sp_fib_entry *fib_entry)
5989 {
5990 	switch (fib_entry->type) {
5991 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5992 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
5993 		break;
5994 	default:
5995 		break;
5996 	}
5997 }
5998 
5999 static void
6000 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6001 			       struct mlxsw_sp_fib4_entry *fib4_entry)
6002 {
6003 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6004 }
6005 
6006 static struct mlxsw_sp_fib4_entry *
6007 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6008 			   struct mlxsw_sp_fib_node *fib_node,
6009 			   const struct fib_entry_notifier_info *fen_info)
6010 {
6011 	struct mlxsw_sp_fib4_entry *fib4_entry;
6012 	struct mlxsw_sp_fib_entry *fib_entry;
6013 	int err;
6014 
6015 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6016 	if (!fib4_entry)
6017 		return ERR_PTR(-ENOMEM);
6018 	fib_entry = &fib4_entry->common;
6019 
6020 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6021 	if (err)
6022 		goto err_nexthop4_group_get;
6023 
6024 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6025 					     fib_node->fib);
6026 	if (err)
6027 		goto err_nexthop_group_vr_link;
6028 
6029 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6030 	if (err)
6031 		goto err_fib4_entry_type_set;
6032 
6033 	fib4_entry->fi = fen_info->fi;
6034 	fib_info_hold(fib4_entry->fi);
6035 	fib4_entry->tb_id = fen_info->tb_id;
6036 	fib4_entry->type = fen_info->type;
6037 	fib4_entry->dscp = fen_info->dscp;
6038 
6039 	fib_entry->fib_node = fib_node;
6040 
6041 	return fib4_entry;
6042 
6043 err_fib4_entry_type_set:
6044 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6045 err_nexthop_group_vr_link:
6046 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6047 err_nexthop4_group_get:
6048 	kfree(fib4_entry);
6049 	return ERR_PTR(err);
6050 }
6051 
6052 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6053 					struct mlxsw_sp_fib4_entry *fib4_entry)
6054 {
6055 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6056 
6057 	fib_info_put(fib4_entry->fi);
6058 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6059 	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6060 					 fib_node->fib);
6061 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6062 	kfree(fib4_entry);
6063 }
6064 
6065 static struct mlxsw_sp_fib4_entry *
6066 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6067 			   const struct fib_entry_notifier_info *fen_info)
6068 {
6069 	struct mlxsw_sp_fib4_entry *fib4_entry;
6070 	struct mlxsw_sp_fib_node *fib_node;
6071 	struct mlxsw_sp_fib *fib;
6072 	struct mlxsw_sp_vr *vr;
6073 
6074 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6075 	if (!vr)
6076 		return NULL;
6077 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6078 
6079 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6080 					    sizeof(fen_info->dst),
6081 					    fen_info->dst_len);
6082 	if (!fib_node)
6083 		return NULL;
6084 
6085 	fib4_entry = container_of(fib_node->fib_entry,
6086 				  struct mlxsw_sp_fib4_entry, common);
6087 	if (fib4_entry->tb_id == fen_info->tb_id &&
6088 	    fib4_entry->dscp == fen_info->dscp &&
6089 	    fib4_entry->type == fen_info->type &&
6090 	    fib4_entry->fi == fen_info->fi)
6091 		return fib4_entry;
6092 
6093 	return NULL;
6094 }
6095 
6096 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6097 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6098 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6099 	.key_len = sizeof(struct mlxsw_sp_fib_key),
6100 	.automatic_shrinking = true,
6101 };
6102 
6103 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6104 				    struct mlxsw_sp_fib_node *fib_node)
6105 {
6106 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6107 				      mlxsw_sp_fib_ht_params);
6108 }
6109 
6110 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6111 				     struct mlxsw_sp_fib_node *fib_node)
6112 {
6113 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6114 			       mlxsw_sp_fib_ht_params);
6115 }
6116 
6117 static struct mlxsw_sp_fib_node *
6118 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6119 			 size_t addr_len, unsigned char prefix_len)
6120 {
6121 	struct mlxsw_sp_fib_key key;
6122 
6123 	memset(&key, 0, sizeof(key));
6124 	memcpy(key.addr, addr, addr_len);
6125 	key.prefix_len = prefix_len;
6126 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6127 }
6128 
6129 static struct mlxsw_sp_fib_node *
6130 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6131 			 size_t addr_len, unsigned char prefix_len)
6132 {
6133 	struct mlxsw_sp_fib_node *fib_node;
6134 
6135 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6136 	if (!fib_node)
6137 		return NULL;
6138 
6139 	list_add(&fib_node->list, &fib->node_list);
6140 	memcpy(fib_node->key.addr, addr, addr_len);
6141 	fib_node->key.prefix_len = prefix_len;
6142 
6143 	return fib_node;
6144 }
6145 
6146 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6147 {
6148 	list_del(&fib_node->list);
6149 	kfree(fib_node);
6150 }
6151 
6152 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6153 				      struct mlxsw_sp_fib_node *fib_node)
6154 {
6155 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6156 	struct mlxsw_sp_fib *fib = fib_node->fib;
6157 	struct mlxsw_sp_lpm_tree *lpm_tree;
6158 	int err;
6159 
6160 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6161 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6162 		goto out;
6163 
6164 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6165 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6166 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6167 					 fib->proto);
6168 	if (IS_ERR(lpm_tree))
6169 		return PTR_ERR(lpm_tree);
6170 
6171 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6172 	if (err)
6173 		goto err_lpm_tree_replace;
6174 
6175 out:
6176 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6177 	return 0;
6178 
6179 err_lpm_tree_replace:
6180 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6181 	return err;
6182 }
6183 
6184 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6185 					 struct mlxsw_sp_fib_node *fib_node)
6186 {
6187 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6188 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6189 	struct mlxsw_sp_fib *fib = fib_node->fib;
6190 	int err;
6191 
6192 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6193 		return;
6194 	/* Try to construct a new LPM tree from the current prefix usage
6195 	 * minus the unused one. If we fail, continue using the old one.
6196 	 */
6197 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6198 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6199 				    fib_node->key.prefix_len);
6200 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6201 					 fib->proto);
6202 	if (IS_ERR(lpm_tree))
6203 		return;
6204 
6205 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6206 	if (err)
6207 		goto err_lpm_tree_replace;
6208 
6209 	return;
6210 
6211 err_lpm_tree_replace:
6212 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6213 }
6214 
6215 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6216 				  struct mlxsw_sp_fib_node *fib_node,
6217 				  struct mlxsw_sp_fib *fib)
6218 {
6219 	int err;
6220 
6221 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
6222 	if (err)
6223 		return err;
6224 	fib_node->fib = fib;
6225 
6226 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6227 	if (err)
6228 		goto err_fib_lpm_tree_link;
6229 
6230 	return 0;
6231 
6232 err_fib_lpm_tree_link:
6233 	fib_node->fib = NULL;
6234 	mlxsw_sp_fib_node_remove(fib, fib_node);
6235 	return err;
6236 }
6237 
6238 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6239 				   struct mlxsw_sp_fib_node *fib_node)
6240 {
6241 	struct mlxsw_sp_fib *fib = fib_node->fib;
6242 
6243 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6244 	fib_node->fib = NULL;
6245 	mlxsw_sp_fib_node_remove(fib, fib_node);
6246 }
6247 
6248 static struct mlxsw_sp_fib_node *
6249 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6250 		      size_t addr_len, unsigned char prefix_len,
6251 		      enum mlxsw_sp_l3proto proto)
6252 {
6253 	struct mlxsw_sp_fib_node *fib_node;
6254 	struct mlxsw_sp_fib *fib;
6255 	struct mlxsw_sp_vr *vr;
6256 	int err;
6257 
6258 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6259 	if (IS_ERR(vr))
6260 		return ERR_CAST(vr);
6261 	fib = mlxsw_sp_vr_fib(vr, proto);
6262 
6263 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6264 	if (fib_node)
6265 		return fib_node;
6266 
6267 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6268 	if (!fib_node) {
6269 		err = -ENOMEM;
6270 		goto err_fib_node_create;
6271 	}
6272 
6273 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6274 	if (err)
6275 		goto err_fib_node_init;
6276 
6277 	return fib_node;
6278 
6279 err_fib_node_init:
6280 	mlxsw_sp_fib_node_destroy(fib_node);
6281 err_fib_node_create:
6282 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6283 	return ERR_PTR(err);
6284 }
6285 
6286 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6287 				  struct mlxsw_sp_fib_node *fib_node)
6288 {
6289 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6290 
6291 	if (fib_node->fib_entry)
6292 		return;
6293 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6294 	mlxsw_sp_fib_node_destroy(fib_node);
6295 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6296 }
6297 
6298 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6299 					struct mlxsw_sp_fib_entry *fib_entry)
6300 {
6301 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6302 	int err;
6303 
6304 	fib_node->fib_entry = fib_entry;
6305 
6306 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
6307 	if (err)
6308 		goto err_fib_entry_update;
6309 
6310 	return 0;
6311 
6312 err_fib_entry_update:
6313 	fib_node->fib_entry = NULL;
6314 	return err;
6315 }
6316 
6317 static void
6318 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6319 			       struct mlxsw_sp_fib_entry *fib_entry)
6320 {
6321 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6322 
6323 	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
6324 	fib_node->fib_entry = NULL;
6325 }
6326 
6327 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6328 {
6329 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6330 	struct mlxsw_sp_fib4_entry *fib4_replaced;
6331 
6332 	if (!fib_node->fib_entry)
6333 		return true;
6334 
6335 	fib4_replaced = container_of(fib_node->fib_entry,
6336 				     struct mlxsw_sp_fib4_entry, common);
6337 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6338 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
6339 		return false;
6340 
6341 	return true;
6342 }
6343 
6344 static int
6345 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6346 			     const struct fib_entry_notifier_info *fen_info)
6347 {
6348 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6349 	struct mlxsw_sp_fib_entry *replaced;
6350 	struct mlxsw_sp_fib_node *fib_node;
6351 	int err;
6352 
6353 	if (fen_info->fi->nh &&
6354 	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6355 		return 0;
6356 
6357 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6358 					 &fen_info->dst, sizeof(fen_info->dst),
6359 					 fen_info->dst_len,
6360 					 MLXSW_SP_L3_PROTO_IPV4);
6361 	if (IS_ERR(fib_node)) {
6362 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6363 		return PTR_ERR(fib_node);
6364 	}
6365 
6366 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6367 	if (IS_ERR(fib4_entry)) {
6368 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6369 		err = PTR_ERR(fib4_entry);
6370 		goto err_fib4_entry_create;
6371 	}
6372 
6373 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6374 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6375 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6376 		return 0;
6377 	}
6378 
6379 	replaced = fib_node->fib_entry;
6380 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
6381 	if (err) {
6382 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6383 		goto err_fib_node_entry_link;
6384 	}
6385 
6386 	/* Nothing to replace */
6387 	if (!replaced)
6388 		return 0;
6389 
6390 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6391 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6392 				     common);
6393 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6394 
6395 	return 0;
6396 
6397 err_fib_node_entry_link:
6398 	fib_node->fib_entry = replaced;
6399 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6400 err_fib4_entry_create:
6401 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6402 	return err;
6403 }
6404 
6405 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6406 				     struct fib_entry_notifier_info *fen_info)
6407 {
6408 	struct mlxsw_sp_fib4_entry *fib4_entry;
6409 	struct mlxsw_sp_fib_node *fib_node;
6410 
6411 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6412 	if (!fib4_entry)
6413 		return;
6414 	fib_node = fib4_entry->common.fib_node;
6415 
6416 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
6417 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6418 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6419 }
6420 
6421 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6422 {
6423 	/* Multicast routes aren't supported, so ignore them. Neighbour
6424 	 * Discovery packets are specifically trapped.
6425 	 */
6426 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6427 		return true;
6428 
6429 	/* Cloned routes are irrelevant in the forwarding path. */
6430 	if (rt->fib6_flags & RTF_CACHE)
6431 		return true;
6432 
6433 	return false;
6434 }
6435 
6436 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6437 {
6438 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6439 
6440 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6441 	if (!mlxsw_sp_rt6)
6442 		return ERR_PTR(-ENOMEM);
6443 
6444 	/* In case of route replace, replaced route is deleted with
6445 	 * no notification. Take reference to prevent accessing freed
6446 	 * memory.
6447 	 */
6448 	mlxsw_sp_rt6->rt = rt;
6449 	fib6_info_hold(rt);
6450 
6451 	return mlxsw_sp_rt6;
6452 }
6453 
6454 #if IS_ENABLED(CONFIG_IPV6)
6455 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6456 {
6457 	fib6_info_release(rt);
6458 }
6459 #else
6460 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6461 {
6462 }
6463 #endif
6464 
6465 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6466 {
6467 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6468 
6469 	if (!mlxsw_sp_rt6->rt->nh)
6470 		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6471 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6472 	kfree(mlxsw_sp_rt6);
6473 }
6474 
6475 static struct fib6_info *
6476 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6477 {
6478 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6479 				list)->rt;
6480 }
6481 
6482 static struct mlxsw_sp_rt6 *
6483 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6484 			    const struct fib6_info *rt)
6485 {
6486 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6487 
6488 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6489 		if (mlxsw_sp_rt6->rt == rt)
6490 			return mlxsw_sp_rt6;
6491 	}
6492 
6493 	return NULL;
6494 }
6495 
6496 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6497 					const struct fib6_info *rt,
6498 					enum mlxsw_sp_ipip_type *ret)
6499 {
6500 	return rt->fib6_nh->fib_nh_dev &&
6501 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6502 }
6503 
6504 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6505 				  struct mlxsw_sp_nexthop_group *nh_grp,
6506 				  struct mlxsw_sp_nexthop *nh,
6507 				  const struct fib6_info *rt)
6508 {
6509 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6510 	int err;
6511 
6512 	nh->nhgi = nh_grp->nhgi;
6513 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6514 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6515 #if IS_ENABLED(CONFIG_IPV6)
6516 	nh->neigh_tbl = &nd_tbl;
6517 #endif
6518 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6519 
6520 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6521 
6522 	if (!dev)
6523 		return 0;
6524 	nh->ifindex = dev->ifindex;
6525 
6526 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6527 	if (err)
6528 		goto err_nexthop_type_init;
6529 
6530 	return 0;
6531 
6532 err_nexthop_type_init:
6533 	list_del(&nh->router_list_node);
6534 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6535 	return err;
6536 }
6537 
6538 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6539 				   struct mlxsw_sp_nexthop *nh)
6540 {
6541 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6542 	list_del(&nh->router_list_node);
6543 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6544 }
6545 
6546 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6547 				    const struct fib6_info *rt)
6548 {
6549 	return rt->fib6_nh->fib_nh_gw_family ||
6550 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6551 }
6552 
6553 static int
6554 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6555 				  struct mlxsw_sp_nexthop_group *nh_grp,
6556 				  struct mlxsw_sp_fib6_entry *fib6_entry)
6557 {
6558 	struct mlxsw_sp_nexthop_group_info *nhgi;
6559 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6560 	struct mlxsw_sp_nexthop *nh;
6561 	int err, i;
6562 
6563 	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6564 		       GFP_KERNEL);
6565 	if (!nhgi)
6566 		return -ENOMEM;
6567 	nh_grp->nhgi = nhgi;
6568 	nhgi->nh_grp = nh_grp;
6569 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6570 					struct mlxsw_sp_rt6, list);
6571 	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6572 	nhgi->count = fib6_entry->nrt6;
6573 	for (i = 0; i < nhgi->count; i++) {
6574 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
6575 
6576 		nh = &nhgi->nexthops[i];
6577 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6578 		if (err)
6579 			goto err_nexthop6_init;
6580 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6581 	}
6582 	nh_grp->nhgi = nhgi;
6583 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
6584 	if (err)
6585 		goto err_group_inc;
6586 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6587 	if (err)
6588 		goto err_group_refresh;
6589 
6590 	return 0;
6591 
6592 err_group_refresh:
6593 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6594 err_group_inc:
6595 	i = nhgi->count;
6596 err_nexthop6_init:
6597 	for (i--; i >= 0; i--) {
6598 		nh = &nhgi->nexthops[i];
6599 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6600 	}
6601 	kfree(nhgi);
6602 	return err;
6603 }
6604 
6605 static void
6606 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6607 				  struct mlxsw_sp_nexthop_group *nh_grp)
6608 {
6609 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6610 	int i;
6611 
6612 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6613 	for (i = nhgi->count - 1; i >= 0; i--) {
6614 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6615 
6616 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6617 	}
6618 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6619 	WARN_ON_ONCE(nhgi->adj_index_valid);
6620 	kfree(nhgi);
6621 }
6622 
6623 static struct mlxsw_sp_nexthop_group *
6624 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6625 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6626 {
6627 	struct mlxsw_sp_nexthop_group *nh_grp;
6628 	int err;
6629 
6630 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6631 	if (!nh_grp)
6632 		return ERR_PTR(-ENOMEM);
6633 	INIT_LIST_HEAD(&nh_grp->vr_list);
6634 	err = rhashtable_init(&nh_grp->vr_ht,
6635 			      &mlxsw_sp_nexthop_group_vr_ht_params);
6636 	if (err)
6637 		goto err_nexthop_group_vr_ht_init;
6638 	INIT_LIST_HEAD(&nh_grp->fib_list);
6639 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6640 
6641 	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6642 	if (err)
6643 		goto err_nexthop_group_info_init;
6644 
6645 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6646 	if (err)
6647 		goto err_nexthop_group_insert;
6648 
6649 	nh_grp->can_destroy = true;
6650 
6651 	return nh_grp;
6652 
6653 err_nexthop_group_insert:
6654 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6655 err_nexthop_group_info_init:
6656 	rhashtable_destroy(&nh_grp->vr_ht);
6657 err_nexthop_group_vr_ht_init:
6658 	kfree(nh_grp);
6659 	return ERR_PTR(err);
6660 }
6661 
6662 static void
6663 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6664 				struct mlxsw_sp_nexthop_group *nh_grp)
6665 {
6666 	if (!nh_grp->can_destroy)
6667 		return;
6668 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6669 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6670 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6671 	rhashtable_destroy(&nh_grp->vr_ht);
6672 	kfree(nh_grp);
6673 }
6674 
6675 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6676 				       struct mlxsw_sp_fib6_entry *fib6_entry)
6677 {
6678 	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6679 	struct mlxsw_sp_nexthop_group *nh_grp;
6680 
6681 	if (rt->nh) {
6682 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6683 							   rt->nh->id);
6684 		if (WARN_ON_ONCE(!nh_grp))
6685 			return -EINVAL;
6686 		goto out;
6687 	}
6688 
6689 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6690 	if (!nh_grp) {
6691 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6692 		if (IS_ERR(nh_grp))
6693 			return PTR_ERR(nh_grp);
6694 	}
6695 
6696 	/* The route and the nexthop are described by the same struct, so we
6697 	 * need to the update the nexthop offload indication for the new route.
6698 	 */
6699 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6700 
6701 out:
6702 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6703 		      &nh_grp->fib_list);
6704 	fib6_entry->common.nh_group = nh_grp;
6705 
6706 	return 0;
6707 }
6708 
6709 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6710 					struct mlxsw_sp_fib_entry *fib_entry)
6711 {
6712 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6713 
6714 	list_del(&fib_entry->nexthop_group_node);
6715 	if (!list_empty(&nh_grp->fib_list))
6716 		return;
6717 
6718 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6719 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6720 		return;
6721 	}
6722 
6723 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
6724 }
6725 
6726 static int
6727 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6728 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6729 {
6730 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6731 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6732 	int err;
6733 
6734 	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6735 	fib6_entry->common.nh_group = NULL;
6736 	list_del(&fib6_entry->common.nexthop_group_node);
6737 
6738 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6739 	if (err)
6740 		goto err_nexthop6_group_get;
6741 
6742 	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6743 					     fib_node->fib);
6744 	if (err)
6745 		goto err_nexthop_group_vr_link;
6746 
6747 	/* In case this entry is offloaded, then the adjacency index
6748 	 * currently associated with it in the device's table is that
6749 	 * of the old group. Start using the new one instead.
6750 	 */
6751 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
6752 	if (err)
6753 		goto err_fib_entry_update;
6754 
6755 	if (list_empty(&old_nh_grp->fib_list))
6756 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6757 
6758 	return 0;
6759 
6760 err_fib_entry_update:
6761 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6762 					 fib_node->fib);
6763 err_nexthop_group_vr_link:
6764 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6765 err_nexthop6_group_get:
6766 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6767 		      &old_nh_grp->fib_list);
6768 	fib6_entry->common.nh_group = old_nh_grp;
6769 	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6770 	return err;
6771 }
6772 
6773 static int
6774 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6775 				struct mlxsw_sp_fib6_entry *fib6_entry,
6776 				struct fib6_info **rt_arr, unsigned int nrt6)
6777 {
6778 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6779 	int err, i;
6780 
6781 	for (i = 0; i < nrt6; i++) {
6782 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6783 		if (IS_ERR(mlxsw_sp_rt6)) {
6784 			err = PTR_ERR(mlxsw_sp_rt6);
6785 			goto err_rt6_unwind;
6786 		}
6787 
6788 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6789 		fib6_entry->nrt6++;
6790 	}
6791 
6792 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
6793 	if (err)
6794 		goto err_rt6_unwind;
6795 
6796 	return 0;
6797 
6798 err_rt6_unwind:
6799 	for (; i > 0; i--) {
6800 		fib6_entry->nrt6--;
6801 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6802 					       struct mlxsw_sp_rt6, list);
6803 		list_del(&mlxsw_sp_rt6->list);
6804 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6805 	}
6806 	return err;
6807 }
6808 
6809 static void
6810 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
6811 				struct mlxsw_sp_fib6_entry *fib6_entry,
6812 				struct fib6_info **rt_arr, unsigned int nrt6)
6813 {
6814 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6815 	int i;
6816 
6817 	for (i = 0; i < nrt6; i++) {
6818 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
6819 							   rt_arr[i]);
6820 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
6821 			continue;
6822 
6823 		fib6_entry->nrt6--;
6824 		list_del(&mlxsw_sp_rt6->list);
6825 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6826 	}
6827 
6828 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
6829 }
6830 
6831 static int
6832 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
6833 				   struct mlxsw_sp_fib_entry *fib_entry,
6834 				   const struct fib6_info *rt)
6835 {
6836 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6837 	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
6838 	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
6839 	struct mlxsw_sp_router *router = mlxsw_sp->router;
6840 	int ifindex = nhgi->nexthops[0].ifindex;
6841 	struct mlxsw_sp_ipip_entry *ipip_entry;
6842 
6843 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6844 	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6845 						       MLXSW_SP_L3_PROTO_IPV6,
6846 						       dip);
6847 
6848 	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6849 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6850 		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
6851 						     ipip_entry);
6852 	}
6853 	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6854 					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
6855 		u32 tunnel_index;
6856 
6857 		tunnel_index = router->nve_decap_config.tunnel_index;
6858 		fib_entry->decap.tunnel_index = tunnel_index;
6859 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6860 	}
6861 
6862 	return 0;
6863 }
6864 
6865 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6866 					struct mlxsw_sp_fib_entry *fib_entry,
6867 					const struct fib6_info *rt)
6868 {
6869 	if (rt->fib6_flags & RTF_LOCAL)
6870 		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
6871 							  rt);
6872 	if (rt->fib6_flags & RTF_ANYCAST)
6873 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6874 	else if (rt->fib6_type == RTN_BLACKHOLE)
6875 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6876 	else if (rt->fib6_flags & RTF_REJECT)
6877 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6878 	else if (fib_entry->nh_group->nhgi->gateway)
6879 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6880 	else
6881 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6882 
6883 	return 0;
6884 }
6885 
6886 static void
6887 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
6888 {
6889 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
6890 
6891 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
6892 				 list) {
6893 		fib6_entry->nrt6--;
6894 		list_del(&mlxsw_sp_rt6->list);
6895 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6896 	}
6897 }
6898 
6899 static struct mlxsw_sp_fib6_entry *
6900 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
6901 			   struct mlxsw_sp_fib_node *fib_node,
6902 			   struct fib6_info **rt_arr, unsigned int nrt6)
6903 {
6904 	struct mlxsw_sp_fib6_entry *fib6_entry;
6905 	struct mlxsw_sp_fib_entry *fib_entry;
6906 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6907 	int err, i;
6908 
6909 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
6910 	if (!fib6_entry)
6911 		return ERR_PTR(-ENOMEM);
6912 	fib_entry = &fib6_entry->common;
6913 
6914 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
6915 
6916 	for (i = 0; i < nrt6; i++) {
6917 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6918 		if (IS_ERR(mlxsw_sp_rt6)) {
6919 			err = PTR_ERR(mlxsw_sp_rt6);
6920 			goto err_rt6_unwind;
6921 		}
6922 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6923 		fib6_entry->nrt6++;
6924 	}
6925 
6926 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6927 	if (err)
6928 		goto err_rt6_unwind;
6929 
6930 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6931 					     fib_node->fib);
6932 	if (err)
6933 		goto err_nexthop_group_vr_link;
6934 
6935 	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
6936 	if (err)
6937 		goto err_fib6_entry_type_set;
6938 
6939 	fib_entry->fib_node = fib_node;
6940 
6941 	return fib6_entry;
6942 
6943 err_fib6_entry_type_set:
6944 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6945 err_nexthop_group_vr_link:
6946 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
6947 err_rt6_unwind:
6948 	for (; i > 0; i--) {
6949 		fib6_entry->nrt6--;
6950 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6951 					       struct mlxsw_sp_rt6, list);
6952 		list_del(&mlxsw_sp_rt6->list);
6953 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6954 	}
6955 	kfree(fib6_entry);
6956 	return ERR_PTR(err);
6957 }
6958 
6959 static void
6960 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6961 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6962 {
6963 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
6964 }
6965 
6966 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6967 					struct mlxsw_sp_fib6_entry *fib6_entry)
6968 {
6969 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6970 
6971 	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
6972 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6973 					 fib_node->fib);
6974 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6975 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
6976 	WARN_ON(fib6_entry->nrt6);
6977 	kfree(fib6_entry);
6978 }
6979 
6980 static struct mlxsw_sp_fib6_entry *
6981 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6982 			   const struct fib6_info *rt)
6983 {
6984 	struct mlxsw_sp_fib6_entry *fib6_entry;
6985 	struct mlxsw_sp_fib_node *fib_node;
6986 	struct mlxsw_sp_fib *fib;
6987 	struct fib6_info *cmp_rt;
6988 	struct mlxsw_sp_vr *vr;
6989 
6990 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
6991 	if (!vr)
6992 		return NULL;
6993 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
6994 
6995 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
6996 					    sizeof(rt->fib6_dst.addr),
6997 					    rt->fib6_dst.plen);
6998 	if (!fib_node)
6999 		return NULL;
7000 
7001 	fib6_entry = container_of(fib_node->fib_entry,
7002 				  struct mlxsw_sp_fib6_entry, common);
7003 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7004 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7005 	    rt->fib6_metric == cmp_rt->fib6_metric &&
7006 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7007 		return fib6_entry;
7008 
7009 	return NULL;
7010 }
7011 
7012 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7013 {
7014 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7015 	struct mlxsw_sp_fib6_entry *fib6_replaced;
7016 	struct fib6_info *rt, *rt_replaced;
7017 
7018 	if (!fib_node->fib_entry)
7019 		return true;
7020 
7021 	fib6_replaced = container_of(fib_node->fib_entry,
7022 				     struct mlxsw_sp_fib6_entry,
7023 				     common);
7024 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7025 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7026 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7027 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7028 		return false;
7029 
7030 	return true;
7031 }
7032 
7033 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7034 					struct fib6_info **rt_arr,
7035 					unsigned int nrt6)
7036 {
7037 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7038 	struct mlxsw_sp_fib_entry *replaced;
7039 	struct mlxsw_sp_fib_node *fib_node;
7040 	struct fib6_info *rt = rt_arr[0];
7041 	int err;
7042 
7043 	if (rt->fib6_src.plen)
7044 		return -EINVAL;
7045 
7046 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7047 		return 0;
7048 
7049 	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7050 		return 0;
7051 
7052 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7053 					 &rt->fib6_dst.addr,
7054 					 sizeof(rt->fib6_dst.addr),
7055 					 rt->fib6_dst.plen,
7056 					 MLXSW_SP_L3_PROTO_IPV6);
7057 	if (IS_ERR(fib_node))
7058 		return PTR_ERR(fib_node);
7059 
7060 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7061 						nrt6);
7062 	if (IS_ERR(fib6_entry)) {
7063 		err = PTR_ERR(fib6_entry);
7064 		goto err_fib6_entry_create;
7065 	}
7066 
7067 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7068 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7069 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7070 		return 0;
7071 	}
7072 
7073 	replaced = fib_node->fib_entry;
7074 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
7075 	if (err)
7076 		goto err_fib_node_entry_link;
7077 
7078 	/* Nothing to replace */
7079 	if (!replaced)
7080 		return 0;
7081 
7082 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7083 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7084 				     common);
7085 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7086 
7087 	return 0;
7088 
7089 err_fib_node_entry_link:
7090 	fib_node->fib_entry = replaced;
7091 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7092 err_fib6_entry_create:
7093 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7094 	return err;
7095 }
7096 
7097 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7098 				       struct fib6_info **rt_arr,
7099 				       unsigned int nrt6)
7100 {
7101 	struct mlxsw_sp_fib6_entry *fib6_entry;
7102 	struct mlxsw_sp_fib_node *fib_node;
7103 	struct fib6_info *rt = rt_arr[0];
7104 	int err;
7105 
7106 	if (rt->fib6_src.plen)
7107 		return -EINVAL;
7108 
7109 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7110 		return 0;
7111 
7112 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7113 					 &rt->fib6_dst.addr,
7114 					 sizeof(rt->fib6_dst.addr),
7115 					 rt->fib6_dst.plen,
7116 					 MLXSW_SP_L3_PROTO_IPV6);
7117 	if (IS_ERR(fib_node))
7118 		return PTR_ERR(fib_node);
7119 
7120 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7121 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7122 		return -EINVAL;
7123 	}
7124 
7125 	fib6_entry = container_of(fib_node->fib_entry,
7126 				  struct mlxsw_sp_fib6_entry, common);
7127 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
7128 					      nrt6);
7129 	if (err)
7130 		goto err_fib6_entry_nexthop_add;
7131 
7132 	return 0;
7133 
7134 err_fib6_entry_nexthop_add:
7135 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7136 	return err;
7137 }
7138 
7139 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7140 				     struct fib6_info **rt_arr,
7141 				     unsigned int nrt6)
7142 {
7143 	struct mlxsw_sp_fib6_entry *fib6_entry;
7144 	struct mlxsw_sp_fib_node *fib_node;
7145 	struct fib6_info *rt = rt_arr[0];
7146 
7147 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7148 		return;
7149 
7150 	/* Multipath routes are first added to the FIB trie and only then
7151 	 * notified. If we vetoed the addition, we will get a delete
7152 	 * notification for a route we do not have. Therefore, do not warn if
7153 	 * route was not found.
7154 	 */
7155 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7156 	if (!fib6_entry)
7157 		return;
7158 
7159 	/* If not all the nexthops are deleted, then only reduce the nexthop
7160 	 * group.
7161 	 */
7162 	if (nrt6 != fib6_entry->nrt6) {
7163 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
7164 						nrt6);
7165 		return;
7166 	}
7167 
7168 	fib_node = fib6_entry->common.fib_node;
7169 
7170 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
7171 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7172 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7173 }
7174 
7175 static struct mlxsw_sp_mr_table *
7176 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7177 {
7178 	if (family == RTNL_FAMILY_IPMR)
7179 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7180 	else
7181 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7182 }
7183 
7184 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7185 				     struct mfc_entry_notifier_info *men_info,
7186 				     bool replace)
7187 {
7188 	struct mlxsw_sp_mr_table *mrt;
7189 	struct mlxsw_sp_vr *vr;
7190 
7191 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7192 	if (IS_ERR(vr))
7193 		return PTR_ERR(vr);
7194 
7195 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7196 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7197 }
7198 
7199 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7200 				      struct mfc_entry_notifier_info *men_info)
7201 {
7202 	struct mlxsw_sp_mr_table *mrt;
7203 	struct mlxsw_sp_vr *vr;
7204 
7205 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7206 	if (WARN_ON(!vr))
7207 		return;
7208 
7209 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7210 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7211 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7212 }
7213 
7214 static int
7215 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7216 			      struct vif_entry_notifier_info *ven_info)
7217 {
7218 	struct mlxsw_sp_mr_table *mrt;
7219 	struct mlxsw_sp_rif *rif;
7220 	struct mlxsw_sp_vr *vr;
7221 
7222 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7223 	if (IS_ERR(vr))
7224 		return PTR_ERR(vr);
7225 
7226 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7227 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7228 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7229 				   ven_info->vif_index,
7230 				   ven_info->vif_flags, rif);
7231 }
7232 
7233 static void
7234 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7235 			      struct vif_entry_notifier_info *ven_info)
7236 {
7237 	struct mlxsw_sp_mr_table *mrt;
7238 	struct mlxsw_sp_vr *vr;
7239 
7240 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7241 	if (WARN_ON(!vr))
7242 		return;
7243 
7244 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7245 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7246 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7247 }
7248 
7249 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7250 				     struct mlxsw_sp_fib_node *fib_node)
7251 {
7252 	struct mlxsw_sp_fib4_entry *fib4_entry;
7253 
7254 	fib4_entry = container_of(fib_node->fib_entry,
7255 				  struct mlxsw_sp_fib4_entry, common);
7256 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7257 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7258 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7259 }
7260 
7261 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7262 				     struct mlxsw_sp_fib_node *fib_node)
7263 {
7264 	struct mlxsw_sp_fib6_entry *fib6_entry;
7265 
7266 	fib6_entry = container_of(fib_node->fib_entry,
7267 				  struct mlxsw_sp_fib6_entry, common);
7268 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7269 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7270 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7271 }
7272 
7273 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7274 				    struct mlxsw_sp_fib_node *fib_node)
7275 {
7276 	switch (fib_node->fib->proto) {
7277 	case MLXSW_SP_L3_PROTO_IPV4:
7278 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7279 		break;
7280 	case MLXSW_SP_L3_PROTO_IPV6:
7281 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7282 		break;
7283 	}
7284 }
7285 
7286 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7287 				  struct mlxsw_sp_vr *vr,
7288 				  enum mlxsw_sp_l3proto proto)
7289 {
7290 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7291 	struct mlxsw_sp_fib_node *fib_node, *tmp;
7292 
7293 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7294 		bool do_break = &tmp->list == &fib->node_list;
7295 
7296 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7297 		if (do_break)
7298 			break;
7299 	}
7300 }
7301 
7302 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7303 {
7304 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
7305 	int i, j;
7306 
7307 	for (i = 0; i < max_vrs; i++) {
7308 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7309 
7310 		if (!mlxsw_sp_vr_is_used(vr))
7311 			continue;
7312 
7313 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7314 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7315 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7316 
7317 		/* If virtual router was only used for IPv4, then it's no
7318 		 * longer used.
7319 		 */
7320 		if (!mlxsw_sp_vr_is_used(vr))
7321 			continue;
7322 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7323 	}
7324 }
7325 
7326 struct mlxsw_sp_fib6_event_work {
7327 	struct fib6_info **rt_arr;
7328 	unsigned int nrt6;
7329 };
7330 
7331 struct mlxsw_sp_fib_event_work {
7332 	struct work_struct work;
7333 	union {
7334 		struct mlxsw_sp_fib6_event_work fib6_work;
7335 		struct fib_entry_notifier_info fen_info;
7336 		struct fib_rule_notifier_info fr_info;
7337 		struct fib_nh_notifier_info fnh_info;
7338 		struct mfc_entry_notifier_info men_info;
7339 		struct vif_entry_notifier_info ven_info;
7340 	};
7341 	struct mlxsw_sp *mlxsw_sp;
7342 	unsigned long event;
7343 };
7344 
7345 static int
7346 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
7347 			       struct fib6_entry_notifier_info *fen6_info)
7348 {
7349 	struct fib6_info *rt = fen6_info->rt;
7350 	struct fib6_info **rt_arr;
7351 	struct fib6_info *iter;
7352 	unsigned int nrt6;
7353 	int i = 0;
7354 
7355 	nrt6 = fen6_info->nsiblings + 1;
7356 
7357 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7358 	if (!rt_arr)
7359 		return -ENOMEM;
7360 
7361 	fib6_work->rt_arr = rt_arr;
7362 	fib6_work->nrt6 = nrt6;
7363 
7364 	rt_arr[0] = rt;
7365 	fib6_info_hold(rt);
7366 
7367 	if (!fen6_info->nsiblings)
7368 		return 0;
7369 
7370 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7371 		if (i == fen6_info->nsiblings)
7372 			break;
7373 
7374 		rt_arr[i + 1] = iter;
7375 		fib6_info_hold(iter);
7376 		i++;
7377 	}
7378 	WARN_ON_ONCE(i != fen6_info->nsiblings);
7379 
7380 	return 0;
7381 }
7382 
7383 static void
7384 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
7385 {
7386 	int i;
7387 
7388 	for (i = 0; i < fib6_work->nrt6; i++)
7389 		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
7390 	kfree(fib6_work->rt_arr);
7391 }
7392 
7393 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
7394 {
7395 	struct mlxsw_sp_fib_event_work *fib_work =
7396 		container_of(work, struct mlxsw_sp_fib_event_work, work);
7397 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7398 	int err;
7399 
7400 	mutex_lock(&mlxsw_sp->router->lock);
7401 	mlxsw_sp_span_respin(mlxsw_sp);
7402 
7403 	switch (fib_work->event) {
7404 	case FIB_EVENT_ENTRY_REPLACE:
7405 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
7406 						   &fib_work->fen_info);
7407 		if (err) {
7408 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7409 			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7410 							      &fib_work->fen_info);
7411 		}
7412 		fib_info_put(fib_work->fen_info.fi);
7413 		break;
7414 	case FIB_EVENT_ENTRY_DEL:
7415 		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
7416 		fib_info_put(fib_work->fen_info.fi);
7417 		break;
7418 	case FIB_EVENT_NH_ADD:
7419 	case FIB_EVENT_NH_DEL:
7420 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
7421 					fib_work->fnh_info.fib_nh);
7422 		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
7423 		break;
7424 	}
7425 	mutex_unlock(&mlxsw_sp->router->lock);
7426 	kfree(fib_work);
7427 }
7428 
7429 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
7430 {
7431 	struct mlxsw_sp_fib_event_work *fib_work =
7432 		    container_of(work, struct mlxsw_sp_fib_event_work, work);
7433 	struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
7434 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7435 	int err;
7436 
7437 	mutex_lock(&mlxsw_sp->router->lock);
7438 	mlxsw_sp_span_respin(mlxsw_sp);
7439 
7440 	switch (fib_work->event) {
7441 	case FIB_EVENT_ENTRY_REPLACE:
7442 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
7443 						   fib6_work->rt_arr,
7444 						   fib6_work->nrt6);
7445 		if (err) {
7446 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7447 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7448 							      fib6_work->rt_arr,
7449 							      fib6_work->nrt6);
7450 		}
7451 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7452 		break;
7453 	case FIB_EVENT_ENTRY_APPEND:
7454 		err = mlxsw_sp_router_fib6_append(mlxsw_sp,
7455 						  fib6_work->rt_arr,
7456 						  fib6_work->nrt6);
7457 		if (err) {
7458 			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7459 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7460 							      fib6_work->rt_arr,
7461 							      fib6_work->nrt6);
7462 		}
7463 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7464 		break;
7465 	case FIB_EVENT_ENTRY_DEL:
7466 		mlxsw_sp_router_fib6_del(mlxsw_sp,
7467 					 fib6_work->rt_arr,
7468 					 fib6_work->nrt6);
7469 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7470 		break;
7471 	}
7472 	mutex_unlock(&mlxsw_sp->router->lock);
7473 	kfree(fib_work);
7474 }
7475 
7476 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
7477 {
7478 	struct mlxsw_sp_fib_event_work *fib_work =
7479 		container_of(work, struct mlxsw_sp_fib_event_work, work);
7480 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7481 	bool replace;
7482 	int err;
7483 
7484 	rtnl_lock();
7485 	mutex_lock(&mlxsw_sp->router->lock);
7486 	switch (fib_work->event) {
7487 	case FIB_EVENT_ENTRY_REPLACE:
7488 	case FIB_EVENT_ENTRY_ADD:
7489 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
7490 
7491 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
7492 						replace);
7493 		if (err)
7494 			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7495 		mr_cache_put(fib_work->men_info.mfc);
7496 		break;
7497 	case FIB_EVENT_ENTRY_DEL:
7498 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
7499 		mr_cache_put(fib_work->men_info.mfc);
7500 		break;
7501 	case FIB_EVENT_VIF_ADD:
7502 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7503 						    &fib_work->ven_info);
7504 		if (err)
7505 			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7506 		dev_put(fib_work->ven_info.dev);
7507 		break;
7508 	case FIB_EVENT_VIF_DEL:
7509 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
7510 					      &fib_work->ven_info);
7511 		dev_put(fib_work->ven_info.dev);
7512 		break;
7513 	}
7514 	mutex_unlock(&mlxsw_sp->router->lock);
7515 	rtnl_unlock();
7516 	kfree(fib_work);
7517 }
7518 
7519 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
7520 				       struct fib_notifier_info *info)
7521 {
7522 	struct fib_entry_notifier_info *fen_info;
7523 	struct fib_nh_notifier_info *fnh_info;
7524 
7525 	switch (fib_work->event) {
7526 	case FIB_EVENT_ENTRY_REPLACE:
7527 	case FIB_EVENT_ENTRY_DEL:
7528 		fen_info = container_of(info, struct fib_entry_notifier_info,
7529 					info);
7530 		fib_work->fen_info = *fen_info;
7531 		/* Take reference on fib_info to prevent it from being
7532 		 * freed while work is queued. Release it afterwards.
7533 		 */
7534 		fib_info_hold(fib_work->fen_info.fi);
7535 		break;
7536 	case FIB_EVENT_NH_ADD:
7537 	case FIB_EVENT_NH_DEL:
7538 		fnh_info = container_of(info, struct fib_nh_notifier_info,
7539 					info);
7540 		fib_work->fnh_info = *fnh_info;
7541 		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
7542 		break;
7543 	}
7544 }
7545 
7546 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
7547 				      struct fib_notifier_info *info)
7548 {
7549 	struct fib6_entry_notifier_info *fen6_info;
7550 	int err;
7551 
7552 	switch (fib_work->event) {
7553 	case FIB_EVENT_ENTRY_REPLACE:
7554 	case FIB_EVENT_ENTRY_APPEND:
7555 	case FIB_EVENT_ENTRY_DEL:
7556 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
7557 					 info);
7558 		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
7559 						     fen6_info);
7560 		if (err)
7561 			return err;
7562 		break;
7563 	}
7564 
7565 	return 0;
7566 }
7567 
7568 static void
7569 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
7570 			    struct fib_notifier_info *info)
7571 {
7572 	switch (fib_work->event) {
7573 	case FIB_EVENT_ENTRY_REPLACE:
7574 	case FIB_EVENT_ENTRY_ADD:
7575 	case FIB_EVENT_ENTRY_DEL:
7576 		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
7577 		mr_cache_hold(fib_work->men_info.mfc);
7578 		break;
7579 	case FIB_EVENT_VIF_ADD:
7580 	case FIB_EVENT_VIF_DEL:
7581 		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
7582 		dev_hold(fib_work->ven_info.dev);
7583 		break;
7584 	}
7585 }
7586 
7587 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7588 					  struct fib_notifier_info *info,
7589 					  struct mlxsw_sp *mlxsw_sp)
7590 {
7591 	struct netlink_ext_ack *extack = info->extack;
7592 	struct fib_rule_notifier_info *fr_info;
7593 	struct fib_rule *rule;
7594 	int err = 0;
7595 
7596 	/* nothing to do at the moment */
7597 	if (event == FIB_EVENT_RULE_DEL)
7598 		return 0;
7599 
7600 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
7601 	rule = fr_info->rule;
7602 
7603 	/* Rule only affects locally generated traffic */
7604 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7605 		return 0;
7606 
7607 	switch (info->family) {
7608 	case AF_INET:
7609 		if (!fib4_rule_default(rule) && !rule->l3mdev)
7610 			err = -EOPNOTSUPP;
7611 		break;
7612 	case AF_INET6:
7613 		if (!fib6_rule_default(rule) && !rule->l3mdev)
7614 			err = -EOPNOTSUPP;
7615 		break;
7616 	case RTNL_FAMILY_IPMR:
7617 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
7618 			err = -EOPNOTSUPP;
7619 		break;
7620 	case RTNL_FAMILY_IP6MR:
7621 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7622 			err = -EOPNOTSUPP;
7623 		break;
7624 	}
7625 
7626 	if (err < 0)
7627 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7628 
7629 	return err;
7630 }
7631 
7632 /* Called with rcu_read_lock() */
7633 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7634 				     unsigned long event, void *ptr)
7635 {
7636 	struct mlxsw_sp_fib_event_work *fib_work;
7637 	struct fib_notifier_info *info = ptr;
7638 	struct mlxsw_sp_router *router;
7639 	int err;
7640 
7641 	if ((info->family != AF_INET && info->family != AF_INET6 &&
7642 	     info->family != RTNL_FAMILY_IPMR &&
7643 	     info->family != RTNL_FAMILY_IP6MR))
7644 		return NOTIFY_DONE;
7645 
7646 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7647 
7648 	switch (event) {
7649 	case FIB_EVENT_RULE_ADD:
7650 	case FIB_EVENT_RULE_DEL:
7651 		err = mlxsw_sp_router_fib_rule_event(event, info,
7652 						     router->mlxsw_sp);
7653 		return notifier_from_errno(err);
7654 	case FIB_EVENT_ENTRY_ADD:
7655 	case FIB_EVENT_ENTRY_REPLACE:
7656 	case FIB_EVENT_ENTRY_APPEND:
7657 		if (info->family == AF_INET) {
7658 			struct fib_entry_notifier_info *fen_info = ptr;
7659 
7660 			if (fen_info->fi->fib_nh_is_v6) {
7661 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7662 				return notifier_from_errno(-EINVAL);
7663 			}
7664 		}
7665 		break;
7666 	}
7667 
7668 	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
7669 	if (!fib_work)
7670 		return NOTIFY_BAD;
7671 
7672 	fib_work->mlxsw_sp = router->mlxsw_sp;
7673 	fib_work->event = event;
7674 
7675 	switch (info->family) {
7676 	case AF_INET:
7677 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
7678 		mlxsw_sp_router_fib4_event(fib_work, info);
7679 		break;
7680 	case AF_INET6:
7681 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
7682 		err = mlxsw_sp_router_fib6_event(fib_work, info);
7683 		if (err)
7684 			goto err_fib_event;
7685 		break;
7686 	case RTNL_FAMILY_IP6MR:
7687 	case RTNL_FAMILY_IPMR:
7688 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
7689 		mlxsw_sp_router_fibmr_event(fib_work, info);
7690 		break;
7691 	}
7692 
7693 	mlxsw_core_schedule_work(&fib_work->work);
7694 
7695 	return NOTIFY_DONE;
7696 
7697 err_fib_event:
7698 	kfree(fib_work);
7699 	return NOTIFY_BAD;
7700 }
7701 
7702 static struct mlxsw_sp_rif *
7703 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7704 			 const struct net_device *dev)
7705 {
7706 	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7707 	int i;
7708 
7709 	for (i = 0; i < max_rifs; i++)
7710 		if (mlxsw_sp->router->rifs[i] &&
7711 		    mlxsw_sp->router->rifs[i]->dev == dev)
7712 			return mlxsw_sp->router->rifs[i];
7713 
7714 	return NULL;
7715 }
7716 
7717 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7718 			 const struct net_device *dev)
7719 {
7720 	struct mlxsw_sp_rif *rif;
7721 
7722 	mutex_lock(&mlxsw_sp->router->lock);
7723 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7724 	mutex_unlock(&mlxsw_sp->router->lock);
7725 
7726 	return rif;
7727 }
7728 
7729 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7730 {
7731 	struct mlxsw_sp_rif *rif;
7732 	u16 vid = 0;
7733 
7734 	mutex_lock(&mlxsw_sp->router->lock);
7735 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7736 	if (!rif)
7737 		goto out;
7738 
7739 	/* We only return the VID for VLAN RIFs. Otherwise we return an
7740 	 * invalid value (0).
7741 	 */
7742 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7743 		goto out;
7744 
7745 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7746 
7747 out:
7748 	mutex_unlock(&mlxsw_sp->router->lock);
7749 	return vid;
7750 }
7751 
7752 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7753 {
7754 	char ritr_pl[MLXSW_REG_RITR_LEN];
7755 	int err;
7756 
7757 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
7758 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7759 	if (err)
7760 		return err;
7761 
7762 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
7763 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7764 }
7765 
7766 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
7767 					  struct mlxsw_sp_rif *rif)
7768 {
7769 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
7770 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
7771 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
7772 }
7773 
7774 static bool
7775 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
7776 			   unsigned long event)
7777 {
7778 	struct inet6_dev *inet6_dev;
7779 	bool addr_list_empty = true;
7780 	struct in_device *idev;
7781 
7782 	switch (event) {
7783 	case NETDEV_UP:
7784 		return rif == NULL;
7785 	case NETDEV_DOWN:
7786 		rcu_read_lock();
7787 		idev = __in_dev_get_rcu(dev);
7788 		if (idev && idev->ifa_list)
7789 			addr_list_empty = false;
7790 
7791 		inet6_dev = __in6_dev_get(dev);
7792 		if (addr_list_empty && inet6_dev &&
7793 		    !list_empty(&inet6_dev->addr_list))
7794 			addr_list_empty = false;
7795 		rcu_read_unlock();
7796 
7797 		/* macvlans do not have a RIF, but rather piggy back on the
7798 		 * RIF of their lower device.
7799 		 */
7800 		if (netif_is_macvlan(dev) && addr_list_empty)
7801 			return true;
7802 
7803 		if (rif && addr_list_empty &&
7804 		    !netif_is_l3_slave(rif->dev))
7805 			return true;
7806 		/* It is possible we already removed the RIF ourselves
7807 		 * if it was assigned to a netdev that is now a bridge
7808 		 * or LAG slave.
7809 		 */
7810 		return false;
7811 	}
7812 
7813 	return false;
7814 }
7815 
7816 static enum mlxsw_sp_rif_type
7817 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
7818 		      const struct net_device *dev)
7819 {
7820 	enum mlxsw_sp_fid_type type;
7821 
7822 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
7823 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
7824 
7825 	/* Otherwise RIF type is derived from the type of the underlying FID. */
7826 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
7827 		type = MLXSW_SP_FID_TYPE_8021Q;
7828 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
7829 		type = MLXSW_SP_FID_TYPE_8021Q;
7830 	else if (netif_is_bridge_master(dev))
7831 		type = MLXSW_SP_FID_TYPE_8021D;
7832 	else
7833 		type = MLXSW_SP_FID_TYPE_RFID;
7834 
7835 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
7836 }
7837 
7838 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
7839 				    u8 rif_entries)
7840 {
7841 	*p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
7842 				      rif_entries);
7843 	if (*p_rif_index == 0)
7844 		return -ENOBUFS;
7845 	*p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;
7846 
7847 	/* RIF indexes must be aligned to the allocation size. */
7848 	WARN_ON_ONCE(*p_rif_index % rif_entries);
7849 
7850 	return 0;
7851 }
7852 
7853 static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7854 				    u8 rif_entries)
7855 {
7856 	gen_pool_free(mlxsw_sp->router->rifs_table,
7857 		      MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
7858 }
7859 
7860 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
7861 					       u16 vr_id,
7862 					       struct net_device *l3_dev)
7863 {
7864 	struct mlxsw_sp_rif *rif;
7865 
7866 	rif = kzalloc(rif_size, GFP_KERNEL);
7867 	if (!rif)
7868 		return NULL;
7869 
7870 	INIT_LIST_HEAD(&rif->nexthop_list);
7871 	INIT_LIST_HEAD(&rif->neigh_list);
7872 	if (l3_dev) {
7873 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
7874 		rif->mtu = l3_dev->mtu;
7875 		rif->dev = l3_dev;
7876 	}
7877 	rif->vr_id = vr_id;
7878 	rif->rif_index = rif_index;
7879 
7880 	return rif;
7881 }
7882 
7883 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
7884 					   u16 rif_index)
7885 {
7886 	return mlxsw_sp->router->rifs[rif_index];
7887 }
7888 
7889 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
7890 {
7891 	return rif->rif_index;
7892 }
7893 
7894 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7895 {
7896 	return lb_rif->common.rif_index;
7897 }
7898 
7899 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7900 {
7901 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
7902 	struct mlxsw_sp_vr *ul_vr;
7903 
7904 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
7905 	if (WARN_ON(IS_ERR(ul_vr)))
7906 		return 0;
7907 
7908 	return ul_vr->id;
7909 }
7910 
7911 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7912 {
7913 	return lb_rif->ul_rif_id;
7914 }
7915 
7916 static bool
7917 mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
7918 {
7919 	return mlxsw_sp_rif_counter_valid_get(rif,
7920 					      MLXSW_SP_RIF_COUNTER_EGRESS) &&
7921 	       mlxsw_sp_rif_counter_valid_get(rif,
7922 					      MLXSW_SP_RIF_COUNTER_INGRESS);
7923 }
7924 
7925 static int
7926 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
7927 {
7928 	int err;
7929 
7930 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
7931 	if (err)
7932 		return err;
7933 
7934 	/* Clear stale data. */
7935 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
7936 					       MLXSW_SP_RIF_COUNTER_INGRESS,
7937 					       NULL);
7938 	if (err)
7939 		goto err_clear_ingress;
7940 
7941 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
7942 	if (err)
7943 		goto err_alloc_egress;
7944 
7945 	/* Clear stale data. */
7946 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
7947 					       MLXSW_SP_RIF_COUNTER_EGRESS,
7948 					       NULL);
7949 	if (err)
7950 		goto err_clear_egress;
7951 
7952 	return 0;
7953 
7954 err_clear_egress:
7955 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
7956 err_alloc_egress:
7957 err_clear_ingress:
7958 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
7959 	return err;
7960 }
7961 
7962 static void
7963 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
7964 {
7965 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
7966 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
7967 }
7968 
7969 static void
7970 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
7971 					  struct netdev_notifier_offload_xstats_info *info)
7972 {
7973 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
7974 		return;
7975 	netdev_offload_xstats_report_used(info->report_used);
7976 }
7977 
7978 static int
7979 mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
7980 				    struct rtnl_hw_stats64 *p_stats)
7981 {
7982 	struct mlxsw_sp_rif_counter_set_basic ingress;
7983 	struct mlxsw_sp_rif_counter_set_basic egress;
7984 	int err;
7985 
7986 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
7987 					       MLXSW_SP_RIF_COUNTER_INGRESS,
7988 					       &ingress);
7989 	if (err)
7990 		return err;
7991 
7992 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
7993 					       MLXSW_SP_RIF_COUNTER_EGRESS,
7994 					       &egress);
7995 	if (err)
7996 		return err;
7997 
7998 #define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX)		\
7999 		((SET.good_unicast_ ## SFX) +		\
8000 		 (SET.good_multicast_ ## SFX) +		\
8001 		 (SET.good_broadcast_ ## SFX))
8002 
8003 	p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
8004 	p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
8005 	p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
8006 	p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
8007 	p_stats->rx_errors = ingress.error_packets;
8008 	p_stats->tx_errors = egress.error_packets;
8009 	p_stats->rx_dropped = ingress.discard_packets;
8010 	p_stats->tx_dropped = egress.discard_packets;
8011 	p_stats->multicast = ingress.good_multicast_packets +
8012 			     ingress.good_broadcast_packets;
8013 
8014 #undef MLXSW_SP_ROUTER_ALL_GOOD
8015 
8016 	return 0;
8017 }
8018 
8019 static int
8020 mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
8021 					   struct netdev_notifier_offload_xstats_info *info)
8022 {
8023 	struct rtnl_hw_stats64 stats = {};
8024 	int err;
8025 
8026 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8027 		return 0;
8028 
8029 	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
8030 	if (err)
8031 		return err;
8032 
8033 	netdev_offload_xstats_report_delta(info->report_delta, &stats);
8034 	return 0;
8035 }
8036 
8037 struct mlxsw_sp_router_hwstats_notify_work {
8038 	struct work_struct work;
8039 	struct net_device *dev;
8040 };
8041 
8042 static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
8043 {
8044 	struct mlxsw_sp_router_hwstats_notify_work *hws_work =
8045 		container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
8046 			     work);
8047 
8048 	rtnl_lock();
8049 	rtnl_offload_xstats_notify(hws_work->dev);
8050 	rtnl_unlock();
8051 	dev_put(hws_work->dev);
8052 	kfree(hws_work);
8053 }
8054 
8055 static void
8056 mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
8057 {
8058 	struct mlxsw_sp_router_hwstats_notify_work *hws_work;
8059 
8060 	/* To collect notification payload, the core ends up sending another
8061 	 * notifier block message, which would deadlock on the attempt to
8062 	 * acquire the router lock again. Just postpone the notification until
8063 	 * later.
8064 	 */
8065 
8066 	hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
8067 	if (!hws_work)
8068 		return;
8069 
8070 	INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
8071 	dev_hold(dev);
8072 	hws_work->dev = dev;
8073 	mlxsw_core_schedule_work(&hws_work->work);
8074 }
8075 
8076 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8077 {
8078 	return rif->dev->ifindex;
8079 }
8080 
8081 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
8082 {
8083 	return rif->dev;
8084 }
8085 
8086 static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
8087 {
8088 	struct rtnl_hw_stats64 stats = {};
8089 
8090 	if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
8091 		netdev_offload_xstats_push_delta(rif->dev,
8092 						 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8093 						 &stats);
8094 }
8095 
8096 static struct mlxsw_sp_rif *
8097 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8098 		    const struct mlxsw_sp_rif_params *params,
8099 		    struct netlink_ext_ack *extack)
8100 {
8101 	u8 rif_entries = params->double_entry ? 2 : 1;
8102 	u32 tb_id = l3mdev_fib_table(params->dev);
8103 	const struct mlxsw_sp_rif_ops *ops;
8104 	struct mlxsw_sp_fid *fid = NULL;
8105 	enum mlxsw_sp_rif_type type;
8106 	struct mlxsw_sp_rif *rif;
8107 	struct mlxsw_sp_vr *vr;
8108 	u16 rif_index;
8109 	int i, err;
8110 
8111 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8112 	ops = mlxsw_sp->router->rif_ops_arr[type];
8113 
8114 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8115 	if (IS_ERR(vr))
8116 		return ERR_CAST(vr);
8117 	vr->rif_count++;
8118 
8119 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
8120 	if (err) {
8121 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8122 		goto err_rif_index_alloc;
8123 	}
8124 
8125 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
8126 	if (!rif) {
8127 		err = -ENOMEM;
8128 		goto err_rif_alloc;
8129 	}
8130 	dev_hold(rif->dev);
8131 	mlxsw_sp->router->rifs[rif_index] = rif;
8132 	rif->mlxsw_sp = mlxsw_sp;
8133 	rif->ops = ops;
8134 	rif->rif_entries = rif_entries;
8135 
8136 	if (ops->fid_get) {
8137 		fid = ops->fid_get(rif, extack);
8138 		if (IS_ERR(fid)) {
8139 			err = PTR_ERR(fid);
8140 			goto err_fid_get;
8141 		}
8142 		rif->fid = fid;
8143 	}
8144 
8145 	if (ops->setup)
8146 		ops->setup(rif, params);
8147 
8148 	err = ops->configure(rif, extack);
8149 	if (err)
8150 		goto err_configure;
8151 
8152 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8153 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8154 		if (err)
8155 			goto err_mr_rif_add;
8156 	}
8157 
8158 	if (netdev_offload_xstats_enabled(rif->dev,
8159 					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8160 		err = mlxsw_sp_router_port_l3_stats_enable(rif);
8161 		if (err)
8162 			goto err_stats_enable;
8163 		mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
8164 	} else {
8165 		mlxsw_sp_rif_counters_alloc(rif);
8166 	}
8167 
8168 	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
8169 	return rif;
8170 
8171 err_stats_enable:
8172 err_mr_rif_add:
8173 	for (i--; i >= 0; i--)
8174 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8175 	ops->deconfigure(rif);
8176 err_configure:
8177 	if (fid)
8178 		mlxsw_sp_fid_put(fid);
8179 err_fid_get:
8180 	mlxsw_sp->router->rifs[rif_index] = NULL;
8181 	dev_put(rif->dev);
8182 	kfree(rif);
8183 err_rif_alloc:
8184 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8185 err_rif_index_alloc:
8186 	vr->rif_count--;
8187 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8188 	return ERR_PTR(err);
8189 }
8190 
8191 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8192 {
8193 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
8194 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8195 	struct mlxsw_sp_fid *fid = rif->fid;
8196 	u8 rif_entries = rif->rif_entries;
8197 	u16 rif_index = rif->rif_index;
8198 	struct mlxsw_sp_vr *vr;
8199 	int i;
8200 
8201 	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
8202 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8203 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
8204 
8205 	if (netdev_offload_xstats_enabled(rif->dev,
8206 					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8207 		mlxsw_sp_rif_push_l3_stats(rif);
8208 		mlxsw_sp_router_port_l3_stats_disable(rif);
8209 		mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
8210 	} else {
8211 		mlxsw_sp_rif_counters_free(rif);
8212 	}
8213 
8214 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8215 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8216 	ops->deconfigure(rif);
8217 	if (fid)
8218 		/* Loopback RIFs are not associated with a FID. */
8219 		mlxsw_sp_fid_put(fid);
8220 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8221 	dev_put(rif->dev);
8222 	kfree(rif);
8223 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8224 	vr->rif_count--;
8225 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8226 }
8227 
8228 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8229 				 struct net_device *dev)
8230 {
8231 	struct mlxsw_sp_rif *rif;
8232 
8233 	mutex_lock(&mlxsw_sp->router->lock);
8234 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8235 	if (!rif)
8236 		goto out;
8237 	mlxsw_sp_rif_destroy(rif);
8238 out:
8239 	mutex_unlock(&mlxsw_sp->router->lock);
8240 }
8241 
8242 static void
8243 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8244 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8245 {
8246 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8247 
8248 	params->vid = mlxsw_sp_port_vlan->vid;
8249 	params->lag = mlxsw_sp_port->lagged;
8250 	if (params->lag)
8251 		params->lag_id = mlxsw_sp_port->lag_id;
8252 	else
8253 		params->system_port = mlxsw_sp_port->local_port;
8254 }
8255 
8256 static struct mlxsw_sp_rif_subport *
8257 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8258 {
8259 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
8260 }
8261 
8262 static struct mlxsw_sp_rif *
8263 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8264 			 const struct mlxsw_sp_rif_params *params,
8265 			 struct netlink_ext_ack *extack)
8266 {
8267 	struct mlxsw_sp_rif_subport *rif_subport;
8268 	struct mlxsw_sp_rif *rif;
8269 
8270 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8271 	if (!rif)
8272 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8273 
8274 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8275 	refcount_inc(&rif_subport->ref_count);
8276 	return rif;
8277 }
8278 
8279 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8280 {
8281 	struct mlxsw_sp_rif_subport *rif_subport;
8282 
8283 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8284 	if (!refcount_dec_and_test(&rif_subport->ref_count))
8285 		return;
8286 
8287 	mlxsw_sp_rif_destroy(rif);
8288 }
8289 
8290 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8291 						struct mlxsw_sp_rif_mac_profile *profile,
8292 						struct netlink_ext_ack *extack)
8293 {
8294 	u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8295 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8296 	int id;
8297 
8298 	id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8299 		       max_rif_mac_profiles, GFP_KERNEL);
8300 
8301 	if (id >= 0) {
8302 		profile->id = id;
8303 		return 0;
8304 	}
8305 
8306 	if (id == -ENOSPC)
8307 		NL_SET_ERR_MSG_MOD(extack,
8308 				   "Exceeded number of supported router interface MAC profiles");
8309 
8310 	return id;
8311 }
8312 
8313 static struct mlxsw_sp_rif_mac_profile *
8314 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8315 {
8316 	struct mlxsw_sp_rif_mac_profile *profile;
8317 
8318 	profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8319 			     mac_profile);
8320 	WARN_ON(!profile);
8321 	return profile;
8322 }
8323 
8324 static struct mlxsw_sp_rif_mac_profile *
8325 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8326 {
8327 	struct mlxsw_sp_rif_mac_profile *profile;
8328 
8329 	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8330 	if (!profile)
8331 		return NULL;
8332 
8333 	ether_addr_copy(profile->mac_prefix, mac);
8334 	refcount_set(&profile->ref_count, 1);
8335 	return profile;
8336 }
8337 
8338 static struct mlxsw_sp_rif_mac_profile *
8339 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8340 {
8341 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8342 	struct mlxsw_sp_rif_mac_profile *profile;
8343 	int id;
8344 
8345 	idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8346 		if (ether_addr_equal_masked(profile->mac_prefix, mac,
8347 					    mlxsw_sp->mac_mask))
8348 			return profile;
8349 	}
8350 
8351 	return NULL;
8352 }
8353 
8354 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
8355 {
8356 	const struct mlxsw_sp *mlxsw_sp = priv;
8357 
8358 	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
8359 }
8360 
8361 static u64 mlxsw_sp_rifs_occ_get(void *priv)
8362 {
8363 	const struct mlxsw_sp *mlxsw_sp = priv;
8364 
8365 	return atomic_read(&mlxsw_sp->router->rifs_count);
8366 }
8367 
8368 static struct mlxsw_sp_rif_mac_profile *
8369 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8370 				struct netlink_ext_ack *extack)
8371 {
8372 	struct mlxsw_sp_rif_mac_profile *profile;
8373 	int err;
8374 
8375 	profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8376 	if (!profile)
8377 		return ERR_PTR(-ENOMEM);
8378 
8379 	err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
8380 	if (err)
8381 		goto profile_index_alloc_err;
8382 
8383 	atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
8384 	return profile;
8385 
8386 profile_index_alloc_err:
8387 	kfree(profile);
8388 	return ERR_PTR(err);
8389 }
8390 
8391 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
8392 					     u8 mac_profile)
8393 {
8394 	struct mlxsw_sp_rif_mac_profile *profile;
8395 
8396 	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
8397 	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
8398 	kfree(profile);
8399 }
8400 
8401 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
8402 					const char *mac, u8 *p_mac_profile,
8403 					struct netlink_ext_ack *extack)
8404 {
8405 	struct mlxsw_sp_rif_mac_profile *profile;
8406 
8407 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
8408 	if (profile) {
8409 		refcount_inc(&profile->ref_count);
8410 		goto out;
8411 	}
8412 
8413 	profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
8414 	if (IS_ERR(profile))
8415 		return PTR_ERR(profile);
8416 
8417 out:
8418 	*p_mac_profile = profile->id;
8419 	return 0;
8420 }
8421 
8422 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
8423 					 u8 mac_profile)
8424 {
8425 	struct mlxsw_sp_rif_mac_profile *profile;
8426 
8427 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8428 			   mac_profile);
8429 	if (WARN_ON(!profile))
8430 		return;
8431 
8432 	if (!refcount_dec_and_test(&profile->ref_count))
8433 		return;
8434 
8435 	mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
8436 }
8437 
8438 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
8439 {
8440 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8441 	struct mlxsw_sp_rif_mac_profile *profile;
8442 
8443 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8444 			   rif->mac_profile_id);
8445 	if (WARN_ON(!profile))
8446 		return false;
8447 
8448 	return refcount_read(&profile->ref_count) > 1;
8449 }
8450 
8451 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
8452 					 const char *new_mac)
8453 {
8454 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8455 	struct mlxsw_sp_rif_mac_profile *profile;
8456 
8457 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8458 			   rif->mac_profile_id);
8459 	if (WARN_ON(!profile))
8460 		return -EINVAL;
8461 
8462 	ether_addr_copy(profile->mac_prefix, new_mac);
8463 	return 0;
8464 }
8465 
8466 static int
8467 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
8468 				 struct mlxsw_sp_rif *rif,
8469 				 const char *new_mac,
8470 				 struct netlink_ext_ack *extack)
8471 {
8472 	u8 mac_profile;
8473 	int err;
8474 
8475 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
8476 	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
8477 		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
8478 
8479 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
8480 					   &mac_profile, extack);
8481 	if (err)
8482 		return err;
8483 
8484 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
8485 	rif->mac_profile_id = mac_profile;
8486 	return 0;
8487 }
8488 
8489 static int
8490 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8491 				 struct net_device *l3_dev,
8492 				 struct netlink_ext_ack *extack)
8493 {
8494 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8495 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8496 	struct mlxsw_sp_rif_params params = {
8497 		.dev = l3_dev,
8498 	};
8499 	u16 vid = mlxsw_sp_port_vlan->vid;
8500 	struct mlxsw_sp_rif *rif;
8501 	struct mlxsw_sp_fid *fid;
8502 	int err;
8503 
8504 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8505 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8506 	if (IS_ERR(rif))
8507 		return PTR_ERR(rif);
8508 
8509 	/* FID was already created, just take a reference */
8510 	fid = rif->ops->fid_get(rif, extack);
8511 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8512 	if (err)
8513 		goto err_fid_port_vid_map;
8514 
8515 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8516 	if (err)
8517 		goto err_port_vid_learning_set;
8518 
8519 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8520 					BR_STATE_FORWARDING);
8521 	if (err)
8522 		goto err_port_vid_stp_set;
8523 
8524 	mlxsw_sp_port_vlan->fid = fid;
8525 
8526 	return 0;
8527 
8528 err_port_vid_stp_set:
8529 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8530 err_port_vid_learning_set:
8531 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8532 err_fid_port_vid_map:
8533 	mlxsw_sp_fid_put(fid);
8534 	mlxsw_sp_rif_subport_put(rif);
8535 	return err;
8536 }
8537 
8538 static void
8539 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8540 {
8541 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8542 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8543 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8544 	u16 vid = mlxsw_sp_port_vlan->vid;
8545 
8546 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8547 		return;
8548 
8549 	mlxsw_sp_port_vlan->fid = NULL;
8550 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8551 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8552 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8553 	mlxsw_sp_fid_put(fid);
8554 	mlxsw_sp_rif_subport_put(rif);
8555 }
8556 
8557 int
8558 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8559 			       struct net_device *l3_dev,
8560 			       struct netlink_ext_ack *extack)
8561 {
8562 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8563 	struct mlxsw_sp_rif *rif;
8564 	int err = 0;
8565 
8566 	mutex_lock(&mlxsw_sp->router->lock);
8567 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8568 	if (!rif)
8569 		goto out;
8570 
8571 	err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8572 					       extack);
8573 out:
8574 	mutex_unlock(&mlxsw_sp->router->lock);
8575 	return err;
8576 }
8577 
8578 void
8579 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8580 {
8581 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8582 
8583 	mutex_lock(&mlxsw_sp->router->lock);
8584 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8585 	mutex_unlock(&mlxsw_sp->router->lock);
8586 }
8587 
8588 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8589 					     struct net_device *port_dev,
8590 					     unsigned long event, u16 vid,
8591 					     struct netlink_ext_ack *extack)
8592 {
8593 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8594 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8595 
8596 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8597 	if (WARN_ON(!mlxsw_sp_port_vlan))
8598 		return -EINVAL;
8599 
8600 	switch (event) {
8601 	case NETDEV_UP:
8602 		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
8603 							l3_dev, extack);
8604 	case NETDEV_DOWN:
8605 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8606 		break;
8607 	}
8608 
8609 	return 0;
8610 }
8611 
8612 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
8613 					unsigned long event,
8614 					struct netlink_ext_ack *extack)
8615 {
8616 	if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
8617 		return 0;
8618 
8619 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
8620 						 MLXSW_SP_DEFAULT_VID, extack);
8621 }
8622 
8623 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
8624 					 struct net_device *lag_dev,
8625 					 unsigned long event, u16 vid,
8626 					 struct netlink_ext_ack *extack)
8627 {
8628 	struct net_device *port_dev;
8629 	struct list_head *iter;
8630 	int err;
8631 
8632 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
8633 		if (mlxsw_sp_port_dev_check(port_dev)) {
8634 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
8635 								port_dev,
8636 								event, vid,
8637 								extack);
8638 			if (err)
8639 				return err;
8640 		}
8641 	}
8642 
8643 	return 0;
8644 }
8645 
8646 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
8647 				       unsigned long event,
8648 				       struct netlink_ext_ack *extack)
8649 {
8650 	if (netif_is_bridge_port(lag_dev))
8651 		return 0;
8652 
8653 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
8654 					     MLXSW_SP_DEFAULT_VID, extack);
8655 }
8656 
8657 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8658 					  struct net_device *l3_dev,
8659 					  unsigned long event,
8660 					  struct netlink_ext_ack *extack)
8661 {
8662 	struct mlxsw_sp_rif_params params = {
8663 		.dev = l3_dev,
8664 	};
8665 	struct mlxsw_sp_rif *rif;
8666 
8667 	switch (event) {
8668 	case NETDEV_UP:
8669 		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
8670 			u16 proto;
8671 
8672 			br_vlan_get_proto(l3_dev, &proto);
8673 			if (proto == ETH_P_8021AD) {
8674 				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
8675 				return -EOPNOTSUPP;
8676 			}
8677 		}
8678 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8679 		if (IS_ERR(rif))
8680 			return PTR_ERR(rif);
8681 		break;
8682 	case NETDEV_DOWN:
8683 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8684 		mlxsw_sp_rif_destroy(rif);
8685 		break;
8686 	}
8687 
8688 	return 0;
8689 }
8690 
8691 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8692 					struct net_device *vlan_dev,
8693 					unsigned long event,
8694 					struct netlink_ext_ack *extack)
8695 {
8696 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8697 	u16 vid = vlan_dev_vlan_id(vlan_dev);
8698 
8699 	if (netif_is_bridge_port(vlan_dev))
8700 		return 0;
8701 
8702 	if (mlxsw_sp_port_dev_check(real_dev))
8703 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
8704 							 event, vid, extack);
8705 	else if (netif_is_lag_master(real_dev))
8706 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
8707 						     vid, extack);
8708 	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
8709 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
8710 						      extack);
8711 
8712 	return 0;
8713 }
8714 
8715 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
8716 {
8717 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
8718 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8719 
8720 	return ether_addr_equal_masked(mac, vrrp4, mask);
8721 }
8722 
8723 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
8724 {
8725 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
8726 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8727 
8728 	return ether_addr_equal_masked(mac, vrrp6, mask);
8729 }
8730 
8731 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8732 				const u8 *mac, bool adding)
8733 {
8734 	char ritr_pl[MLXSW_REG_RITR_LEN];
8735 	u8 vrrp_id = adding ? mac[5] : 0;
8736 	int err;
8737 
8738 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
8739 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
8740 		return 0;
8741 
8742 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8743 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8744 	if (err)
8745 		return err;
8746 
8747 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
8748 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
8749 	else
8750 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
8751 
8752 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8753 }
8754 
8755 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
8756 				    const struct net_device *macvlan_dev,
8757 				    struct netlink_ext_ack *extack)
8758 {
8759 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8760 	struct mlxsw_sp_rif *rif;
8761 	int err;
8762 
8763 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8764 	if (!rif) {
8765 		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
8766 		return -EOPNOTSUPP;
8767 	}
8768 
8769 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8770 				  mlxsw_sp_fid_index(rif->fid), true);
8771 	if (err)
8772 		return err;
8773 
8774 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
8775 				   macvlan_dev->dev_addr, true);
8776 	if (err)
8777 		goto err_rif_vrrp_add;
8778 
8779 	/* Make sure the bridge driver does not have this MAC pointing at
8780 	 * some other port.
8781 	 */
8782 	if (rif->ops->fdb_del)
8783 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
8784 
8785 	return 0;
8786 
8787 err_rif_vrrp_add:
8788 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8789 			    mlxsw_sp_fid_index(rif->fid), false);
8790 	return err;
8791 }
8792 
8793 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8794 				       const struct net_device *macvlan_dev)
8795 {
8796 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8797 	struct mlxsw_sp_rif *rif;
8798 
8799 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8800 	/* If we do not have a RIF, then we already took care of
8801 	 * removing the macvlan's MAC during RIF deletion.
8802 	 */
8803 	if (!rif)
8804 		return;
8805 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
8806 			     false);
8807 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8808 			    mlxsw_sp_fid_index(rif->fid), false);
8809 }
8810 
8811 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8812 			      const struct net_device *macvlan_dev)
8813 {
8814 	mutex_lock(&mlxsw_sp->router->lock);
8815 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8816 	mutex_unlock(&mlxsw_sp->router->lock);
8817 }
8818 
8819 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
8820 					   struct net_device *macvlan_dev,
8821 					   unsigned long event,
8822 					   struct netlink_ext_ack *extack)
8823 {
8824 	switch (event) {
8825 	case NETDEV_UP:
8826 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
8827 	case NETDEV_DOWN:
8828 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8829 		break;
8830 	}
8831 
8832 	return 0;
8833 }
8834 
8835 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
8836 				     struct net_device *dev,
8837 				     unsigned long event,
8838 				     struct netlink_ext_ack *extack)
8839 {
8840 	if (mlxsw_sp_port_dev_check(dev))
8841 		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
8842 	else if (netif_is_lag_master(dev))
8843 		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
8844 	else if (netif_is_bridge_master(dev))
8845 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
8846 						      extack);
8847 	else if (is_vlan_dev(dev))
8848 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
8849 						    extack);
8850 	else if (netif_is_macvlan(dev))
8851 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
8852 						       extack);
8853 	else
8854 		return 0;
8855 }
8856 
8857 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
8858 				   unsigned long event, void *ptr)
8859 {
8860 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
8861 	struct net_device *dev = ifa->ifa_dev->dev;
8862 	struct mlxsw_sp_router *router;
8863 	struct mlxsw_sp_rif *rif;
8864 	int err = 0;
8865 
8866 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
8867 	if (event == NETDEV_UP)
8868 		return NOTIFY_DONE;
8869 
8870 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
8871 	mutex_lock(&router->lock);
8872 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
8873 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8874 		goto out;
8875 
8876 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
8877 out:
8878 	mutex_unlock(&router->lock);
8879 	return notifier_from_errno(err);
8880 }
8881 
8882 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
8883 				  unsigned long event, void *ptr)
8884 {
8885 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
8886 	struct net_device *dev = ivi->ivi_dev->dev;
8887 	struct mlxsw_sp *mlxsw_sp;
8888 	struct mlxsw_sp_rif *rif;
8889 	int err = 0;
8890 
8891 	mlxsw_sp = mlxsw_sp_lower_get(dev);
8892 	if (!mlxsw_sp)
8893 		return NOTIFY_DONE;
8894 
8895 	mutex_lock(&mlxsw_sp->router->lock);
8896 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8897 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8898 		goto out;
8899 
8900 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
8901 out:
8902 	mutex_unlock(&mlxsw_sp->router->lock);
8903 	return notifier_from_errno(err);
8904 }
8905 
8906 struct mlxsw_sp_inet6addr_event_work {
8907 	struct work_struct work;
8908 	struct mlxsw_sp *mlxsw_sp;
8909 	struct net_device *dev;
8910 	unsigned long event;
8911 };
8912 
8913 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
8914 {
8915 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
8916 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
8917 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
8918 	struct net_device *dev = inet6addr_work->dev;
8919 	unsigned long event = inet6addr_work->event;
8920 	struct mlxsw_sp_rif *rif;
8921 
8922 	rtnl_lock();
8923 	mutex_lock(&mlxsw_sp->router->lock);
8924 
8925 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8926 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8927 		goto out;
8928 
8929 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
8930 out:
8931 	mutex_unlock(&mlxsw_sp->router->lock);
8932 	rtnl_unlock();
8933 	dev_put(dev);
8934 	kfree(inet6addr_work);
8935 }
8936 
8937 /* Called with rcu_read_lock() */
8938 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
8939 				    unsigned long event, void *ptr)
8940 {
8941 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
8942 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
8943 	struct net_device *dev = if6->idev->dev;
8944 	struct mlxsw_sp_router *router;
8945 
8946 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
8947 	if (event == NETDEV_UP)
8948 		return NOTIFY_DONE;
8949 
8950 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
8951 	if (!inet6addr_work)
8952 		return NOTIFY_BAD;
8953 
8954 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
8955 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
8956 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
8957 	inet6addr_work->dev = dev;
8958 	inet6addr_work->event = event;
8959 	dev_hold(dev);
8960 	mlxsw_core_schedule_work(&inet6addr_work->work);
8961 
8962 	return NOTIFY_DONE;
8963 }
8964 
8965 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
8966 				   unsigned long event, void *ptr)
8967 {
8968 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
8969 	struct net_device *dev = i6vi->i6vi_dev->dev;
8970 	struct mlxsw_sp *mlxsw_sp;
8971 	struct mlxsw_sp_rif *rif;
8972 	int err = 0;
8973 
8974 	mlxsw_sp = mlxsw_sp_lower_get(dev);
8975 	if (!mlxsw_sp)
8976 		return NOTIFY_DONE;
8977 
8978 	mutex_lock(&mlxsw_sp->router->lock);
8979 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8980 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8981 		goto out;
8982 
8983 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
8984 out:
8985 	mutex_unlock(&mlxsw_sp->router->lock);
8986 	return notifier_from_errno(err);
8987 }
8988 
8989 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8990 			     const char *mac, int mtu, u8 mac_profile)
8991 {
8992 	char ritr_pl[MLXSW_REG_RITR_LEN];
8993 	int err;
8994 
8995 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8996 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8997 	if (err)
8998 		return err;
8999 
9000 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
9001 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
9002 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
9003 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
9004 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9005 }
9006 
9007 static int
9008 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9009 				  struct mlxsw_sp_rif *rif,
9010 				  struct netlink_ext_ack *extack)
9011 {
9012 	struct net_device *dev = rif->dev;
9013 	u8 old_mac_profile;
9014 	u16 fid_index;
9015 	int err;
9016 
9017 	fid_index = mlxsw_sp_fid_index(rif->fid);
9018 
9019 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9020 	if (err)
9021 		return err;
9022 
9023 	old_mac_profile = rif->mac_profile_id;
9024 	err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
9025 					       extack);
9026 	if (err)
9027 		goto err_rif_mac_profile_replace;
9028 
9029 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9030 				dev->mtu, rif->mac_profile_id);
9031 	if (err)
9032 		goto err_rif_edit;
9033 
9034 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9035 	if (err)
9036 		goto err_rif_fdb_op;
9037 
9038 	if (rif->mtu != dev->mtu) {
9039 		struct mlxsw_sp_vr *vr;
9040 		int i;
9041 
9042 		/* The RIF is relevant only to its mr_table instance, as unlike
9043 		 * unicast routing, in multicast routing a RIF cannot be shared
9044 		 * between several multicast routing tables.
9045 		 */
9046 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
9047 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9048 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
9049 						   rif, dev->mtu);
9050 	}
9051 
9052 	ether_addr_copy(rif->addr, dev->dev_addr);
9053 	rif->mtu = dev->mtu;
9054 
9055 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9056 
9057 	return 0;
9058 
9059 err_rif_fdb_op:
9060 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
9061 			  old_mac_profile);
9062 err_rif_edit:
9063 	mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9064 err_rif_mac_profile_replace:
9065 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9066 	return err;
9067 }
9068 
9069 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9070 			    struct netdev_notifier_pre_changeaddr_info *info)
9071 {
9072 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9073 	struct mlxsw_sp_rif_mac_profile *profile;
9074 	struct netlink_ext_ack *extack;
9075 	u8 max_rif_mac_profiles;
9076 	u64 occ;
9077 
9078 	extack = netdev_notifier_info_to_extack(&info->info);
9079 
9080 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9081 	if (profile)
9082 		return 0;
9083 
9084 	max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9085 	occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9086 	if (occ < max_rif_mac_profiles)
9087 		return 0;
9088 
9089 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9090 		return 0;
9091 
9092 	NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9093 	return -ENOBUFS;
9094 }
9095 
9096 static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
9097 {
9098 	switch (event) {
9099 	case NETDEV_OFFLOAD_XSTATS_ENABLE:
9100 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
9101 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9102 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9103 		return true;
9104 	}
9105 
9106 	return false;
9107 }
9108 
9109 static int
9110 mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
9111 					unsigned long event,
9112 					struct netdev_notifier_offload_xstats_info *info)
9113 {
9114 	switch (info->type) {
9115 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
9116 		break;
9117 	default:
9118 		return 0;
9119 	}
9120 
9121 	switch (event) {
9122 	case NETDEV_OFFLOAD_XSTATS_ENABLE:
9123 		return mlxsw_sp_router_port_l3_stats_enable(rif);
9124 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
9125 		mlxsw_sp_router_port_l3_stats_disable(rif);
9126 		return 0;
9127 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9128 		mlxsw_sp_router_port_l3_stats_report_used(rif, info);
9129 		return 0;
9130 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9131 		return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
9132 	}
9133 
9134 	WARN_ON_ONCE(1);
9135 	return 0;
9136 }
9137 
9138 static int
9139 mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
9140 				      struct net_device *dev,
9141 				      unsigned long event,
9142 				      struct netdev_notifier_offload_xstats_info *info)
9143 {
9144 	struct mlxsw_sp_rif *rif;
9145 
9146 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9147 	if (!rif)
9148 		return 0;
9149 
9150 	return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
9151 }
9152 
9153 static bool mlxsw_sp_is_router_event(unsigned long event)
9154 {
9155 	switch (event) {
9156 	case NETDEV_PRE_CHANGEADDR:
9157 	case NETDEV_CHANGEADDR:
9158 	case NETDEV_CHANGEMTU:
9159 		return true;
9160 	default:
9161 		return false;
9162 	}
9163 }
9164 
9165 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
9166 						unsigned long event, void *ptr)
9167 {
9168 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
9169 	struct mlxsw_sp *mlxsw_sp;
9170 	struct mlxsw_sp_rif *rif;
9171 
9172 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9173 	if (!mlxsw_sp)
9174 		return 0;
9175 
9176 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9177 	if (!rif)
9178 		return 0;
9179 
9180 	switch (event) {
9181 	case NETDEV_CHANGEMTU:
9182 	case NETDEV_CHANGEADDR:
9183 		return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
9184 	case NETDEV_PRE_CHANGEADDR:
9185 		return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
9186 	default:
9187 		WARN_ON_ONCE(1);
9188 		break;
9189 	}
9190 
9191 	return 0;
9192 }
9193 
9194 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
9195 				  struct net_device *l3_dev,
9196 				  struct netlink_ext_ack *extack)
9197 {
9198 	struct mlxsw_sp_rif *rif;
9199 
9200 	/* If netdev is already associated with a RIF, then we need to
9201 	 * destroy it and create a new one with the new virtual router ID.
9202 	 */
9203 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9204 	if (rif)
9205 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
9206 					  extack);
9207 
9208 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
9209 }
9210 
9211 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9212 				    struct net_device *l3_dev)
9213 {
9214 	struct mlxsw_sp_rif *rif;
9215 
9216 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9217 	if (!rif)
9218 		return;
9219 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
9220 }
9221 
9222 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
9223 {
9224 	struct netdev_notifier_changeupper_info *info = ptr;
9225 
9226 	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
9227 		return false;
9228 	return netif_is_l3_master(info->upper_dev);
9229 }
9230 
9231 static int
9232 mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
9233 			     struct netdev_notifier_changeupper_info *info)
9234 {
9235 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
9236 	int err = 0;
9237 
9238 	/* We do not create a RIF for a macvlan, but only use it to
9239 	 * direct more MAC addresses to the router.
9240 	 */
9241 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
9242 		return 0;
9243 
9244 	switch (event) {
9245 	case NETDEV_PRECHANGEUPPER:
9246 		break;
9247 	case NETDEV_CHANGEUPPER:
9248 		if (info->linking) {
9249 			struct netlink_ext_ack *extack;
9250 
9251 			extack = netdev_notifier_info_to_extack(&info->info);
9252 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
9253 		} else {
9254 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
9255 		}
9256 		break;
9257 	}
9258 
9259 	return err;
9260 }
9261 
9262 static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
9263 					   unsigned long event, void *ptr)
9264 {
9265 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
9266 	struct mlxsw_sp_router *router;
9267 	struct mlxsw_sp *mlxsw_sp;
9268 	int err = 0;
9269 
9270 	router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
9271 	mlxsw_sp = router->mlxsw_sp;
9272 
9273 	mutex_lock(&mlxsw_sp->router->lock);
9274 
9275 	if (mlxsw_sp_is_offload_xstats_event(event))
9276 		err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
9277 							    event, ptr);
9278 	else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
9279 		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
9280 						       event, ptr);
9281 	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
9282 		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
9283 						       event, ptr);
9284 	else if (mlxsw_sp_is_router_event(event))
9285 		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
9286 	else if (mlxsw_sp_is_vrf_event(event, ptr))
9287 		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
9288 
9289 	mutex_unlock(&mlxsw_sp->router->lock);
9290 
9291 	return notifier_from_errno(err);
9292 }
9293 
9294 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
9295 					struct netdev_nested_priv *priv)
9296 {
9297 	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
9298 
9299 	if (!netif_is_macvlan(dev))
9300 		return 0;
9301 
9302 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9303 				   mlxsw_sp_fid_index(rif->fid), false);
9304 }
9305 
9306 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
9307 {
9308 	struct netdev_nested_priv priv = {
9309 		.data = (void *)rif,
9310 	};
9311 
9312 	if (!netif_is_macvlan_port(rif->dev))
9313 		return 0;
9314 
9315 	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
9316 	return netdev_walk_all_upper_dev_rcu(rif->dev,
9317 					     __mlxsw_sp_rif_macvlan_flush, &priv);
9318 }
9319 
9320 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
9321 				       const struct mlxsw_sp_rif_params *params)
9322 {
9323 	struct mlxsw_sp_rif_subport *rif_subport;
9324 
9325 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
9326 	refcount_set(&rif_subport->ref_count, 1);
9327 	rif_subport->vid = params->vid;
9328 	rif_subport->lag = params->lag;
9329 	if (params->lag)
9330 		rif_subport->lag_id = params->lag_id;
9331 	else
9332 		rif_subport->system_port = params->system_port;
9333 }
9334 
9335 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
9336 {
9337 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9338 	struct mlxsw_sp_rif_subport *rif_subport;
9339 	char ritr_pl[MLXSW_REG_RITR_LEN];
9340 	u16 efid;
9341 
9342 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
9343 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
9344 			    rif->rif_index, rif->vr_id, rif->dev->mtu);
9345 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9346 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9347 	efid = mlxsw_sp_fid_index(rif->fid);
9348 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
9349 				  rif_subport->lag ? rif_subport->lag_id :
9350 						     rif_subport->system_port,
9351 				  efid, 0);
9352 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9353 }
9354 
9355 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
9356 					  struct netlink_ext_ack *extack)
9357 {
9358 	u8 mac_profile;
9359 	int err;
9360 
9361 	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
9362 					   &mac_profile, extack);
9363 	if (err)
9364 		return err;
9365 	rif->mac_profile_id = mac_profile;
9366 
9367 	err = mlxsw_sp_rif_subport_op(rif, true);
9368 	if (err)
9369 		goto err_rif_subport_op;
9370 
9371 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9372 				  mlxsw_sp_fid_index(rif->fid), true);
9373 	if (err)
9374 		goto err_rif_fdb_op;
9375 
9376 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
9377 	if (err)
9378 		goto err_fid_rif_set;
9379 
9380 	return 0;
9381 
9382 err_fid_rif_set:
9383 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9384 			    mlxsw_sp_fid_index(rif->fid), false);
9385 err_rif_fdb_op:
9386 	mlxsw_sp_rif_subport_op(rif, false);
9387 err_rif_subport_op:
9388 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
9389 	return err;
9390 }
9391 
9392 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
9393 {
9394 	struct mlxsw_sp_fid *fid = rif->fid;
9395 
9396 	mlxsw_sp_fid_rif_unset(fid);
9397 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9398 			    mlxsw_sp_fid_index(fid), false);
9399 	mlxsw_sp_rif_macvlan_flush(rif);
9400 	mlxsw_sp_rif_subport_op(rif, false);
9401 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9402 }
9403 
9404 static struct mlxsw_sp_fid *
9405 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
9406 			     struct netlink_ext_ack *extack)
9407 {
9408 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
9409 }
9410 
9411 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
9412 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
9413 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
9414 	.setup			= mlxsw_sp_rif_subport_setup,
9415 	.configure		= mlxsw_sp_rif_subport_configure,
9416 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
9417 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
9418 };
9419 
9420 static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
9421 {
9422 	enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
9423 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9424 	char ritr_pl[MLXSW_REG_RITR_LEN];
9425 
9426 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
9427 			    rif->dev->mtu);
9428 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9429 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9430 	mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
9431 
9432 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9433 }
9434 
9435 u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
9436 {
9437 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
9438 }
9439 
9440 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
9441 				      struct netlink_ext_ack *extack)
9442 {
9443 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9444 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9445 	u8 mac_profile;
9446 	int err;
9447 
9448 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
9449 					   &mac_profile, extack);
9450 	if (err)
9451 		return err;
9452 	rif->mac_profile_id = mac_profile;
9453 
9454 	err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
9455 	if (err)
9456 		goto err_rif_fid_op;
9457 
9458 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9459 				     mlxsw_sp_router_port(mlxsw_sp), true);
9460 	if (err)
9461 		goto err_fid_mc_flood_set;
9462 
9463 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9464 				     mlxsw_sp_router_port(mlxsw_sp), true);
9465 	if (err)
9466 		goto err_fid_bc_flood_set;
9467 
9468 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9469 				  mlxsw_sp_fid_index(rif->fid), true);
9470 	if (err)
9471 		goto err_rif_fdb_op;
9472 
9473 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
9474 	if (err)
9475 		goto err_fid_rif_set;
9476 
9477 	return 0;
9478 
9479 err_fid_rif_set:
9480 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9481 			    mlxsw_sp_fid_index(rif->fid), false);
9482 err_rif_fdb_op:
9483 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9484 			       mlxsw_sp_router_port(mlxsw_sp), false);
9485 err_fid_bc_flood_set:
9486 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9487 			       mlxsw_sp_router_port(mlxsw_sp), false);
9488 err_fid_mc_flood_set:
9489 	mlxsw_sp_rif_fid_op(rif, fid_index, false);
9490 err_rif_fid_op:
9491 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
9492 	return err;
9493 }
9494 
9495 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
9496 {
9497 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9498 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9499 	struct mlxsw_sp_fid *fid = rif->fid;
9500 
9501 	mlxsw_sp_fid_rif_unset(fid);
9502 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9503 			    mlxsw_sp_fid_index(fid), false);
9504 	mlxsw_sp_rif_macvlan_flush(rif);
9505 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9506 			       mlxsw_sp_router_port(mlxsw_sp), false);
9507 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9508 			       mlxsw_sp_router_port(mlxsw_sp), false);
9509 	mlxsw_sp_rif_fid_op(rif, fid_index, false);
9510 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9511 }
9512 
9513 static struct mlxsw_sp_fid *
9514 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
9515 			 struct netlink_ext_ack *extack)
9516 {
9517 	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
9518 }
9519 
9520 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9521 {
9522 	struct switchdev_notifier_fdb_info info = {};
9523 	struct net_device *dev;
9524 
9525 	dev = br_fdb_find_port(rif->dev, mac, 0);
9526 	if (!dev)
9527 		return;
9528 
9529 	info.addr = mac;
9530 	info.vid = 0;
9531 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9532 				 NULL);
9533 }
9534 
9535 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
9536 	.type			= MLXSW_SP_RIF_TYPE_FID,
9537 	.rif_size		= sizeof(struct mlxsw_sp_rif),
9538 	.configure		= mlxsw_sp_rif_fid_configure,
9539 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
9540 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
9541 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
9542 };
9543 
9544 static struct mlxsw_sp_fid *
9545 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
9546 			  struct netlink_ext_ack *extack)
9547 {
9548 	struct net_device *br_dev;
9549 	u16 vid;
9550 	int err;
9551 
9552 	if (is_vlan_dev(rif->dev)) {
9553 		vid = vlan_dev_vlan_id(rif->dev);
9554 		br_dev = vlan_dev_real_dev(rif->dev);
9555 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
9556 			return ERR_PTR(-EINVAL);
9557 	} else {
9558 		err = br_vlan_get_pvid(rif->dev, &vid);
9559 		if (err < 0 || !vid) {
9560 			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
9561 			return ERR_PTR(-EINVAL);
9562 		}
9563 	}
9564 
9565 	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
9566 }
9567 
9568 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9569 {
9570 	struct switchdev_notifier_fdb_info info = {};
9571 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9572 	struct net_device *br_dev;
9573 	struct net_device *dev;
9574 
9575 	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
9576 	dev = br_fdb_find_port(br_dev, mac, vid);
9577 	if (!dev)
9578 		return;
9579 
9580 	info.addr = mac;
9581 	info.vid = vid;
9582 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9583 				 NULL);
9584 }
9585 
9586 static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
9587 				bool enable)
9588 {
9589 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9590 	char ritr_pl[MLXSW_REG_RITR_LEN];
9591 
9592 	mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
9593 				    rif->dev->mtu, rif->dev->dev_addr,
9594 				    rif->mac_profile_id, vid, efid);
9595 
9596 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9597 }
9598 
9599 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
9600 				       struct netlink_ext_ack *extack)
9601 {
9602 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9603 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9604 	u8 mac_profile;
9605 	int err;
9606 
9607 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
9608 					   &mac_profile, extack);
9609 	if (err)
9610 		return err;
9611 	rif->mac_profile_id = mac_profile;
9612 
9613 	err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
9614 	if (err)
9615 		goto err_rif_vlan_fid_op;
9616 
9617 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9618 				     mlxsw_sp_router_port(mlxsw_sp), true);
9619 	if (err)
9620 		goto err_fid_mc_flood_set;
9621 
9622 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9623 				     mlxsw_sp_router_port(mlxsw_sp), true);
9624 	if (err)
9625 		goto err_fid_bc_flood_set;
9626 
9627 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9628 				  mlxsw_sp_fid_index(rif->fid), true);
9629 	if (err)
9630 		goto err_rif_fdb_op;
9631 
9632 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
9633 	if (err)
9634 		goto err_fid_rif_set;
9635 
9636 	return 0;
9637 
9638 err_fid_rif_set:
9639 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9640 			    mlxsw_sp_fid_index(rif->fid), false);
9641 err_rif_fdb_op:
9642 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9643 			       mlxsw_sp_router_port(mlxsw_sp), false);
9644 err_fid_bc_flood_set:
9645 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9646 			       mlxsw_sp_router_port(mlxsw_sp), false);
9647 err_fid_mc_flood_set:
9648 	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
9649 err_rif_vlan_fid_op:
9650 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
9651 	return err;
9652 }
9653 
9654 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
9655 {
9656 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9657 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9658 
9659 	mlxsw_sp_fid_rif_unset(rif->fid);
9660 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9661 			    mlxsw_sp_fid_index(rif->fid), false);
9662 	mlxsw_sp_rif_macvlan_flush(rif);
9663 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9664 			       mlxsw_sp_router_port(mlxsw_sp), false);
9665 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9666 			       mlxsw_sp_router_port(mlxsw_sp), false);
9667 	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
9668 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9669 }
9670 
9671 static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
9672 					struct netlink_ext_ack *extack)
9673 {
9674 	return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
9675 }
9676 
9677 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
9678 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
9679 	.rif_size		= sizeof(struct mlxsw_sp_rif),
9680 	.configure		= mlxsw_sp1_rif_vlan_configure,
9681 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
9682 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
9683 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
9684 };
9685 
9686 static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
9687 					struct netlink_ext_ack *extack)
9688 {
9689 	u16 efid = mlxsw_sp_fid_index(rif->fid);
9690 
9691 	return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
9692 }
9693 
9694 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
9695 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
9696 	.rif_size		= sizeof(struct mlxsw_sp_rif),
9697 	.configure		= mlxsw_sp2_rif_vlan_configure,
9698 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
9699 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
9700 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
9701 };
9702 
9703 static struct mlxsw_sp_rif_ipip_lb *
9704 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
9705 {
9706 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
9707 }
9708 
9709 static void
9710 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
9711 			   const struct mlxsw_sp_rif_params *params)
9712 {
9713 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
9714 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
9715 
9716 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
9717 				 common);
9718 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
9719 	rif_lb->lb_config = params_lb->lb_config;
9720 }
9721 
9722 static int
9723 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
9724 				struct netlink_ext_ack *extack)
9725 {
9726 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9727 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9728 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9729 	struct mlxsw_sp_vr *ul_vr;
9730 	int err;
9731 
9732 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, extack);
9733 	if (IS_ERR(ul_vr))
9734 		return PTR_ERR(ul_vr);
9735 
9736 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
9737 	if (err)
9738 		goto err_loopback_op;
9739 
9740 	lb_rif->ul_vr_id = ul_vr->id;
9741 	lb_rif->ul_rif_id = 0;
9742 	++ul_vr->rif_count;
9743 	return 0;
9744 
9745 err_loopback_op:
9746 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9747 	return err;
9748 }
9749 
9750 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9751 {
9752 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9753 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9754 	struct mlxsw_sp_vr *ul_vr;
9755 
9756 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
9757 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
9758 
9759 	--ul_vr->rif_count;
9760 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9761 }
9762 
9763 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
9764 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
9765 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
9766 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
9767 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
9768 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
9769 };
9770 
9771 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
9772 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
9773 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp1_rif_vlan_ops,
9774 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
9775 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
9776 };
9777 
9778 static int
9779 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
9780 {
9781 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9782 	char ritr_pl[MLXSW_REG_RITR_LEN];
9783 
9784 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
9785 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
9786 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
9787 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
9788 
9789 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9790 }
9791 
9792 static struct mlxsw_sp_rif *
9793 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
9794 		       struct netlink_ext_ack *extack)
9795 {
9796 	struct mlxsw_sp_rif *ul_rif;
9797 	u8 rif_entries = 1;
9798 	u16 rif_index;
9799 	int err;
9800 
9801 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
9802 	if (err) {
9803 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
9804 		return ERR_PTR(err);
9805 	}
9806 
9807 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
9808 	if (!ul_rif) {
9809 		err = -ENOMEM;
9810 		goto err_rif_alloc;
9811 	}
9812 
9813 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
9814 	ul_rif->mlxsw_sp = mlxsw_sp;
9815 	ul_rif->rif_entries = rif_entries;
9816 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
9817 	if (err)
9818 		goto ul_rif_op_err;
9819 
9820 	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
9821 	return ul_rif;
9822 
9823 ul_rif_op_err:
9824 	mlxsw_sp->router->rifs[rif_index] = NULL;
9825 	kfree(ul_rif);
9826 err_rif_alloc:
9827 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
9828 	return ERR_PTR(err);
9829 }
9830 
9831 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
9832 {
9833 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9834 	u8 rif_entries = ul_rif->rif_entries;
9835 	u16 rif_index = ul_rif->rif_index;
9836 
9837 	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
9838 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
9839 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
9840 	kfree(ul_rif);
9841 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
9842 }
9843 
9844 static struct mlxsw_sp_rif *
9845 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
9846 		    struct netlink_ext_ack *extack)
9847 {
9848 	struct mlxsw_sp_vr *vr;
9849 	int err;
9850 
9851 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
9852 	if (IS_ERR(vr))
9853 		return ERR_CAST(vr);
9854 
9855 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
9856 		return vr->ul_rif;
9857 
9858 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
9859 	if (IS_ERR(vr->ul_rif)) {
9860 		err = PTR_ERR(vr->ul_rif);
9861 		goto err_ul_rif_create;
9862 	}
9863 
9864 	vr->rif_count++;
9865 	refcount_set(&vr->ul_rif_refcnt, 1);
9866 
9867 	return vr->ul_rif;
9868 
9869 err_ul_rif_create:
9870 	mlxsw_sp_vr_put(mlxsw_sp, vr);
9871 	return ERR_PTR(err);
9872 }
9873 
9874 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
9875 {
9876 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9877 	struct mlxsw_sp_vr *vr;
9878 
9879 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
9880 
9881 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
9882 		return;
9883 
9884 	vr->rif_count--;
9885 	mlxsw_sp_ul_rif_destroy(ul_rif);
9886 	mlxsw_sp_vr_put(mlxsw_sp, vr);
9887 }
9888 
9889 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
9890 			       u16 *ul_rif_index)
9891 {
9892 	struct mlxsw_sp_rif *ul_rif;
9893 	int err = 0;
9894 
9895 	mutex_lock(&mlxsw_sp->router->lock);
9896 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
9897 	if (IS_ERR(ul_rif)) {
9898 		err = PTR_ERR(ul_rif);
9899 		goto out;
9900 	}
9901 	*ul_rif_index = ul_rif->rif_index;
9902 out:
9903 	mutex_unlock(&mlxsw_sp->router->lock);
9904 	return err;
9905 }
9906 
9907 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
9908 {
9909 	struct mlxsw_sp_rif *ul_rif;
9910 
9911 	mutex_lock(&mlxsw_sp->router->lock);
9912 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
9913 	if (WARN_ON(!ul_rif))
9914 		goto out;
9915 
9916 	mlxsw_sp_ul_rif_put(ul_rif);
9917 out:
9918 	mutex_unlock(&mlxsw_sp->router->lock);
9919 }
9920 
9921 static int
9922 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
9923 				struct netlink_ext_ack *extack)
9924 {
9925 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9926 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9927 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9928 	struct mlxsw_sp_rif *ul_rif;
9929 	int err;
9930 
9931 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, extack);
9932 	if (IS_ERR(ul_rif))
9933 		return PTR_ERR(ul_rif);
9934 
9935 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
9936 	if (err)
9937 		goto err_loopback_op;
9938 
9939 	lb_rif->ul_vr_id = 0;
9940 	lb_rif->ul_rif_id = ul_rif->rif_index;
9941 
9942 	return 0;
9943 
9944 err_loopback_op:
9945 	mlxsw_sp_ul_rif_put(ul_rif);
9946 	return err;
9947 }
9948 
9949 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9950 {
9951 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9952 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9953 	struct mlxsw_sp_rif *ul_rif;
9954 
9955 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
9956 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
9957 	mlxsw_sp_ul_rif_put(ul_rif);
9958 }
9959 
9960 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
9961 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
9962 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
9963 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
9964 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
9965 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
9966 };
9967 
9968 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
9969 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
9970 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp2_rif_vlan_ops,
9971 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
9972 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
9973 };
9974 
9975 static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
9976 {
9977 	struct gen_pool *rifs_table;
9978 	int err;
9979 
9980 	rifs_table = gen_pool_create(0, -1);
9981 	if (!rifs_table)
9982 		return -ENOMEM;
9983 
9984 	gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align,
9985 			  NULL);
9986 
9987 	err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
9988 			   MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
9989 	if (err)
9990 		goto err_gen_pool_add;
9991 
9992 	mlxsw_sp->router->rifs_table = rifs_table;
9993 
9994 	return 0;
9995 
9996 err_gen_pool_add:
9997 	gen_pool_destroy(rifs_table);
9998 	return err;
9999 }
10000 
10001 static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
10002 {
10003 	gen_pool_destroy(mlxsw_sp->router->rifs_table);
10004 }
10005 
10006 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
10007 {
10008 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10009 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10010 	struct mlxsw_core *core = mlxsw_sp->core;
10011 	int err;
10012 
10013 	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
10014 		return -EIO;
10015 	mlxsw_sp->router->max_rif_mac_profile =
10016 		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
10017 
10018 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
10019 					 sizeof(struct mlxsw_sp_rif *),
10020 					 GFP_KERNEL);
10021 	if (!mlxsw_sp->router->rifs)
10022 		return -ENOMEM;
10023 
10024 	err = mlxsw_sp_rifs_table_init(mlxsw_sp);
10025 	if (err)
10026 		goto err_rifs_table_init;
10027 
10028 	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
10029 	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
10030 	atomic_set(&mlxsw_sp->router->rifs_count, 0);
10031 	devl_resource_occ_get_register(devlink,
10032 				       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
10033 				       mlxsw_sp_rif_mac_profiles_occ_get,
10034 				       mlxsw_sp);
10035 	devl_resource_occ_get_register(devlink,
10036 				       MLXSW_SP_RESOURCE_RIFS,
10037 				       mlxsw_sp_rifs_occ_get,
10038 				       mlxsw_sp);
10039 
10040 	return 0;
10041 
10042 err_rifs_table_init:
10043 	kfree(mlxsw_sp->router->rifs);
10044 	return err;
10045 }
10046 
10047 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
10048 {
10049 	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10050 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10051 	int i;
10052 
10053 	WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
10054 	for (i = 0; i < max_rifs; i++)
10055 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
10056 
10057 	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
10058 	devl_resource_occ_get_unregister(devlink,
10059 					 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
10060 	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
10061 	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
10062 	mlxsw_sp_rifs_table_fini(mlxsw_sp);
10063 	kfree(mlxsw_sp->router->rifs);
10064 }
10065 
10066 static int
10067 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
10068 {
10069 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
10070 
10071 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
10072 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
10073 }
10074 
10075 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
10076 {
10077 	int err;
10078 
10079 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
10080 
10081 	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
10082 	if (err)
10083 		return err;
10084 	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
10085 	if (err)
10086 		return err;
10087 
10088 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
10089 }
10090 
10091 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
10092 {
10093 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
10094 	return mlxsw_sp_ipips_init(mlxsw_sp);
10095 }
10096 
10097 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
10098 {
10099 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
10100 	return mlxsw_sp_ipips_init(mlxsw_sp);
10101 }
10102 
10103 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
10104 {
10105 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
10106 }
10107 
10108 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
10109 {
10110 	struct mlxsw_sp_router *router;
10111 
10112 	/* Flush pending FIB notifications and then flush the device's
10113 	 * table before requesting another dump. The FIB notification
10114 	 * block is unregistered, so no need to take RTNL.
10115 	 */
10116 	mlxsw_core_flush_owq();
10117 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
10118 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
10119 }
10120 
10121 #ifdef CONFIG_IP_ROUTE_MULTIPATH
10122 struct mlxsw_sp_mp_hash_config {
10123 	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
10124 	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
10125 	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
10126 	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
10127 	bool inc_parsing_depth;
10128 };
10129 
10130 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
10131 	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
10132 
10133 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
10134 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
10135 
10136 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
10137 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
10138 
10139 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
10140 {
10141 	unsigned long *inner_headers = config->inner_headers;
10142 	unsigned long *inner_fields = config->inner_fields;
10143 
10144 	/* IPv4 inner */
10145 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10146 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10147 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10148 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10149 	/* IPv6 inner */
10150 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10151 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10152 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10153 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10154 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10155 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10156 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10157 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10158 }
10159 
10160 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10161 {
10162 	unsigned long *headers = config->headers;
10163 	unsigned long *fields = config->fields;
10164 
10165 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
10166 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
10167 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
10168 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
10169 }
10170 
10171 static void
10172 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
10173 			      u32 hash_fields)
10174 {
10175 	unsigned long *inner_headers = config->inner_headers;
10176 	unsigned long *inner_fields = config->inner_fields;
10177 
10178 	/* IPv4 Inner */
10179 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10180 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10181 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
10182 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10183 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
10184 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10185 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
10186 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
10187 	/* IPv6 inner */
10188 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10189 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10190 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
10191 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10192 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10193 	}
10194 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
10195 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10196 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10197 	}
10198 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
10199 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10200 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
10201 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10202 	/* L4 inner */
10203 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
10204 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
10205 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
10206 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
10207 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
10208 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
10209 }
10210 
10211 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
10212 				   struct mlxsw_sp_mp_hash_config *config)
10213 {
10214 	struct net *net = mlxsw_sp_net(mlxsw_sp);
10215 	unsigned long *headers = config->headers;
10216 	unsigned long *fields = config->fields;
10217 	u32 hash_fields;
10218 
10219 	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
10220 	case 0:
10221 		mlxsw_sp_mp4_hash_outer_addr(config);
10222 		break;
10223 	case 1:
10224 		mlxsw_sp_mp4_hash_outer_addr(config);
10225 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
10226 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
10227 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10228 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10229 		break;
10230 	case 2:
10231 		/* Outer */
10232 		mlxsw_sp_mp4_hash_outer_addr(config);
10233 		/* Inner */
10234 		mlxsw_sp_mp_hash_inner_l3(config);
10235 		break;
10236 	case 3:
10237 		hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
10238 		/* Outer */
10239 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
10240 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
10241 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
10242 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
10243 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
10244 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
10245 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
10246 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10247 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
10248 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10249 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10250 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10251 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10252 		/* Inner */
10253 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10254 		break;
10255 	}
10256 }
10257 
10258 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10259 {
10260 	unsigned long *headers = config->headers;
10261 	unsigned long *fields = config->fields;
10262 
10263 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10264 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10265 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10266 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10267 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10268 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10269 }
10270 
10271 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
10272 				   struct mlxsw_sp_mp_hash_config *config)
10273 {
10274 	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
10275 	unsigned long *headers = config->headers;
10276 	unsigned long *fields = config->fields;
10277 
10278 	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
10279 	case 0:
10280 		mlxsw_sp_mp6_hash_outer_addr(config);
10281 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10282 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10283 		break;
10284 	case 1:
10285 		mlxsw_sp_mp6_hash_outer_addr(config);
10286 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10287 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10288 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10289 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10290 		break;
10291 	case 2:
10292 		/* Outer */
10293 		mlxsw_sp_mp6_hash_outer_addr(config);
10294 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10295 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10296 		/* Inner */
10297 		mlxsw_sp_mp_hash_inner_l3(config);
10298 		config->inc_parsing_depth = true;
10299 		break;
10300 	case 3:
10301 		/* Outer */
10302 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10303 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10304 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10305 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
10306 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10307 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10308 		}
10309 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
10310 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10311 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10312 		}
10313 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10314 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10315 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
10316 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10317 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10318 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10319 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10320 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10321 		/* Inner */
10322 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10323 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
10324 			config->inc_parsing_depth = true;
10325 		break;
10326 	}
10327 }
10328 
10329 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
10330 						 bool old_inc_parsing_depth,
10331 						 bool new_inc_parsing_depth)
10332 {
10333 	int err;
10334 
10335 	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
10336 		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
10337 		if (err)
10338 			return err;
10339 		mlxsw_sp->router->inc_parsing_depth = true;
10340 	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
10341 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
10342 		mlxsw_sp->router->inc_parsing_depth = false;
10343 	}
10344 
10345 	return 0;
10346 }
10347 
10348 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10349 {
10350 	bool old_inc_parsing_depth, new_inc_parsing_depth;
10351 	struct mlxsw_sp_mp_hash_config config = {};
10352 	char recr2_pl[MLXSW_REG_RECR2_LEN];
10353 	unsigned long bit;
10354 	u32 seed;
10355 	int err;
10356 
10357 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
10358 	mlxsw_reg_recr2_pack(recr2_pl, seed);
10359 	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
10360 	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
10361 
10362 	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
10363 	new_inc_parsing_depth = config.inc_parsing_depth;
10364 	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
10365 						    old_inc_parsing_depth,
10366 						    new_inc_parsing_depth);
10367 	if (err)
10368 		return err;
10369 
10370 	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
10371 		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
10372 	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
10373 		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
10374 	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
10375 		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
10376 	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
10377 		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
10378 
10379 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
10380 	if (err)
10381 		goto err_reg_write;
10382 
10383 	return 0;
10384 
10385 err_reg_write:
10386 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
10387 					      old_inc_parsing_depth);
10388 	return err;
10389 }
10390 
10391 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
10392 {
10393 	bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
10394 
10395 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
10396 					      false);
10397 }
10398 #else
10399 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10400 {
10401 	return 0;
10402 }
10403 
10404 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
10405 {
10406 }
10407 #endif
10408 
10409 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
10410 {
10411 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
10412 	unsigned int i;
10413 
10414 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
10415 
10416 	/* HW is determining switch priority based on DSCP-bits, but the
10417 	 * kernel is still doing that based on the ToS. Since there's a
10418 	 * mismatch in bits we need to make sure to translate the right
10419 	 * value ToS would observe, skipping the 2 least-significant ECN bits.
10420 	 */
10421 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
10422 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
10423 
10424 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
10425 }
10426 
10427 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
10428 {
10429 	struct net *net = mlxsw_sp_net(mlxsw_sp);
10430 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
10431 	u64 max_rifs;
10432 	bool usp;
10433 
10434 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
10435 		return -EIO;
10436 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10437 	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
10438 
10439 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
10440 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
10441 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
10442 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10443 }
10444 
10445 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10446 {
10447 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
10448 
10449 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
10450 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10451 }
10452 
10453 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
10454 {
10455 	u16 lb_rif_index;
10456 	int err;
10457 
10458 	/* Create a generic loopback RIF associated with the main table
10459 	 * (default VRF). Any table can be used, but the main table exists
10460 	 * anyway, so we do not waste resources.
10461 	 */
10462 	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
10463 					 &lb_rif_index);
10464 	if (err)
10465 		return err;
10466 
10467 	mlxsw_sp->router->lb_rif_index = lb_rif_index;
10468 
10469 	return 0;
10470 }
10471 
10472 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
10473 {
10474 	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
10475 }
10476 
10477 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
10478 {
10479 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
10480 
10481 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
10482 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
10483 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10484 
10485 	return 0;
10486 }
10487 
10488 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
10489 	.init = mlxsw_sp1_router_init,
10490 	.ipips_init = mlxsw_sp1_ipips_init,
10491 };
10492 
10493 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
10494 {
10495 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
10496 
10497 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
10498 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
10499 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10500 
10501 	return 0;
10502 }
10503 
10504 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
10505 	.init = mlxsw_sp2_router_init,
10506 	.ipips_init = mlxsw_sp2_ipips_init,
10507 };
10508 
10509 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
10510 			 struct netlink_ext_ack *extack)
10511 {
10512 	struct mlxsw_sp_router *router;
10513 	int err;
10514 
10515 	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
10516 	if (!router)
10517 		return -ENOMEM;
10518 	mutex_init(&router->lock);
10519 	mlxsw_sp->router = router;
10520 	router->mlxsw_sp = mlxsw_sp;
10521 
10522 	err = mlxsw_sp->router_ops->init(mlxsw_sp);
10523 	if (err)
10524 		goto err_router_ops_init;
10525 
10526 	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
10527 	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
10528 			  mlxsw_sp_nh_grp_activity_work);
10529 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
10530 	err = __mlxsw_sp_router_init(mlxsw_sp);
10531 	if (err)
10532 		goto err_router_init;
10533 
10534 	err = mlxsw_sp_rifs_init(mlxsw_sp);
10535 	if (err)
10536 		goto err_rifs_init;
10537 
10538 	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
10539 	if (err)
10540 		goto err_ipips_init;
10541 
10542 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
10543 			      &mlxsw_sp_nexthop_ht_params);
10544 	if (err)
10545 		goto err_nexthop_ht_init;
10546 
10547 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
10548 			      &mlxsw_sp_nexthop_group_ht_params);
10549 	if (err)
10550 		goto err_nexthop_group_ht_init;
10551 
10552 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
10553 	err = mlxsw_sp_lpm_init(mlxsw_sp);
10554 	if (err)
10555 		goto err_lpm_init;
10556 
10557 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
10558 	if (err)
10559 		goto err_mr_init;
10560 
10561 	err = mlxsw_sp_vrs_init(mlxsw_sp);
10562 	if (err)
10563 		goto err_vrs_init;
10564 
10565 	err = mlxsw_sp_lb_rif_init(mlxsw_sp);
10566 	if (err)
10567 		goto err_lb_rif_init;
10568 
10569 	err = mlxsw_sp_neigh_init(mlxsw_sp);
10570 	if (err)
10571 		goto err_neigh_init;
10572 
10573 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
10574 	if (err)
10575 		goto err_mp_hash_init;
10576 
10577 	err = mlxsw_sp_dscp_init(mlxsw_sp);
10578 	if (err)
10579 		goto err_dscp_init;
10580 
10581 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
10582 	err = register_inetaddr_notifier(&router->inetaddr_nb);
10583 	if (err)
10584 		goto err_register_inetaddr_notifier;
10585 
10586 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
10587 	err = register_inet6addr_notifier(&router->inet6addr_nb);
10588 	if (err)
10589 		goto err_register_inet6addr_notifier;
10590 
10591 	mlxsw_sp->router->netevent_nb.notifier_call =
10592 		mlxsw_sp_router_netevent_event;
10593 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10594 	if (err)
10595 		goto err_register_netevent_notifier;
10596 
10597 	mlxsw_sp->router->nexthop_nb.notifier_call =
10598 		mlxsw_sp_nexthop_obj_event;
10599 	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10600 					&mlxsw_sp->router->nexthop_nb,
10601 					extack);
10602 	if (err)
10603 		goto err_register_nexthop_notifier;
10604 
10605 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
10606 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10607 				    &mlxsw_sp->router->fib_nb,
10608 				    mlxsw_sp_router_fib_dump_flush, extack);
10609 	if (err)
10610 		goto err_register_fib_notifier;
10611 
10612 	mlxsw_sp->router->netdevice_nb.notifier_call =
10613 		mlxsw_sp_router_netdevice_event;
10614 	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
10615 					      &mlxsw_sp->router->netdevice_nb);
10616 	if (err)
10617 		goto err_register_netdev_notifier;
10618 
10619 	return 0;
10620 
10621 err_register_netdev_notifier:
10622 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10623 				&mlxsw_sp->router->fib_nb);
10624 err_register_fib_notifier:
10625 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10626 				    &mlxsw_sp->router->nexthop_nb);
10627 err_register_nexthop_notifier:
10628 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10629 err_register_netevent_notifier:
10630 	unregister_inet6addr_notifier(&router->inet6addr_nb);
10631 err_register_inet6addr_notifier:
10632 	unregister_inetaddr_notifier(&router->inetaddr_nb);
10633 err_register_inetaddr_notifier:
10634 	mlxsw_core_flush_owq();
10635 err_dscp_init:
10636 	mlxsw_sp_mp_hash_fini(mlxsw_sp);
10637 err_mp_hash_init:
10638 	mlxsw_sp_neigh_fini(mlxsw_sp);
10639 err_neigh_init:
10640 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
10641 err_lb_rif_init:
10642 	mlxsw_sp_vrs_fini(mlxsw_sp);
10643 err_vrs_init:
10644 	mlxsw_sp_mr_fini(mlxsw_sp);
10645 err_mr_init:
10646 	mlxsw_sp_lpm_fini(mlxsw_sp);
10647 err_lpm_init:
10648 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
10649 err_nexthop_group_ht_init:
10650 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
10651 err_nexthop_ht_init:
10652 	mlxsw_sp_ipips_fini(mlxsw_sp);
10653 err_ipips_init:
10654 	mlxsw_sp_rifs_fini(mlxsw_sp);
10655 err_rifs_init:
10656 	__mlxsw_sp_router_fini(mlxsw_sp);
10657 err_router_init:
10658 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
10659 err_router_ops_init:
10660 	mutex_destroy(&mlxsw_sp->router->lock);
10661 	kfree(mlxsw_sp->router);
10662 	return err;
10663 }
10664 
10665 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10666 {
10667 	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
10668 					  &mlxsw_sp->router->netdevice_nb);
10669 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10670 				&mlxsw_sp->router->fib_nb);
10671 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10672 				    &mlxsw_sp->router->nexthop_nb);
10673 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10674 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
10675 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
10676 	mlxsw_core_flush_owq();
10677 	mlxsw_sp_mp_hash_fini(mlxsw_sp);
10678 	mlxsw_sp_neigh_fini(mlxsw_sp);
10679 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
10680 	mlxsw_sp_vrs_fini(mlxsw_sp);
10681 	mlxsw_sp_mr_fini(mlxsw_sp);
10682 	mlxsw_sp_lpm_fini(mlxsw_sp);
10683 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
10684 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
10685 	mlxsw_sp_ipips_fini(mlxsw_sp);
10686 	mlxsw_sp_rifs_fini(mlxsw_sp);
10687 	__mlxsw_sp_router_fini(mlxsw_sp);
10688 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
10689 	mutex_destroy(&mlxsw_sp->router->lock);
10690 	kfree(mlxsw_sp->router);
10691 }
10692