1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
8 #include <linux/in6.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <net/netevent.h>
22 #include <net/neighbour.h>
23 #include <net/arp.h>
24 #include <net/ip_fib.h>
25 #include <net/ip6_fib.h>
26 #include <net/nexthop.h>
27 #include <net/fib_rules.h>
28 #include <net/ip_tunnels.h>
29 #include <net/l3mdev.h>
30 #include <net/addrconf.h>
31 #include <net/ndisc.h>
32 #include <net/ipv6.h>
33 #include <net/fib_notifier.h>
34 #include <net/switchdev.h>
35 
36 #include "spectrum.h"
37 #include "core.h"
38 #include "reg.h"
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_ipip.h"
42 #include "spectrum_mr.h"
43 #include "spectrum_mr_tcam.h"
44 #include "spectrum_router.h"
45 #include "spectrum_span.h"
46 
47 struct mlxsw_sp_fib;
48 struct mlxsw_sp_vr;
49 struct mlxsw_sp_lpm_tree;
50 struct mlxsw_sp_rif_ops;
51 
52 struct mlxsw_sp_rif {
53 	struct list_head nexthop_list;
54 	struct list_head neigh_list;
55 	struct net_device *dev; /* NULL for underlay RIF */
56 	struct mlxsw_sp_fid *fid;
57 	unsigned char addr[ETH_ALEN];
58 	int mtu;
59 	u16 rif_index;
60 	u8 mac_profile_id;
61 	u16 vr_id;
62 	const struct mlxsw_sp_rif_ops *ops;
63 	struct mlxsw_sp *mlxsw_sp;
64 
65 	unsigned int counter_ingress;
66 	bool counter_ingress_valid;
67 	unsigned int counter_egress;
68 	bool counter_egress_valid;
69 };
70 
71 struct mlxsw_sp_rif_params {
72 	struct net_device *dev;
73 	union {
74 		u16 system_port;
75 		u16 lag_id;
76 	};
77 	u16 vid;
78 	bool lag;
79 };
80 
81 struct mlxsw_sp_rif_subport {
82 	struct mlxsw_sp_rif common;
83 	refcount_t ref_count;
84 	union {
85 		u16 system_port;
86 		u16 lag_id;
87 	};
88 	u16 vid;
89 	bool lag;
90 };
91 
92 struct mlxsw_sp_rif_ipip_lb {
93 	struct mlxsw_sp_rif common;
94 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
95 	u16 ul_vr_id; /* Reserved for Spectrum-2. */
96 	u16 ul_rif_id; /* Reserved for Spectrum. */
97 };
98 
99 struct mlxsw_sp_rif_params_ipip_lb {
100 	struct mlxsw_sp_rif_params common;
101 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
102 };
103 
104 struct mlxsw_sp_rif_ops {
105 	enum mlxsw_sp_rif_type type;
106 	size_t rif_size;
107 
108 	void (*setup)(struct mlxsw_sp_rif *rif,
109 		      const struct mlxsw_sp_rif_params *params);
110 	int (*configure)(struct mlxsw_sp_rif *rif,
111 			 struct netlink_ext_ack *extack);
112 	void (*deconfigure)(struct mlxsw_sp_rif *rif);
113 	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
114 					 struct netlink_ext_ack *extack);
115 	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
116 };
117 
118 struct mlxsw_sp_rif_mac_profile {
119 	unsigned char mac_prefix[ETH_ALEN];
120 	refcount_t ref_count;
121 	u8 id;
122 };
123 
124 struct mlxsw_sp_router_ops {
125 	int (*init)(struct mlxsw_sp *mlxsw_sp);
126 	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
127 };
128 
129 static struct mlxsw_sp_rif *
130 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
131 			 const struct net_device *dev);
132 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
133 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
134 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
135 				  struct mlxsw_sp_lpm_tree *lpm_tree);
136 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
137 				     const struct mlxsw_sp_fib *fib,
138 				     u8 tree_id);
139 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
140 				       const struct mlxsw_sp_fib *fib);
141 
142 static unsigned int *
143 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
144 			   enum mlxsw_sp_rif_counter_dir dir)
145 {
146 	switch (dir) {
147 	case MLXSW_SP_RIF_COUNTER_EGRESS:
148 		return &rif->counter_egress;
149 	case MLXSW_SP_RIF_COUNTER_INGRESS:
150 		return &rif->counter_ingress;
151 	}
152 	return NULL;
153 }
154 
155 static bool
156 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
157 			       enum mlxsw_sp_rif_counter_dir dir)
158 {
159 	switch (dir) {
160 	case MLXSW_SP_RIF_COUNTER_EGRESS:
161 		return rif->counter_egress_valid;
162 	case MLXSW_SP_RIF_COUNTER_INGRESS:
163 		return rif->counter_ingress_valid;
164 	}
165 	return false;
166 }
167 
168 static void
169 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
170 			       enum mlxsw_sp_rif_counter_dir dir,
171 			       bool valid)
172 {
173 	switch (dir) {
174 	case MLXSW_SP_RIF_COUNTER_EGRESS:
175 		rif->counter_egress_valid = valid;
176 		break;
177 	case MLXSW_SP_RIF_COUNTER_INGRESS:
178 		rif->counter_ingress_valid = valid;
179 		break;
180 	}
181 }
182 
183 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
184 				     unsigned int counter_index, bool enable,
185 				     enum mlxsw_sp_rif_counter_dir dir)
186 {
187 	char ritr_pl[MLXSW_REG_RITR_LEN];
188 	bool is_egress = false;
189 	int err;
190 
191 	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
192 		is_egress = true;
193 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
194 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
195 	if (err)
196 		return err;
197 
198 	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
199 				    is_egress);
200 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
201 }
202 
203 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
204 				   struct mlxsw_sp_rif *rif,
205 				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
206 {
207 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
208 	unsigned int *p_counter_index;
209 	bool valid;
210 	int err;
211 
212 	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
213 	if (!valid)
214 		return -EINVAL;
215 
216 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
217 	if (!p_counter_index)
218 		return -EINVAL;
219 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
220 			     MLXSW_REG_RICNT_OPCODE_NOP);
221 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
222 	if (err)
223 		return err;
224 	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
225 	return 0;
226 }
227 
228 struct mlxsw_sp_rif_counter_set_basic {
229 	u64 good_unicast_packets;
230 	u64 good_multicast_packets;
231 	u64 good_broadcast_packets;
232 	u64 good_unicast_bytes;
233 	u64 good_multicast_bytes;
234 	u64 good_broadcast_bytes;
235 	u64 error_packets;
236 	u64 discard_packets;
237 	u64 error_bytes;
238 	u64 discard_bytes;
239 };
240 
241 static int
242 mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
243 				 enum mlxsw_sp_rif_counter_dir dir,
244 				 struct mlxsw_sp_rif_counter_set_basic *set)
245 {
246 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
247 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
248 	unsigned int *p_counter_index;
249 	int err;
250 
251 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
252 		return -EINVAL;
253 
254 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
255 	if (!p_counter_index)
256 		return -EINVAL;
257 
258 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
259 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
260 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
261 	if (err)
262 		return err;
263 
264 	if (!set)
265 		return 0;
266 
267 #define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME)				\
268 		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))
269 
270 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
271 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
272 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
273 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
274 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
275 	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
276 	MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
277 	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
278 	MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
279 	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
280 
281 #undef MLXSW_SP_RIF_COUNTER_EXTRACT
282 
283 	return 0;
284 }
285 
286 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
287 				      unsigned int counter_index)
288 {
289 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
290 
291 	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
292 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
293 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
294 }
295 
296 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
297 			       enum mlxsw_sp_rif_counter_dir dir)
298 {
299 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
300 	unsigned int *p_counter_index;
301 	int err;
302 
303 	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
304 		return 0;
305 
306 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
307 	if (!p_counter_index)
308 		return -EINVAL;
309 
310 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
311 				     p_counter_index);
312 	if (err)
313 		return err;
314 
315 	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
316 	if (err)
317 		goto err_counter_clear;
318 
319 	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
320 					*p_counter_index, true, dir);
321 	if (err)
322 		goto err_counter_edit;
323 	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
324 	return 0;
325 
326 err_counter_edit:
327 err_counter_clear:
328 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
329 			      *p_counter_index);
330 	return err;
331 }
332 
333 void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
334 			       enum mlxsw_sp_rif_counter_dir dir)
335 {
336 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
337 	unsigned int *p_counter_index;
338 
339 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
340 		return;
341 
342 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
343 	if (WARN_ON(!p_counter_index))
344 		return;
345 	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
346 				  *p_counter_index, false, dir);
347 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
348 			      *p_counter_index);
349 	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
350 }
351 
352 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
353 {
354 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
355 	struct devlink *devlink;
356 
357 	devlink = priv_to_devlink(mlxsw_sp->core);
358 	if (!devlink_dpipe_table_counter_enabled(devlink,
359 						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
360 		return;
361 	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
362 }
363 
364 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
365 {
366 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
367 }
368 
369 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
370 
371 struct mlxsw_sp_prefix_usage {
372 	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
373 };
374 
375 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
376 	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
377 
378 static bool
379 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
380 			 struct mlxsw_sp_prefix_usage *prefix_usage2)
381 {
382 	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
383 }
384 
385 static void
386 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
387 			  struct mlxsw_sp_prefix_usage *prefix_usage2)
388 {
389 	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
390 }
391 
392 static void
393 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
394 			  unsigned char prefix_len)
395 {
396 	set_bit(prefix_len, prefix_usage->b);
397 }
398 
399 static void
400 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
401 			    unsigned char prefix_len)
402 {
403 	clear_bit(prefix_len, prefix_usage->b);
404 }
405 
406 struct mlxsw_sp_fib_key {
407 	unsigned char addr[sizeof(struct in6_addr)];
408 	unsigned char prefix_len;
409 };
410 
411 enum mlxsw_sp_fib_entry_type {
412 	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
413 	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
414 	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
415 	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
416 	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
417 
418 	/* This is a special case of local delivery, where a packet should be
419 	 * decapsulated on reception. Note that there is no corresponding ENCAP,
420 	 * because that's a type of next hop, not of FIB entry. (There can be
421 	 * several next hops in a REMOTE entry, and some of them may be
422 	 * encapsulating entries.)
423 	 */
424 	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
425 	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
426 };
427 
428 struct mlxsw_sp_nexthop_group_info;
429 struct mlxsw_sp_nexthop_group;
430 struct mlxsw_sp_fib_entry;
431 
432 struct mlxsw_sp_fib_node {
433 	struct mlxsw_sp_fib_entry *fib_entry;
434 	struct list_head list;
435 	struct rhash_head ht_node;
436 	struct mlxsw_sp_fib *fib;
437 	struct mlxsw_sp_fib_key key;
438 };
439 
440 struct mlxsw_sp_fib_entry_decap {
441 	struct mlxsw_sp_ipip_entry *ipip_entry;
442 	u32 tunnel_index;
443 };
444 
445 static struct mlxsw_sp_fib_entry_priv *
446 mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
447 {
448 	struct mlxsw_sp_fib_entry_priv *priv;
449 
450 	if (!ll_ops->fib_entry_priv_size)
451 		/* No need to have priv */
452 		return NULL;
453 
454 	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
455 	if (!priv)
456 		return ERR_PTR(-ENOMEM);
457 	refcount_set(&priv->refcnt, 1);
458 	return priv;
459 }
460 
461 static void
462 mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
463 {
464 	kfree(priv);
465 }
466 
467 static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
468 {
469 	refcount_inc(&priv->refcnt);
470 }
471 
472 static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
473 {
474 	if (!priv || !refcount_dec_and_test(&priv->refcnt))
475 		return;
476 	mlxsw_sp_fib_entry_priv_destroy(priv);
477 }
478 
479 static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
480 						struct mlxsw_sp_fib_entry_priv *priv)
481 {
482 	if (!priv)
483 		return;
484 	mlxsw_sp_fib_entry_priv_hold(priv);
485 	list_add(&priv->list, &op_ctx->fib_entry_priv_list);
486 }
487 
488 static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
489 {
490 	struct mlxsw_sp_fib_entry_priv *priv, *tmp;
491 
492 	list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
493 		mlxsw_sp_fib_entry_priv_put(priv);
494 	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
495 }
496 
497 struct mlxsw_sp_fib_entry {
498 	struct mlxsw_sp_fib_node *fib_node;
499 	enum mlxsw_sp_fib_entry_type type;
500 	struct list_head nexthop_group_node;
501 	struct mlxsw_sp_nexthop_group *nh_group;
502 	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
503 	struct mlxsw_sp_fib_entry_priv *priv;
504 };
505 
506 struct mlxsw_sp_fib4_entry {
507 	struct mlxsw_sp_fib_entry common;
508 	struct fib_info *fi;
509 	u32 tb_id;
510 	u8 tos;
511 	u8 type;
512 };
513 
514 struct mlxsw_sp_fib6_entry {
515 	struct mlxsw_sp_fib_entry common;
516 	struct list_head rt6_list;
517 	unsigned int nrt6;
518 };
519 
520 struct mlxsw_sp_rt6 {
521 	struct list_head list;
522 	struct fib6_info *rt;
523 };
524 
525 struct mlxsw_sp_lpm_tree {
526 	u8 id; /* tree ID */
527 	unsigned int ref_count;
528 	enum mlxsw_sp_l3proto proto;
529 	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
530 	struct mlxsw_sp_prefix_usage prefix_usage;
531 };
532 
533 struct mlxsw_sp_fib {
534 	struct rhashtable ht;
535 	struct list_head node_list;
536 	struct mlxsw_sp_vr *vr;
537 	struct mlxsw_sp_lpm_tree *lpm_tree;
538 	enum mlxsw_sp_l3proto proto;
539 	const struct mlxsw_sp_router_ll_ops *ll_ops;
540 };
541 
542 struct mlxsw_sp_vr {
543 	u16 id; /* virtual router ID */
544 	u32 tb_id; /* kernel fib table id */
545 	unsigned int rif_count;
546 	struct mlxsw_sp_fib *fib4;
547 	struct mlxsw_sp_fib *fib6;
548 	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
549 	struct mlxsw_sp_rif *ul_rif;
550 	refcount_t ul_rif_refcnt;
551 };
552 
553 static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
554 					 enum mlxsw_sp_l3proto proto)
555 {
556 	return 0;
557 }
558 
559 static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
560 {
561 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
562 			       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
563 }
564 
565 static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
566 {
567 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
568 			       xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
569 }
570 
571 static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
572 {
573 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
574 			       xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
575 }
576 
577 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
578 
579 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
580 						struct mlxsw_sp_vr *vr,
581 						enum mlxsw_sp_l3proto proto)
582 {
583 	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
584 	struct mlxsw_sp_lpm_tree *lpm_tree;
585 	struct mlxsw_sp_fib *fib;
586 	int err;
587 
588 	err = ll_ops->init(mlxsw_sp, vr->id, proto);
589 	if (err)
590 		return ERR_PTR(err);
591 
592 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
593 	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
594 	if (!fib)
595 		return ERR_PTR(-ENOMEM);
596 	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
597 	if (err)
598 		goto err_rhashtable_init;
599 	INIT_LIST_HEAD(&fib->node_list);
600 	fib->proto = proto;
601 	fib->vr = vr;
602 	fib->lpm_tree = lpm_tree;
603 	fib->ll_ops = ll_ops;
604 	mlxsw_sp_lpm_tree_hold(lpm_tree);
605 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
606 	if (err)
607 		goto err_lpm_tree_bind;
608 	return fib;
609 
610 err_lpm_tree_bind:
611 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
612 err_rhashtable_init:
613 	kfree(fib);
614 	return ERR_PTR(err);
615 }
616 
617 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
618 				 struct mlxsw_sp_fib *fib)
619 {
620 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
621 	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
622 	WARN_ON(!list_empty(&fib->node_list));
623 	rhashtable_destroy(&fib->ht);
624 	kfree(fib);
625 }
626 
627 static struct mlxsw_sp_lpm_tree *
628 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
629 {
630 	static struct mlxsw_sp_lpm_tree *lpm_tree;
631 	int i;
632 
633 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
634 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
635 		if (lpm_tree->ref_count == 0)
636 			return lpm_tree;
637 	}
638 	return NULL;
639 }
640 
641 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
642 				   const struct mlxsw_sp_router_ll_ops *ll_ops,
643 				   struct mlxsw_sp_lpm_tree *lpm_tree)
644 {
645 	char xralta_pl[MLXSW_REG_XRALTA_LEN];
646 
647 	mlxsw_reg_xralta_pack(xralta_pl, true,
648 			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
649 			      lpm_tree->id);
650 	return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
651 }
652 
653 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
654 				   const struct mlxsw_sp_router_ll_ops *ll_ops,
655 				   struct mlxsw_sp_lpm_tree *lpm_tree)
656 {
657 	char xralta_pl[MLXSW_REG_XRALTA_LEN];
658 
659 	mlxsw_reg_xralta_pack(xralta_pl, false,
660 			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
661 			      lpm_tree->id);
662 	ll_ops->ralta_write(mlxsw_sp, xralta_pl);
663 }
664 
665 static int
666 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
667 				  const struct mlxsw_sp_router_ll_ops *ll_ops,
668 				  struct mlxsw_sp_prefix_usage *prefix_usage,
669 				  struct mlxsw_sp_lpm_tree *lpm_tree)
670 {
671 	char xralst_pl[MLXSW_REG_XRALST_LEN];
672 	u8 root_bin = 0;
673 	u8 prefix;
674 	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
675 
676 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
677 		root_bin = prefix;
678 
679 	mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
680 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
681 		if (prefix == 0)
682 			continue;
683 		mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
684 					  MLXSW_REG_RALST_BIN_NO_CHILD);
685 		last_prefix = prefix;
686 	}
687 	return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
688 }
689 
690 static struct mlxsw_sp_lpm_tree *
691 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
692 			 const struct mlxsw_sp_router_ll_ops *ll_ops,
693 			 struct mlxsw_sp_prefix_usage *prefix_usage,
694 			 enum mlxsw_sp_l3proto proto)
695 {
696 	struct mlxsw_sp_lpm_tree *lpm_tree;
697 	int err;
698 
699 	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
700 	if (!lpm_tree)
701 		return ERR_PTR(-EBUSY);
702 	lpm_tree->proto = proto;
703 	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
704 	if (err)
705 		return ERR_PTR(err);
706 
707 	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
708 	if (err)
709 		goto err_left_struct_set;
710 	memcpy(&lpm_tree->prefix_usage, prefix_usage,
711 	       sizeof(lpm_tree->prefix_usage));
712 	memset(&lpm_tree->prefix_ref_count, 0,
713 	       sizeof(lpm_tree->prefix_ref_count));
714 	lpm_tree->ref_count = 1;
715 	return lpm_tree;
716 
717 err_left_struct_set:
718 	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
719 	return ERR_PTR(err);
720 }
721 
722 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
723 				      const struct mlxsw_sp_router_ll_ops *ll_ops,
724 				      struct mlxsw_sp_lpm_tree *lpm_tree)
725 {
726 	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
727 }
728 
729 static struct mlxsw_sp_lpm_tree *
730 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
731 		      struct mlxsw_sp_prefix_usage *prefix_usage,
732 		      enum mlxsw_sp_l3proto proto)
733 {
734 	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
735 	struct mlxsw_sp_lpm_tree *lpm_tree;
736 	int i;
737 
738 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
739 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
740 		if (lpm_tree->ref_count != 0 &&
741 		    lpm_tree->proto == proto &&
742 		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
743 					     prefix_usage)) {
744 			mlxsw_sp_lpm_tree_hold(lpm_tree);
745 			return lpm_tree;
746 		}
747 	}
748 	return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
749 }
750 
751 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
752 {
753 	lpm_tree->ref_count++;
754 }
755 
756 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
757 				  struct mlxsw_sp_lpm_tree *lpm_tree)
758 {
759 	const struct mlxsw_sp_router_ll_ops *ll_ops =
760 				mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
761 
762 	if (--lpm_tree->ref_count == 0)
763 		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
764 }
765 
766 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
767 
768 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
769 {
770 	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
771 	struct mlxsw_sp_lpm_tree *lpm_tree;
772 	u64 max_trees;
773 	int err, i;
774 
775 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
776 		return -EIO;
777 
778 	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
779 	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
780 	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
781 					     sizeof(struct mlxsw_sp_lpm_tree),
782 					     GFP_KERNEL);
783 	if (!mlxsw_sp->router->lpm.trees)
784 		return -ENOMEM;
785 
786 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
787 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
788 		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
789 	}
790 
791 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
792 					 MLXSW_SP_L3_PROTO_IPV4);
793 	if (IS_ERR(lpm_tree)) {
794 		err = PTR_ERR(lpm_tree);
795 		goto err_ipv4_tree_get;
796 	}
797 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
798 
799 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
800 					 MLXSW_SP_L3_PROTO_IPV6);
801 	if (IS_ERR(lpm_tree)) {
802 		err = PTR_ERR(lpm_tree);
803 		goto err_ipv6_tree_get;
804 	}
805 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
806 
807 	return 0;
808 
809 err_ipv6_tree_get:
810 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
811 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
812 err_ipv4_tree_get:
813 	kfree(mlxsw_sp->router->lpm.trees);
814 	return err;
815 }
816 
817 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
818 {
819 	struct mlxsw_sp_lpm_tree *lpm_tree;
820 
821 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
822 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
823 
824 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
825 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
826 
827 	kfree(mlxsw_sp->router->lpm.trees);
828 }
829 
830 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
831 {
832 	return !!vr->fib4 || !!vr->fib6 ||
833 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
834 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
835 }
836 
837 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
838 {
839 	struct mlxsw_sp_vr *vr;
840 	int i;
841 
842 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
843 		vr = &mlxsw_sp->router->vrs[i];
844 		if (!mlxsw_sp_vr_is_used(vr))
845 			return vr;
846 	}
847 	return NULL;
848 }
849 
850 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
851 				     const struct mlxsw_sp_fib *fib, u8 tree_id)
852 {
853 	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
854 
855 	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
856 			      (enum mlxsw_reg_ralxx_protocol) fib->proto,
857 			      tree_id);
858 	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
859 }
860 
861 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
862 				       const struct mlxsw_sp_fib *fib)
863 {
864 	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
865 
866 	/* Bind to tree 0 which is default */
867 	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
868 			      (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
869 	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
870 }
871 
872 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
873 {
874 	/* For our purpose, squash main, default and local tables into one */
875 	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
876 		tb_id = RT_TABLE_MAIN;
877 	return tb_id;
878 }
879 
880 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
881 					    u32 tb_id)
882 {
883 	struct mlxsw_sp_vr *vr;
884 	int i;
885 
886 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
887 
888 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
889 		vr = &mlxsw_sp->router->vrs[i];
890 		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
891 			return vr;
892 	}
893 	return NULL;
894 }
895 
896 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
897 				u16 *vr_id)
898 {
899 	struct mlxsw_sp_vr *vr;
900 	int err = 0;
901 
902 	mutex_lock(&mlxsw_sp->router->lock);
903 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
904 	if (!vr) {
905 		err = -ESRCH;
906 		goto out;
907 	}
908 	*vr_id = vr->id;
909 out:
910 	mutex_unlock(&mlxsw_sp->router->lock);
911 	return err;
912 }
913 
914 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
915 					    enum mlxsw_sp_l3proto proto)
916 {
917 	switch (proto) {
918 	case MLXSW_SP_L3_PROTO_IPV4:
919 		return vr->fib4;
920 	case MLXSW_SP_L3_PROTO_IPV6:
921 		return vr->fib6;
922 	}
923 	return NULL;
924 }
925 
926 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
927 					      u32 tb_id,
928 					      struct netlink_ext_ack *extack)
929 {
930 	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
931 	struct mlxsw_sp_fib *fib4;
932 	struct mlxsw_sp_fib *fib6;
933 	struct mlxsw_sp_vr *vr;
934 	int err;
935 
936 	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
937 	if (!vr) {
938 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
939 		return ERR_PTR(-EBUSY);
940 	}
941 	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
942 	if (IS_ERR(fib4))
943 		return ERR_CAST(fib4);
944 	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
945 	if (IS_ERR(fib6)) {
946 		err = PTR_ERR(fib6);
947 		goto err_fib6_create;
948 	}
949 	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
950 					     MLXSW_SP_L3_PROTO_IPV4);
951 	if (IS_ERR(mr4_table)) {
952 		err = PTR_ERR(mr4_table);
953 		goto err_mr4_table_create;
954 	}
955 	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
956 					     MLXSW_SP_L3_PROTO_IPV6);
957 	if (IS_ERR(mr6_table)) {
958 		err = PTR_ERR(mr6_table);
959 		goto err_mr6_table_create;
960 	}
961 
962 	vr->fib4 = fib4;
963 	vr->fib6 = fib6;
964 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
965 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
966 	vr->tb_id = tb_id;
967 	return vr;
968 
969 err_mr6_table_create:
970 	mlxsw_sp_mr_table_destroy(mr4_table);
971 err_mr4_table_create:
972 	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
973 err_fib6_create:
974 	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
975 	return ERR_PTR(err);
976 }
977 
978 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
979 				struct mlxsw_sp_vr *vr)
980 {
981 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
982 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
983 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
984 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
985 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
986 	vr->fib6 = NULL;
987 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
988 	vr->fib4 = NULL;
989 }
990 
991 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
992 					   struct netlink_ext_ack *extack)
993 {
994 	struct mlxsw_sp_vr *vr;
995 
996 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
997 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
998 	if (!vr)
999 		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
1000 	return vr;
1001 }
1002 
1003 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
1004 {
1005 	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
1006 	    list_empty(&vr->fib6->node_list) &&
1007 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
1008 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
1009 		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
1010 }
1011 
1012 static bool
1013 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
1014 				    enum mlxsw_sp_l3proto proto, u8 tree_id)
1015 {
1016 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
1017 
1018 	if (!mlxsw_sp_vr_is_used(vr))
1019 		return false;
1020 	if (fib->lpm_tree->id == tree_id)
1021 		return true;
1022 	return false;
1023 }
1024 
1025 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
1026 					struct mlxsw_sp_fib *fib,
1027 					struct mlxsw_sp_lpm_tree *new_tree)
1028 {
1029 	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
1030 	int err;
1031 
1032 	fib->lpm_tree = new_tree;
1033 	mlxsw_sp_lpm_tree_hold(new_tree);
1034 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
1035 	if (err)
1036 		goto err_tree_bind;
1037 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
1038 	return 0;
1039 
1040 err_tree_bind:
1041 	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
1042 	fib->lpm_tree = old_tree;
1043 	return err;
1044 }
1045 
1046 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
1047 					 struct mlxsw_sp_fib *fib,
1048 					 struct mlxsw_sp_lpm_tree *new_tree)
1049 {
1050 	enum mlxsw_sp_l3proto proto = fib->proto;
1051 	struct mlxsw_sp_lpm_tree *old_tree;
1052 	u8 old_id, new_id = new_tree->id;
1053 	struct mlxsw_sp_vr *vr;
1054 	int i, err;
1055 
1056 	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
1057 	old_id = old_tree->id;
1058 
1059 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
1060 		vr = &mlxsw_sp->router->vrs[i];
1061 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
1062 			continue;
1063 		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1064 						   mlxsw_sp_vr_fib(vr, proto),
1065 						   new_tree);
1066 		if (err)
1067 			goto err_tree_replace;
1068 	}
1069 
1070 	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
1071 	       sizeof(new_tree->prefix_ref_count));
1072 	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
1073 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
1074 
1075 	return 0;
1076 
1077 err_tree_replace:
1078 	for (i--; i >= 0; i--) {
1079 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
1080 			continue;
1081 		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1082 					     mlxsw_sp_vr_fib(vr, proto),
1083 					     old_tree);
1084 	}
1085 	return err;
1086 }
1087 
1088 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
1089 {
1090 	struct mlxsw_sp_vr *vr;
1091 	u64 max_vrs;
1092 	int i;
1093 
1094 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
1095 		return -EIO;
1096 
1097 	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1098 	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
1099 					GFP_KERNEL);
1100 	if (!mlxsw_sp->router->vrs)
1101 		return -ENOMEM;
1102 
1103 	for (i = 0; i < max_vrs; i++) {
1104 		vr = &mlxsw_sp->router->vrs[i];
1105 		vr->id = i;
1106 	}
1107 
1108 	return 0;
1109 }
1110 
1111 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
1112 
1113 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
1114 {
1115 	/* At this stage we're guaranteed not to have new incoming
1116 	 * FIB notifications and the work queue is free from FIBs
1117 	 * sitting on top of mlxsw netdevs. However, we can still
1118 	 * have other FIBs queued. Flush the queue before flushing
1119 	 * the device's tables. No need for locks, as we're the only
1120 	 * writer.
1121 	 */
1122 	mlxsw_core_flush_owq();
1123 	mlxsw_sp_router_fib_flush(mlxsw_sp);
1124 	kfree(mlxsw_sp->router->vrs);
1125 }
1126 
1127 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
1128 {
1129 	struct net_device *d;
1130 	u32 tb_id;
1131 
1132 	rcu_read_lock();
1133 	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1134 	if (d)
1135 		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1136 	else
1137 		tb_id = RT_TABLE_MAIN;
1138 	rcu_read_unlock();
1139 
1140 	return tb_id;
1141 }
1142 
1143 static struct mlxsw_sp_rif *
1144 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1145 		    const struct mlxsw_sp_rif_params *params,
1146 		    struct netlink_ext_ack *extack);
1147 
1148 static struct mlxsw_sp_rif_ipip_lb *
1149 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1150 				enum mlxsw_sp_ipip_type ipipt,
1151 				struct net_device *ol_dev,
1152 				struct netlink_ext_ack *extack)
1153 {
1154 	struct mlxsw_sp_rif_params_ipip_lb lb_params;
1155 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1156 	struct mlxsw_sp_rif *rif;
1157 
1158 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1159 	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1160 		.common.dev = ol_dev,
1161 		.common.lag = false,
1162 		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1163 	};
1164 
1165 	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1166 	if (IS_ERR(rif))
1167 		return ERR_CAST(rif);
1168 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1169 }
1170 
1171 static struct mlxsw_sp_ipip_entry *
1172 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1173 			  enum mlxsw_sp_ipip_type ipipt,
1174 			  struct net_device *ol_dev)
1175 {
1176 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1177 	struct mlxsw_sp_ipip_entry *ipip_entry;
1178 	struct mlxsw_sp_ipip_entry *ret = NULL;
1179 	int err;
1180 
1181 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1182 	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1183 	if (!ipip_entry)
1184 		return ERR_PTR(-ENOMEM);
1185 
1186 	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1187 							    ol_dev, NULL);
1188 	if (IS_ERR(ipip_entry->ol_lb)) {
1189 		ret = ERR_CAST(ipip_entry->ol_lb);
1190 		goto err_ol_ipip_lb_create;
1191 	}
1192 
1193 	ipip_entry->ipipt = ipipt;
1194 	ipip_entry->ol_dev = ol_dev;
1195 	ipip_entry->parms = ipip_ops->parms_init(ol_dev);
1196 
1197 	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
1198 	if (err) {
1199 		ret = ERR_PTR(err);
1200 		goto err_rem_ip_addr_set;
1201 	}
1202 
1203 	return ipip_entry;
1204 
1205 err_rem_ip_addr_set:
1206 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1207 err_ol_ipip_lb_create:
1208 	kfree(ipip_entry);
1209 	return ret;
1210 }
1211 
1212 static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
1213 					struct mlxsw_sp_ipip_entry *ipip_entry)
1214 {
1215 	const struct mlxsw_sp_ipip_ops *ipip_ops =
1216 		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1217 
1218 	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
1219 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1220 	kfree(ipip_entry);
1221 }
1222 
1223 static bool
1224 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1225 				  const enum mlxsw_sp_l3proto ul_proto,
1226 				  union mlxsw_sp_l3addr saddr,
1227 				  u32 ul_tb_id,
1228 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1229 {
1230 	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1231 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1232 	union mlxsw_sp_l3addr tun_saddr;
1233 
1234 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1235 		return false;
1236 
1237 	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1238 	return tun_ul_tb_id == ul_tb_id &&
1239 	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1240 }
1241 
1242 static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
1243 						 enum mlxsw_sp_ipip_type ipipt)
1244 {
1245 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1246 
1247 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1248 
1249 	/* Not all tunnels require to increase the default pasing depth
1250 	 * (96 bytes).
1251 	 */
1252 	if (ipip_ops->inc_parsing_depth)
1253 		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
1254 
1255 	return 0;
1256 }
1257 
1258 static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
1259 						  enum mlxsw_sp_ipip_type ipipt)
1260 {
1261 	const struct mlxsw_sp_ipip_ops *ipip_ops =
1262 		mlxsw_sp->router->ipip_ops_arr[ipipt];
1263 
1264 	if (ipip_ops->inc_parsing_depth)
1265 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
1266 }
1267 
1268 static int
1269 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1270 			      struct mlxsw_sp_fib_entry *fib_entry,
1271 			      struct mlxsw_sp_ipip_entry *ipip_entry)
1272 {
1273 	u32 tunnel_index;
1274 	int err;
1275 
1276 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1277 				  1, &tunnel_index);
1278 	if (err)
1279 		return err;
1280 
1281 	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
1282 						    ipip_entry->ipipt);
1283 	if (err)
1284 		goto err_parsing_depth_inc;
1285 
1286 	ipip_entry->decap_fib_entry = fib_entry;
1287 	fib_entry->decap.ipip_entry = ipip_entry;
1288 	fib_entry->decap.tunnel_index = tunnel_index;
1289 
1290 	return 0;
1291 
1292 err_parsing_depth_inc:
1293 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
1294 			   fib_entry->decap.tunnel_index);
1295 	return err;
1296 }
1297 
1298 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1299 					  struct mlxsw_sp_fib_entry *fib_entry)
1300 {
1301 	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
1302 
1303 	/* Unlink this node from the IPIP entry that it's the decap entry of. */
1304 	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1305 	fib_entry->decap.ipip_entry = NULL;
1306 	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
1307 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1308 			   1, fib_entry->decap.tunnel_index);
1309 }
1310 
1311 static struct mlxsw_sp_fib_node *
1312 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1313 			 size_t addr_len, unsigned char prefix_len);
1314 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1315 				     struct mlxsw_sp_fib_entry *fib_entry);
1316 
1317 static void
1318 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1319 				 struct mlxsw_sp_ipip_entry *ipip_entry)
1320 {
1321 	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1322 
1323 	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1324 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1325 
1326 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1327 }
1328 
1329 static void
1330 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1331 				  struct mlxsw_sp_ipip_entry *ipip_entry,
1332 				  struct mlxsw_sp_fib_entry *decap_fib_entry)
1333 {
1334 	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1335 					  ipip_entry))
1336 		return;
1337 	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1338 
1339 	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1340 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1341 }
1342 
1343 static struct mlxsw_sp_fib_entry *
1344 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1345 				     enum mlxsw_sp_l3proto proto,
1346 				     const union mlxsw_sp_l3addr *addr,
1347 				     enum mlxsw_sp_fib_entry_type type)
1348 {
1349 	struct mlxsw_sp_fib_node *fib_node;
1350 	unsigned char addr_prefix_len;
1351 	struct mlxsw_sp_fib *fib;
1352 	struct mlxsw_sp_vr *vr;
1353 	const void *addrp;
1354 	size_t addr_len;
1355 	u32 addr4;
1356 
1357 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1358 	if (!vr)
1359 		return NULL;
1360 	fib = mlxsw_sp_vr_fib(vr, proto);
1361 
1362 	switch (proto) {
1363 	case MLXSW_SP_L3_PROTO_IPV4:
1364 		addr4 = be32_to_cpu(addr->addr4);
1365 		addrp = &addr4;
1366 		addr_len = 4;
1367 		addr_prefix_len = 32;
1368 		break;
1369 	case MLXSW_SP_L3_PROTO_IPV6:
1370 		addrp = &addr->addr6;
1371 		addr_len = 16;
1372 		addr_prefix_len = 128;
1373 		break;
1374 	default:
1375 		WARN_ON(1);
1376 		return NULL;
1377 	}
1378 
1379 	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1380 					    addr_prefix_len);
1381 	if (!fib_node || fib_node->fib_entry->type != type)
1382 		return NULL;
1383 
1384 	return fib_node->fib_entry;
1385 }
1386 
1387 /* Given an IPIP entry, find the corresponding decap route. */
1388 static struct mlxsw_sp_fib_entry *
1389 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1390 			       struct mlxsw_sp_ipip_entry *ipip_entry)
1391 {
1392 	static struct mlxsw_sp_fib_node *fib_node;
1393 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1394 	unsigned char saddr_prefix_len;
1395 	union mlxsw_sp_l3addr saddr;
1396 	struct mlxsw_sp_fib *ul_fib;
1397 	struct mlxsw_sp_vr *ul_vr;
1398 	const void *saddrp;
1399 	size_t saddr_len;
1400 	u32 ul_tb_id;
1401 	u32 saddr4;
1402 
1403 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1404 
1405 	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1406 	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1407 	if (!ul_vr)
1408 		return NULL;
1409 
1410 	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1411 	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1412 					   ipip_entry->ol_dev);
1413 
1414 	switch (ipip_ops->ul_proto) {
1415 	case MLXSW_SP_L3_PROTO_IPV4:
1416 		saddr4 = be32_to_cpu(saddr.addr4);
1417 		saddrp = &saddr4;
1418 		saddr_len = 4;
1419 		saddr_prefix_len = 32;
1420 		break;
1421 	case MLXSW_SP_L3_PROTO_IPV6:
1422 		saddrp = &saddr.addr6;
1423 		saddr_len = 16;
1424 		saddr_prefix_len = 128;
1425 		break;
1426 	default:
1427 		WARN_ON(1);
1428 		return NULL;
1429 	}
1430 
1431 	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1432 					    saddr_prefix_len);
1433 	if (!fib_node ||
1434 	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1435 		return NULL;
1436 
1437 	return fib_node->fib_entry;
1438 }
1439 
1440 static struct mlxsw_sp_ipip_entry *
1441 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1442 			   enum mlxsw_sp_ipip_type ipipt,
1443 			   struct net_device *ol_dev)
1444 {
1445 	struct mlxsw_sp_ipip_entry *ipip_entry;
1446 
1447 	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1448 	if (IS_ERR(ipip_entry))
1449 		return ipip_entry;
1450 
1451 	list_add_tail(&ipip_entry->ipip_list_node,
1452 		      &mlxsw_sp->router->ipip_list);
1453 
1454 	return ipip_entry;
1455 }
1456 
1457 static void
1458 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1459 			    struct mlxsw_sp_ipip_entry *ipip_entry)
1460 {
1461 	list_del(&ipip_entry->ipip_list_node);
1462 	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
1463 }
1464 
1465 static bool
1466 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1467 				  const struct net_device *ul_dev,
1468 				  enum mlxsw_sp_l3proto ul_proto,
1469 				  union mlxsw_sp_l3addr ul_dip,
1470 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1471 {
1472 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1473 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1474 
1475 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1476 		return false;
1477 
1478 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1479 						 ul_tb_id, ipip_entry);
1480 }
1481 
1482 /* Given decap parameters, find the corresponding IPIP entry. */
1483 static struct mlxsw_sp_ipip_entry *
1484 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
1485 				  enum mlxsw_sp_l3proto ul_proto,
1486 				  union mlxsw_sp_l3addr ul_dip)
1487 {
1488 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1489 	struct net_device *ul_dev;
1490 
1491 	rcu_read_lock();
1492 
1493 	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
1494 	if (!ul_dev)
1495 		goto out_unlock;
1496 
1497 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1498 			    ipip_list_node)
1499 		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1500 						      ul_proto, ul_dip,
1501 						      ipip_entry))
1502 			goto out_unlock;
1503 
1504 	rcu_read_unlock();
1505 
1506 	return NULL;
1507 
1508 out_unlock:
1509 	rcu_read_unlock();
1510 	return ipip_entry;
1511 }
1512 
1513 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1514 				      const struct net_device *dev,
1515 				      enum mlxsw_sp_ipip_type *p_type)
1516 {
1517 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1518 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1519 	enum mlxsw_sp_ipip_type ipipt;
1520 
1521 	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1522 		ipip_ops = router->ipip_ops_arr[ipipt];
1523 		if (dev->type == ipip_ops->dev_type) {
1524 			if (p_type)
1525 				*p_type = ipipt;
1526 			return true;
1527 		}
1528 	}
1529 	return false;
1530 }
1531 
1532 bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1533 				const struct net_device *dev)
1534 {
1535 	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1536 }
1537 
1538 static struct mlxsw_sp_ipip_entry *
1539 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1540 				   const struct net_device *ol_dev)
1541 {
1542 	struct mlxsw_sp_ipip_entry *ipip_entry;
1543 
1544 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1545 			    ipip_list_node)
1546 		if (ipip_entry->ol_dev == ol_dev)
1547 			return ipip_entry;
1548 
1549 	return NULL;
1550 }
1551 
1552 static struct mlxsw_sp_ipip_entry *
1553 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1554 				   const struct net_device *ul_dev,
1555 				   struct mlxsw_sp_ipip_entry *start)
1556 {
1557 	struct mlxsw_sp_ipip_entry *ipip_entry;
1558 
1559 	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1560 					ipip_list_node);
1561 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1562 				     ipip_list_node) {
1563 		struct net_device *ol_dev = ipip_entry->ol_dev;
1564 		struct net_device *ipip_ul_dev;
1565 
1566 		rcu_read_lock();
1567 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1568 		rcu_read_unlock();
1569 
1570 		if (ipip_ul_dev == ul_dev)
1571 			return ipip_entry;
1572 	}
1573 
1574 	return NULL;
1575 }
1576 
1577 bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1578 				const struct net_device *dev)
1579 {
1580 	bool is_ipip_ul;
1581 
1582 	mutex_lock(&mlxsw_sp->router->lock);
1583 	is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1584 	mutex_unlock(&mlxsw_sp->router->lock);
1585 
1586 	return is_ipip_ul;
1587 }
1588 
1589 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1590 						const struct net_device *ol_dev,
1591 						enum mlxsw_sp_ipip_type ipipt)
1592 {
1593 	const struct mlxsw_sp_ipip_ops *ops
1594 		= mlxsw_sp->router->ipip_ops_arr[ipipt];
1595 
1596 	return ops->can_offload(mlxsw_sp, ol_dev);
1597 }
1598 
1599 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1600 						struct net_device *ol_dev)
1601 {
1602 	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1603 	struct mlxsw_sp_ipip_entry *ipip_entry;
1604 	enum mlxsw_sp_l3proto ul_proto;
1605 	union mlxsw_sp_l3addr saddr;
1606 	u32 ul_tb_id;
1607 
1608 	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1609 	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1610 		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1611 		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1612 		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1613 		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1614 							  saddr, ul_tb_id,
1615 							  NULL)) {
1616 			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1617 								ol_dev);
1618 			if (IS_ERR(ipip_entry))
1619 				return PTR_ERR(ipip_entry);
1620 		}
1621 	}
1622 
1623 	return 0;
1624 }
1625 
1626 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1627 						   struct net_device *ol_dev)
1628 {
1629 	struct mlxsw_sp_ipip_entry *ipip_entry;
1630 
1631 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1632 	if (ipip_entry)
1633 		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1634 }
1635 
1636 static void
1637 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1638 				struct mlxsw_sp_ipip_entry *ipip_entry)
1639 {
1640 	struct mlxsw_sp_fib_entry *decap_fib_entry;
1641 
1642 	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1643 	if (decap_fib_entry)
1644 		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1645 						  decap_fib_entry);
1646 }
1647 
1648 static int
1649 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1650 			u16 ul_rif_id, bool enable)
1651 {
1652 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1653 	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
1654 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1655 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1656 	char ritr_pl[MLXSW_REG_RITR_LEN];
1657 	struct in6_addr *saddr6;
1658 	u32 saddr4;
1659 
1660 	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
1661 	switch (lb_cf.ul_protocol) {
1662 	case MLXSW_SP_L3_PROTO_IPV4:
1663 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1664 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1665 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1666 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1667 						   ipip_options, ul_vr_id,
1668 						   ul_rif_id, saddr4,
1669 						   lb_cf.okey);
1670 		break;
1671 
1672 	case MLXSW_SP_L3_PROTO_IPV6:
1673 		saddr6 = &lb_cf.saddr.addr6;
1674 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1675 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1676 		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
1677 						   ipip_options, ul_vr_id,
1678 						   ul_rif_id, saddr6,
1679 						   lb_cf.okey);
1680 		break;
1681 	}
1682 
1683 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1684 }
1685 
1686 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1687 						 struct net_device *ol_dev)
1688 {
1689 	struct mlxsw_sp_ipip_entry *ipip_entry;
1690 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1691 	int err = 0;
1692 
1693 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1694 	if (ipip_entry) {
1695 		lb_rif = ipip_entry->ol_lb;
1696 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1697 					      lb_rif->ul_rif_id, true);
1698 		if (err)
1699 			goto out;
1700 		lb_rif->common.mtu = ol_dev->mtu;
1701 	}
1702 
1703 out:
1704 	return err;
1705 }
1706 
1707 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1708 						struct net_device *ol_dev)
1709 {
1710 	struct mlxsw_sp_ipip_entry *ipip_entry;
1711 
1712 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1713 	if (ipip_entry)
1714 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1715 }
1716 
1717 static void
1718 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1719 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1720 {
1721 	if (ipip_entry->decap_fib_entry)
1722 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1723 }
1724 
1725 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1726 						  struct net_device *ol_dev)
1727 {
1728 	struct mlxsw_sp_ipip_entry *ipip_entry;
1729 
1730 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1731 	if (ipip_entry)
1732 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1733 }
1734 
1735 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1736 					 struct mlxsw_sp_rif *old_rif,
1737 					 struct mlxsw_sp_rif *new_rif);
1738 static int
1739 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1740 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1741 				 bool keep_encap,
1742 				 struct netlink_ext_ack *extack)
1743 {
1744 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1745 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1746 
1747 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1748 						     ipip_entry->ipipt,
1749 						     ipip_entry->ol_dev,
1750 						     extack);
1751 	if (IS_ERR(new_lb_rif))
1752 		return PTR_ERR(new_lb_rif);
1753 	ipip_entry->ol_lb = new_lb_rif;
1754 
1755 	if (keep_encap)
1756 		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1757 					     &new_lb_rif->common);
1758 
1759 	mlxsw_sp_rif_destroy(&old_lb_rif->common);
1760 
1761 	return 0;
1762 }
1763 
1764 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1765 					struct mlxsw_sp_rif *rif);
1766 
1767 /**
1768  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1769  * @mlxsw_sp: mlxsw_sp.
1770  * @ipip_entry: IPIP entry.
1771  * @recreate_loopback: Recreates the associated loopback RIF.
1772  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1773  *              relevant when recreate_loopback is true.
1774  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1775  *                   is only relevant when recreate_loopback is false.
1776  * @extack: extack.
1777  *
1778  * Return: Non-zero value on failure.
1779  */
1780 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1781 					struct mlxsw_sp_ipip_entry *ipip_entry,
1782 					bool recreate_loopback,
1783 					bool keep_encap,
1784 					bool update_nexthops,
1785 					struct netlink_ext_ack *extack)
1786 {
1787 	int err;
1788 
1789 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1790 	 * recreate it. That creates a window of opportunity where RALUE and
1791 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1792 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1793 	 * of RALUE, demote the decap route back.
1794 	 */
1795 	if (ipip_entry->decap_fib_entry)
1796 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1797 
1798 	if (recreate_loopback) {
1799 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1800 						       keep_encap, extack);
1801 		if (err)
1802 			return err;
1803 	} else if (update_nexthops) {
1804 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1805 					    &ipip_entry->ol_lb->common);
1806 	}
1807 
1808 	if (ipip_entry->ol_dev->flags & IFF_UP)
1809 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1810 
1811 	return 0;
1812 }
1813 
1814 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1815 						struct net_device *ol_dev,
1816 						struct netlink_ext_ack *extack)
1817 {
1818 	struct mlxsw_sp_ipip_entry *ipip_entry =
1819 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1820 
1821 	if (!ipip_entry)
1822 		return 0;
1823 
1824 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1825 						   true, false, false, extack);
1826 }
1827 
1828 static int
1829 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1830 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1831 				     struct net_device *ul_dev,
1832 				     bool *demote_this,
1833 				     struct netlink_ext_ack *extack)
1834 {
1835 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1836 	enum mlxsw_sp_l3proto ul_proto;
1837 	union mlxsw_sp_l3addr saddr;
1838 
1839 	/* Moving underlay to a different VRF might cause local address
1840 	 * conflict, and the conflicting tunnels need to be demoted.
1841 	 */
1842 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1843 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1844 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1845 						 saddr, ul_tb_id,
1846 						 ipip_entry)) {
1847 		*demote_this = true;
1848 		return 0;
1849 	}
1850 
1851 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1852 						   true, true, false, extack);
1853 }
1854 
1855 static int
1856 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1857 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1858 				    struct net_device *ul_dev)
1859 {
1860 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1861 						   false, false, true, NULL);
1862 }
1863 
1864 static int
1865 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1866 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1867 				      struct net_device *ul_dev)
1868 {
1869 	/* A down underlay device causes encapsulated packets to not be
1870 	 * forwarded, but decap still works. So refresh next hops without
1871 	 * touching anything else.
1872 	 */
1873 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1874 						   false, false, true, NULL);
1875 }
1876 
1877 static int
1878 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1879 					struct net_device *ol_dev,
1880 					struct netlink_ext_ack *extack)
1881 {
1882 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1883 	struct mlxsw_sp_ipip_entry *ipip_entry;
1884 	int err;
1885 
1886 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1887 	if (!ipip_entry)
1888 		/* A change might make a tunnel eligible for offloading, but
1889 		 * that is currently not implemented. What falls to slow path
1890 		 * stays there.
1891 		 */
1892 		return 0;
1893 
1894 	/* A change might make a tunnel not eligible for offloading. */
1895 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1896 						 ipip_entry->ipipt)) {
1897 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1898 		return 0;
1899 	}
1900 
1901 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1902 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1903 	return err;
1904 }
1905 
1906 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1907 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1908 {
1909 	struct net_device *ol_dev = ipip_entry->ol_dev;
1910 
1911 	if (ol_dev->flags & IFF_UP)
1912 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1913 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1914 }
1915 
1916 /* The configuration where several tunnels have the same local address in the
1917  * same underlay table needs special treatment in the HW. That is currently not
1918  * implemented in the driver. This function finds and demotes the first tunnel
1919  * with a given source address, except the one passed in in the argument
1920  * `except'.
1921  */
1922 bool
1923 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1924 				     enum mlxsw_sp_l3proto ul_proto,
1925 				     union mlxsw_sp_l3addr saddr,
1926 				     u32 ul_tb_id,
1927 				     const struct mlxsw_sp_ipip_entry *except)
1928 {
1929 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1930 
1931 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1932 				 ipip_list_node) {
1933 		if (ipip_entry != except &&
1934 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1935 						      ul_tb_id, ipip_entry)) {
1936 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1937 			return true;
1938 		}
1939 	}
1940 
1941 	return false;
1942 }
1943 
1944 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1945 						     struct net_device *ul_dev)
1946 {
1947 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1948 
1949 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1950 				 ipip_list_node) {
1951 		struct net_device *ol_dev = ipip_entry->ol_dev;
1952 		struct net_device *ipip_ul_dev;
1953 
1954 		rcu_read_lock();
1955 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1956 		rcu_read_unlock();
1957 		if (ipip_ul_dev == ul_dev)
1958 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1959 	}
1960 }
1961 
1962 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1963 				     struct net_device *ol_dev,
1964 				     unsigned long event,
1965 				     struct netdev_notifier_info *info)
1966 {
1967 	struct netdev_notifier_changeupper_info *chup;
1968 	struct netlink_ext_ack *extack;
1969 	int err = 0;
1970 
1971 	mutex_lock(&mlxsw_sp->router->lock);
1972 	switch (event) {
1973 	case NETDEV_REGISTER:
1974 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1975 		break;
1976 	case NETDEV_UNREGISTER:
1977 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1978 		break;
1979 	case NETDEV_UP:
1980 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1981 		break;
1982 	case NETDEV_DOWN:
1983 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1984 		break;
1985 	case NETDEV_CHANGEUPPER:
1986 		chup = container_of(info, typeof(*chup), info);
1987 		extack = info->extack;
1988 		if (netif_is_l3_master(chup->upper_dev))
1989 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1990 								   ol_dev,
1991 								   extack);
1992 		break;
1993 	case NETDEV_CHANGE:
1994 		extack = info->extack;
1995 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1996 							      ol_dev, extack);
1997 		break;
1998 	case NETDEV_CHANGEMTU:
1999 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
2000 		break;
2001 	}
2002 	mutex_unlock(&mlxsw_sp->router->lock);
2003 	return err;
2004 }
2005 
2006 static int
2007 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2008 				   struct mlxsw_sp_ipip_entry *ipip_entry,
2009 				   struct net_device *ul_dev,
2010 				   bool *demote_this,
2011 				   unsigned long event,
2012 				   struct netdev_notifier_info *info)
2013 {
2014 	struct netdev_notifier_changeupper_info *chup;
2015 	struct netlink_ext_ack *extack;
2016 
2017 	switch (event) {
2018 	case NETDEV_CHANGEUPPER:
2019 		chup = container_of(info, typeof(*chup), info);
2020 		extack = info->extack;
2021 		if (netif_is_l3_master(chup->upper_dev))
2022 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
2023 								    ipip_entry,
2024 								    ul_dev,
2025 								    demote_this,
2026 								    extack);
2027 		break;
2028 
2029 	case NETDEV_UP:
2030 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
2031 							   ul_dev);
2032 	case NETDEV_DOWN:
2033 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
2034 							     ipip_entry,
2035 							     ul_dev);
2036 	}
2037 	return 0;
2038 }
2039 
2040 int
2041 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2042 				 struct net_device *ul_dev,
2043 				 unsigned long event,
2044 				 struct netdev_notifier_info *info)
2045 {
2046 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
2047 	int err = 0;
2048 
2049 	mutex_lock(&mlxsw_sp->router->lock);
2050 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
2051 								ul_dev,
2052 								ipip_entry))) {
2053 		struct mlxsw_sp_ipip_entry *prev;
2054 		bool demote_this = false;
2055 
2056 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
2057 							 ul_dev, &demote_this,
2058 							 event, info);
2059 		if (err) {
2060 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
2061 								 ul_dev);
2062 			break;
2063 		}
2064 
2065 		if (demote_this) {
2066 			if (list_is_first(&ipip_entry->ipip_list_node,
2067 					  &mlxsw_sp->router->ipip_list))
2068 				prev = NULL;
2069 			else
2070 				/* This can't be cached from previous iteration,
2071 				 * because that entry could be gone now.
2072 				 */
2073 				prev = list_prev_entry(ipip_entry,
2074 						       ipip_list_node);
2075 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
2076 			ipip_entry = prev;
2077 		}
2078 	}
2079 	mutex_unlock(&mlxsw_sp->router->lock);
2080 
2081 	return err;
2082 }
2083 
2084 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2085 				      enum mlxsw_sp_l3proto ul_proto,
2086 				      const union mlxsw_sp_l3addr *ul_sip,
2087 				      u32 tunnel_index)
2088 {
2089 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2090 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2091 	struct mlxsw_sp_fib_entry *fib_entry;
2092 	int err = 0;
2093 
2094 	mutex_lock(&mlxsw_sp->router->lock);
2095 
2096 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
2097 		err = -EINVAL;
2098 		goto out;
2099 	}
2100 
2101 	router->nve_decap_config.ul_tb_id = ul_tb_id;
2102 	router->nve_decap_config.tunnel_index = tunnel_index;
2103 	router->nve_decap_config.ul_proto = ul_proto;
2104 	router->nve_decap_config.ul_sip = *ul_sip;
2105 	router->nve_decap_config.valid = true;
2106 
2107 	/* It is valid to create a tunnel with a local IP and only later
2108 	 * assign this IP address to a local interface
2109 	 */
2110 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2111 							 ul_proto, ul_sip,
2112 							 type);
2113 	if (!fib_entry)
2114 		goto out;
2115 
2116 	fib_entry->decap.tunnel_index = tunnel_index;
2117 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2118 
2119 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2120 	if (err)
2121 		goto err_fib_entry_update;
2122 
2123 	goto out;
2124 
2125 err_fib_entry_update:
2126 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2127 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2128 out:
2129 	mutex_unlock(&mlxsw_sp->router->lock);
2130 	return err;
2131 }
2132 
2133 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2134 				      enum mlxsw_sp_l3proto ul_proto,
2135 				      const union mlxsw_sp_l3addr *ul_sip)
2136 {
2137 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2138 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2139 	struct mlxsw_sp_fib_entry *fib_entry;
2140 
2141 	mutex_lock(&mlxsw_sp->router->lock);
2142 
2143 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2144 		goto out;
2145 
2146 	router->nve_decap_config.valid = false;
2147 
2148 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2149 							 ul_proto, ul_sip,
2150 							 type);
2151 	if (!fib_entry)
2152 		goto out;
2153 
2154 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2155 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2156 out:
2157 	mutex_unlock(&mlxsw_sp->router->lock);
2158 }
2159 
2160 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2161 					 u32 ul_tb_id,
2162 					 enum mlxsw_sp_l3proto ul_proto,
2163 					 const union mlxsw_sp_l3addr *ul_sip)
2164 {
2165 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2166 
2167 	return router->nve_decap_config.valid &&
2168 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
2169 	       router->nve_decap_config.ul_proto == ul_proto &&
2170 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2171 		       sizeof(*ul_sip));
2172 }
2173 
2174 struct mlxsw_sp_neigh_key {
2175 	struct neighbour *n;
2176 };
2177 
2178 struct mlxsw_sp_neigh_entry {
2179 	struct list_head rif_list_node;
2180 	struct rhash_head ht_node;
2181 	struct mlxsw_sp_neigh_key key;
2182 	u16 rif;
2183 	bool connected;
2184 	unsigned char ha[ETH_ALEN];
2185 	struct list_head nexthop_list; /* list of nexthops using
2186 					* this neigh entry
2187 					*/
2188 	struct list_head nexthop_neighs_list_node;
2189 	unsigned int counter_index;
2190 	bool counter_valid;
2191 };
2192 
2193 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2194 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2195 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2196 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
2197 };
2198 
2199 struct mlxsw_sp_neigh_entry *
2200 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2201 			struct mlxsw_sp_neigh_entry *neigh_entry)
2202 {
2203 	if (!neigh_entry) {
2204 		if (list_empty(&rif->neigh_list))
2205 			return NULL;
2206 		else
2207 			return list_first_entry(&rif->neigh_list,
2208 						typeof(*neigh_entry),
2209 						rif_list_node);
2210 	}
2211 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2212 		return NULL;
2213 	return list_next_entry(neigh_entry, rif_list_node);
2214 }
2215 
2216 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2217 {
2218 	return neigh_entry->key.n->tbl->family;
2219 }
2220 
2221 unsigned char *
2222 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2223 {
2224 	return neigh_entry->ha;
2225 }
2226 
2227 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2228 {
2229 	struct neighbour *n;
2230 
2231 	n = neigh_entry->key.n;
2232 	return ntohl(*((__be32 *) n->primary_key));
2233 }
2234 
2235 struct in6_addr *
2236 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2237 {
2238 	struct neighbour *n;
2239 
2240 	n = neigh_entry->key.n;
2241 	return (struct in6_addr *) &n->primary_key;
2242 }
2243 
2244 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2245 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2246 			       u64 *p_counter)
2247 {
2248 	if (!neigh_entry->counter_valid)
2249 		return -EINVAL;
2250 
2251 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2252 					 p_counter, NULL);
2253 }
2254 
2255 static struct mlxsw_sp_neigh_entry *
2256 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2257 			   u16 rif)
2258 {
2259 	struct mlxsw_sp_neigh_entry *neigh_entry;
2260 
2261 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2262 	if (!neigh_entry)
2263 		return NULL;
2264 
2265 	neigh_entry->key.n = n;
2266 	neigh_entry->rif = rif;
2267 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2268 
2269 	return neigh_entry;
2270 }
2271 
2272 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2273 {
2274 	kfree(neigh_entry);
2275 }
2276 
2277 static int
2278 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2279 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2280 {
2281 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2282 				      &neigh_entry->ht_node,
2283 				      mlxsw_sp_neigh_ht_params);
2284 }
2285 
2286 static void
2287 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2288 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2289 {
2290 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2291 			       &neigh_entry->ht_node,
2292 			       mlxsw_sp_neigh_ht_params);
2293 }
2294 
2295 static bool
2296 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2297 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2298 {
2299 	struct devlink *devlink;
2300 	const char *table_name;
2301 
2302 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2303 	case AF_INET:
2304 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2305 		break;
2306 	case AF_INET6:
2307 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2308 		break;
2309 	default:
2310 		WARN_ON(1);
2311 		return false;
2312 	}
2313 
2314 	devlink = priv_to_devlink(mlxsw_sp->core);
2315 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2316 }
2317 
2318 static void
2319 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2320 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2321 {
2322 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2323 		return;
2324 
2325 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2326 		return;
2327 
2328 	neigh_entry->counter_valid = true;
2329 }
2330 
2331 static void
2332 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2333 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2334 {
2335 	if (!neigh_entry->counter_valid)
2336 		return;
2337 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2338 				   neigh_entry->counter_index);
2339 	neigh_entry->counter_valid = false;
2340 }
2341 
2342 static struct mlxsw_sp_neigh_entry *
2343 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2344 {
2345 	struct mlxsw_sp_neigh_entry *neigh_entry;
2346 	struct mlxsw_sp_rif *rif;
2347 	int err;
2348 
2349 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2350 	if (!rif)
2351 		return ERR_PTR(-EINVAL);
2352 
2353 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2354 	if (!neigh_entry)
2355 		return ERR_PTR(-ENOMEM);
2356 
2357 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2358 	if (err)
2359 		goto err_neigh_entry_insert;
2360 
2361 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2362 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2363 
2364 	return neigh_entry;
2365 
2366 err_neigh_entry_insert:
2367 	mlxsw_sp_neigh_entry_free(neigh_entry);
2368 	return ERR_PTR(err);
2369 }
2370 
2371 static void
2372 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2373 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2374 {
2375 	list_del(&neigh_entry->rif_list_node);
2376 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2377 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2378 	mlxsw_sp_neigh_entry_free(neigh_entry);
2379 }
2380 
2381 static struct mlxsw_sp_neigh_entry *
2382 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2383 {
2384 	struct mlxsw_sp_neigh_key key;
2385 
2386 	key.n = n;
2387 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2388 				      &key, mlxsw_sp_neigh_ht_params);
2389 }
2390 
2391 static void
2392 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2393 {
2394 	unsigned long interval;
2395 
2396 #if IS_ENABLED(CONFIG_IPV6)
2397 	interval = min_t(unsigned long,
2398 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2399 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2400 #else
2401 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2402 #endif
2403 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2404 }
2405 
2406 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2407 						   char *rauhtd_pl,
2408 						   int ent_index)
2409 {
2410 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2411 	struct net_device *dev;
2412 	struct neighbour *n;
2413 	__be32 dipn;
2414 	u32 dip;
2415 	u16 rif;
2416 
2417 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2418 
2419 	if (WARN_ON_ONCE(rif >= max_rifs))
2420 		return;
2421 	if (!mlxsw_sp->router->rifs[rif]) {
2422 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2423 		return;
2424 	}
2425 
2426 	dipn = htonl(dip);
2427 	dev = mlxsw_sp->router->rifs[rif]->dev;
2428 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2429 	if (!n)
2430 		return;
2431 
2432 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2433 	neigh_event_send(n, NULL);
2434 	neigh_release(n);
2435 }
2436 
2437 #if IS_ENABLED(CONFIG_IPV6)
2438 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2439 						   char *rauhtd_pl,
2440 						   int rec_index)
2441 {
2442 	struct net_device *dev;
2443 	struct neighbour *n;
2444 	struct in6_addr dip;
2445 	u16 rif;
2446 
2447 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2448 					 (char *) &dip);
2449 
2450 	if (!mlxsw_sp->router->rifs[rif]) {
2451 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2452 		return;
2453 	}
2454 
2455 	dev = mlxsw_sp->router->rifs[rif]->dev;
2456 	n = neigh_lookup(&nd_tbl, &dip, dev);
2457 	if (!n)
2458 		return;
2459 
2460 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2461 	neigh_event_send(n, NULL);
2462 	neigh_release(n);
2463 }
2464 #else
2465 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2466 						   char *rauhtd_pl,
2467 						   int rec_index)
2468 {
2469 }
2470 #endif
2471 
2472 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2473 						   char *rauhtd_pl,
2474 						   int rec_index)
2475 {
2476 	u8 num_entries;
2477 	int i;
2478 
2479 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2480 								rec_index);
2481 	/* Hardware starts counting at 0, so add 1. */
2482 	num_entries++;
2483 
2484 	/* Each record consists of several neighbour entries. */
2485 	for (i = 0; i < num_entries; i++) {
2486 		int ent_index;
2487 
2488 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2489 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2490 						       ent_index);
2491 	}
2492 
2493 }
2494 
2495 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2496 						   char *rauhtd_pl,
2497 						   int rec_index)
2498 {
2499 	/* One record contains one entry. */
2500 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2501 					       rec_index);
2502 }
2503 
2504 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2505 					      char *rauhtd_pl, int rec_index)
2506 {
2507 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2508 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2509 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2510 						       rec_index);
2511 		break;
2512 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2513 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2514 						       rec_index);
2515 		break;
2516 	}
2517 }
2518 
2519 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2520 {
2521 	u8 num_rec, last_rec_index, num_entries;
2522 
2523 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2524 	last_rec_index = num_rec - 1;
2525 
2526 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2527 		return false;
2528 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2529 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2530 		return true;
2531 
2532 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2533 								last_rec_index);
2534 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2535 		return true;
2536 	return false;
2537 }
2538 
2539 static int
2540 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2541 				       char *rauhtd_pl,
2542 				       enum mlxsw_reg_rauhtd_type type)
2543 {
2544 	int i, num_rec;
2545 	int err;
2546 
2547 	/* Ensure the RIF we read from the device does not change mid-dump. */
2548 	mutex_lock(&mlxsw_sp->router->lock);
2549 	do {
2550 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2551 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2552 				      rauhtd_pl);
2553 		if (err) {
2554 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2555 			break;
2556 		}
2557 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2558 		for (i = 0; i < num_rec; i++)
2559 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2560 							  i);
2561 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2562 	mutex_unlock(&mlxsw_sp->router->lock);
2563 
2564 	return err;
2565 }
2566 
2567 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2568 {
2569 	enum mlxsw_reg_rauhtd_type type;
2570 	char *rauhtd_pl;
2571 	int err;
2572 
2573 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2574 	if (!rauhtd_pl)
2575 		return -ENOMEM;
2576 
2577 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2578 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2579 	if (err)
2580 		goto out;
2581 
2582 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2583 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2584 out:
2585 	kfree(rauhtd_pl);
2586 	return err;
2587 }
2588 
2589 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2590 {
2591 	struct mlxsw_sp_neigh_entry *neigh_entry;
2592 
2593 	mutex_lock(&mlxsw_sp->router->lock);
2594 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2595 			    nexthop_neighs_list_node)
2596 		/* If this neigh have nexthops, make the kernel think this neigh
2597 		 * is active regardless of the traffic.
2598 		 */
2599 		neigh_event_send(neigh_entry->key.n, NULL);
2600 	mutex_unlock(&mlxsw_sp->router->lock);
2601 }
2602 
2603 static void
2604 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2605 {
2606 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2607 
2608 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2609 			       msecs_to_jiffies(interval));
2610 }
2611 
2612 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2613 {
2614 	struct mlxsw_sp_router *router;
2615 	int err;
2616 
2617 	router = container_of(work, struct mlxsw_sp_router,
2618 			      neighs_update.dw.work);
2619 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2620 	if (err)
2621 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2622 
2623 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2624 
2625 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2626 }
2627 
2628 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2629 {
2630 	struct mlxsw_sp_neigh_entry *neigh_entry;
2631 	struct mlxsw_sp_router *router;
2632 
2633 	router = container_of(work, struct mlxsw_sp_router,
2634 			      nexthop_probe_dw.work);
2635 	/* Iterate over nexthop neighbours, find those who are unresolved and
2636 	 * send arp on them. This solves the chicken-egg problem when
2637 	 * the nexthop wouldn't get offloaded until the neighbor is resolved
2638 	 * but it wouldn't get resolved ever in case traffic is flowing in HW
2639 	 * using different nexthop.
2640 	 */
2641 	mutex_lock(&router->lock);
2642 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2643 			    nexthop_neighs_list_node)
2644 		if (!neigh_entry->connected)
2645 			neigh_event_send(neigh_entry->key.n, NULL);
2646 	mutex_unlock(&router->lock);
2647 
2648 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2649 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2650 }
2651 
2652 static void
2653 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2654 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2655 			      bool removing, bool dead);
2656 
2657 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2658 {
2659 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2660 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2661 }
2662 
2663 static int
2664 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2665 				struct mlxsw_sp_neigh_entry *neigh_entry,
2666 				enum mlxsw_reg_rauht_op op)
2667 {
2668 	struct neighbour *n = neigh_entry->key.n;
2669 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2670 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2671 
2672 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2673 			      dip);
2674 	if (neigh_entry->counter_valid)
2675 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2676 					     neigh_entry->counter_index);
2677 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2678 }
2679 
2680 static int
2681 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2682 				struct mlxsw_sp_neigh_entry *neigh_entry,
2683 				enum mlxsw_reg_rauht_op op)
2684 {
2685 	struct neighbour *n = neigh_entry->key.n;
2686 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2687 	const char *dip = n->primary_key;
2688 
2689 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2690 			      dip);
2691 	if (neigh_entry->counter_valid)
2692 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2693 					     neigh_entry->counter_index);
2694 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2695 }
2696 
2697 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2698 {
2699 	struct neighbour *n = neigh_entry->key.n;
2700 
2701 	/* Packets with a link-local destination address are trapped
2702 	 * after LPM lookup and never reach the neighbour table, so
2703 	 * there is no need to program such neighbours to the device.
2704 	 */
2705 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2706 	    IPV6_ADDR_LINKLOCAL)
2707 		return true;
2708 	return false;
2709 }
2710 
2711 static void
2712 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2713 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2714 			    bool adding)
2715 {
2716 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2717 	int err;
2718 
2719 	if (!adding && !neigh_entry->connected)
2720 		return;
2721 	neigh_entry->connected = adding;
2722 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2723 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2724 						      op);
2725 		if (err)
2726 			return;
2727 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2728 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2729 			return;
2730 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2731 						      op);
2732 		if (err)
2733 			return;
2734 	} else {
2735 		WARN_ON_ONCE(1);
2736 		return;
2737 	}
2738 
2739 	if (adding)
2740 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2741 	else
2742 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2743 }
2744 
2745 void
2746 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2747 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2748 				    bool adding)
2749 {
2750 	if (adding)
2751 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2752 	else
2753 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2754 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2755 }
2756 
2757 struct mlxsw_sp_netevent_work {
2758 	struct work_struct work;
2759 	struct mlxsw_sp *mlxsw_sp;
2760 	struct neighbour *n;
2761 };
2762 
2763 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2764 {
2765 	struct mlxsw_sp_netevent_work *net_work =
2766 		container_of(work, struct mlxsw_sp_netevent_work, work);
2767 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2768 	struct mlxsw_sp_neigh_entry *neigh_entry;
2769 	struct neighbour *n = net_work->n;
2770 	unsigned char ha[ETH_ALEN];
2771 	bool entry_connected;
2772 	u8 nud_state, dead;
2773 
2774 	/* If these parameters are changed after we release the lock,
2775 	 * then we are guaranteed to receive another event letting us
2776 	 * know about it.
2777 	 */
2778 	read_lock_bh(&n->lock);
2779 	memcpy(ha, n->ha, ETH_ALEN);
2780 	nud_state = n->nud_state;
2781 	dead = n->dead;
2782 	read_unlock_bh(&n->lock);
2783 
2784 	mutex_lock(&mlxsw_sp->router->lock);
2785 	mlxsw_sp_span_respin(mlxsw_sp);
2786 
2787 	entry_connected = nud_state & NUD_VALID && !dead;
2788 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2789 	if (!entry_connected && !neigh_entry)
2790 		goto out;
2791 	if (!neigh_entry) {
2792 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2793 		if (IS_ERR(neigh_entry))
2794 			goto out;
2795 	}
2796 
2797 	if (neigh_entry->connected && entry_connected &&
2798 	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2799 		goto out;
2800 
2801 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2802 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2803 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2804 				      dead);
2805 
2806 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2807 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2808 
2809 out:
2810 	mutex_unlock(&mlxsw_sp->router->lock);
2811 	neigh_release(n);
2812 	kfree(net_work);
2813 }
2814 
2815 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2816 
2817 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2818 {
2819 	struct mlxsw_sp_netevent_work *net_work =
2820 		container_of(work, struct mlxsw_sp_netevent_work, work);
2821 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2822 
2823 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2824 	kfree(net_work);
2825 }
2826 
2827 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2828 
2829 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2830 {
2831 	struct mlxsw_sp_netevent_work *net_work =
2832 		container_of(work, struct mlxsw_sp_netevent_work, work);
2833 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2834 
2835 	__mlxsw_sp_router_init(mlxsw_sp);
2836 	kfree(net_work);
2837 }
2838 
2839 static int mlxsw_sp_router_schedule_work(struct net *net,
2840 					 struct notifier_block *nb,
2841 					 void (*cb)(struct work_struct *))
2842 {
2843 	struct mlxsw_sp_netevent_work *net_work;
2844 	struct mlxsw_sp_router *router;
2845 
2846 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2847 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2848 		return NOTIFY_DONE;
2849 
2850 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2851 	if (!net_work)
2852 		return NOTIFY_BAD;
2853 
2854 	INIT_WORK(&net_work->work, cb);
2855 	net_work->mlxsw_sp = router->mlxsw_sp;
2856 	mlxsw_core_schedule_work(&net_work->work);
2857 	return NOTIFY_DONE;
2858 }
2859 
2860 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2861 					  unsigned long event, void *ptr)
2862 {
2863 	struct mlxsw_sp_netevent_work *net_work;
2864 	struct mlxsw_sp_port *mlxsw_sp_port;
2865 	struct mlxsw_sp *mlxsw_sp;
2866 	unsigned long interval;
2867 	struct neigh_parms *p;
2868 	struct neighbour *n;
2869 
2870 	switch (event) {
2871 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2872 		p = ptr;
2873 
2874 		/* We don't care about changes in the default table. */
2875 		if (!p->dev || (p->tbl->family != AF_INET &&
2876 				p->tbl->family != AF_INET6))
2877 			return NOTIFY_DONE;
2878 
2879 		/* We are in atomic context and can't take RTNL mutex,
2880 		 * so use RCU variant to walk the device chain.
2881 		 */
2882 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2883 		if (!mlxsw_sp_port)
2884 			return NOTIFY_DONE;
2885 
2886 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2887 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2888 		mlxsw_sp->router->neighs_update.interval = interval;
2889 
2890 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2891 		break;
2892 	case NETEVENT_NEIGH_UPDATE:
2893 		n = ptr;
2894 
2895 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2896 			return NOTIFY_DONE;
2897 
2898 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2899 		if (!mlxsw_sp_port)
2900 			return NOTIFY_DONE;
2901 
2902 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2903 		if (!net_work) {
2904 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
2905 			return NOTIFY_BAD;
2906 		}
2907 
2908 		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2909 		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2910 		net_work->n = n;
2911 
2912 		/* Take a reference to ensure the neighbour won't be
2913 		 * destructed until we drop the reference in delayed
2914 		 * work.
2915 		 */
2916 		neigh_clone(n);
2917 		mlxsw_core_schedule_work(&net_work->work);
2918 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2919 		break;
2920 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2921 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2922 		return mlxsw_sp_router_schedule_work(ptr, nb,
2923 				mlxsw_sp_router_mp_hash_event_work);
2924 
2925 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2926 		return mlxsw_sp_router_schedule_work(ptr, nb,
2927 				mlxsw_sp_router_update_priority_work);
2928 	}
2929 
2930 	return NOTIFY_DONE;
2931 }
2932 
2933 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2934 {
2935 	int err;
2936 
2937 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2938 			      &mlxsw_sp_neigh_ht_params);
2939 	if (err)
2940 		return err;
2941 
2942 	/* Initialize the polling interval according to the default
2943 	 * table.
2944 	 */
2945 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2946 
2947 	/* Create the delayed works for the activity_update */
2948 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2949 			  mlxsw_sp_router_neighs_update_work);
2950 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2951 			  mlxsw_sp_router_probe_unresolved_nexthops);
2952 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2953 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2954 	return 0;
2955 }
2956 
2957 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2958 {
2959 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2960 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2961 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2962 }
2963 
2964 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2965 					 struct mlxsw_sp_rif *rif)
2966 {
2967 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2968 
2969 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2970 				 rif_list_node) {
2971 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2972 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2973 	}
2974 }
2975 
2976 enum mlxsw_sp_nexthop_type {
2977 	MLXSW_SP_NEXTHOP_TYPE_ETH,
2978 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
2979 };
2980 
2981 enum mlxsw_sp_nexthop_action {
2982 	/* Nexthop forwards packets to an egress RIF */
2983 	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
2984 	/* Nexthop discards packets */
2985 	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
2986 	/* Nexthop traps packets */
2987 	MLXSW_SP_NEXTHOP_ACTION_TRAP,
2988 };
2989 
2990 struct mlxsw_sp_nexthop_key {
2991 	struct fib_nh *fib_nh;
2992 };
2993 
2994 struct mlxsw_sp_nexthop {
2995 	struct list_head neigh_list_node; /* member of neigh entry list */
2996 	struct list_head rif_list_node;
2997 	struct list_head router_list_node;
2998 	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
2999 						   * this nexthop belongs to
3000 						   */
3001 	struct rhash_head ht_node;
3002 	struct neigh_table *neigh_tbl;
3003 	struct mlxsw_sp_nexthop_key key;
3004 	unsigned char gw_addr[sizeof(struct in6_addr)];
3005 	int ifindex;
3006 	int nh_weight;
3007 	int norm_nh_weight;
3008 	int num_adj_entries;
3009 	struct mlxsw_sp_rif *rif;
3010 	u8 should_offload:1, /* set indicates this nexthop should be written
3011 			      * to the adjacency table.
3012 			      */
3013 	   offloaded:1, /* set indicates this nexthop was written to the
3014 			 * adjacency table.
3015 			 */
3016 	   update:1; /* set indicates this nexthop should be updated in the
3017 		      * adjacency table (f.e., its MAC changed).
3018 		      */
3019 	enum mlxsw_sp_nexthop_action action;
3020 	enum mlxsw_sp_nexthop_type type;
3021 	union {
3022 		struct mlxsw_sp_neigh_entry *neigh_entry;
3023 		struct mlxsw_sp_ipip_entry *ipip_entry;
3024 	};
3025 	unsigned int counter_index;
3026 	bool counter_valid;
3027 };
3028 
3029 enum mlxsw_sp_nexthop_group_type {
3030 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
3031 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
3032 	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
3033 };
3034 
3035 struct mlxsw_sp_nexthop_group_info {
3036 	struct mlxsw_sp_nexthop_group *nh_grp;
3037 	u32 adj_index;
3038 	u16 ecmp_size;
3039 	u16 count;
3040 	int sum_norm_weight;
3041 	u8 adj_index_valid:1,
3042 	   gateway:1, /* routes using the group use a gateway */
3043 	   is_resilient:1;
3044 	struct list_head list; /* member in nh_res_grp_list */
3045 	struct mlxsw_sp_nexthop nexthops[0];
3046 #define nh_rif	nexthops[0].rif
3047 };
3048 
3049 struct mlxsw_sp_nexthop_group_vr_key {
3050 	u16 vr_id;
3051 	enum mlxsw_sp_l3proto proto;
3052 };
3053 
3054 struct mlxsw_sp_nexthop_group_vr_entry {
3055 	struct list_head list; /* member in vr_list */
3056 	struct rhash_head ht_node; /* member in vr_ht */
3057 	refcount_t ref_count;
3058 	struct mlxsw_sp_nexthop_group_vr_key key;
3059 };
3060 
3061 struct mlxsw_sp_nexthop_group {
3062 	struct rhash_head ht_node;
3063 	struct list_head fib_list; /* list of fib entries that use this group */
3064 	union {
3065 		struct {
3066 			struct fib_info *fi;
3067 		} ipv4;
3068 		struct {
3069 			u32 id;
3070 		} obj;
3071 	};
3072 	struct mlxsw_sp_nexthop_group_info *nhgi;
3073 	struct list_head vr_list;
3074 	struct rhashtable vr_ht;
3075 	enum mlxsw_sp_nexthop_group_type type;
3076 	bool can_destroy;
3077 };
3078 
3079 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
3080 				    struct mlxsw_sp_nexthop *nh)
3081 {
3082 	struct devlink *devlink;
3083 
3084 	devlink = priv_to_devlink(mlxsw_sp->core);
3085 	if (!devlink_dpipe_table_counter_enabled(devlink,
3086 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
3087 		return;
3088 
3089 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3090 		return;
3091 
3092 	nh->counter_valid = true;
3093 }
3094 
3095 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3096 				   struct mlxsw_sp_nexthop *nh)
3097 {
3098 	if (!nh->counter_valid)
3099 		return;
3100 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3101 	nh->counter_valid = false;
3102 }
3103 
3104 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3105 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3106 {
3107 	if (!nh->counter_valid)
3108 		return -EINVAL;
3109 
3110 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3111 					 p_counter, NULL);
3112 }
3113 
3114 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3115 					       struct mlxsw_sp_nexthop *nh)
3116 {
3117 	if (!nh) {
3118 		if (list_empty(&router->nexthop_list))
3119 			return NULL;
3120 		else
3121 			return list_first_entry(&router->nexthop_list,
3122 						typeof(*nh), router_list_node);
3123 	}
3124 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3125 		return NULL;
3126 	return list_next_entry(nh, router_list_node);
3127 }
3128 
3129 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3130 {
3131 	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3132 }
3133 
3134 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3135 {
3136 	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3137 	    !mlxsw_sp_nexthop_is_forward(nh))
3138 		return NULL;
3139 	return nh->neigh_entry->ha;
3140 }
3141 
3142 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3143 			     u32 *p_adj_size, u32 *p_adj_hash_index)
3144 {
3145 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3146 	u32 adj_hash_index = 0;
3147 	int i;
3148 
3149 	if (!nh->offloaded || !nhgi->adj_index_valid)
3150 		return -EINVAL;
3151 
3152 	*p_adj_index = nhgi->adj_index;
3153 	*p_adj_size = nhgi->ecmp_size;
3154 
3155 	for (i = 0; i < nhgi->count; i++) {
3156 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3157 
3158 		if (nh_iter == nh)
3159 			break;
3160 		if (nh_iter->offloaded)
3161 			adj_hash_index += nh_iter->num_adj_entries;
3162 	}
3163 
3164 	*p_adj_hash_index = adj_hash_index;
3165 	return 0;
3166 }
3167 
3168 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3169 {
3170 	return nh->rif;
3171 }
3172 
3173 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3174 {
3175 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3176 	int i;
3177 
3178 	for (i = 0; i < nhgi->count; i++) {
3179 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3180 
3181 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3182 			return true;
3183 	}
3184 	return false;
3185 }
3186 
3187 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3188 	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3189 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3190 	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3191 	.automatic_shrinking = true,
3192 };
3193 
3194 static struct mlxsw_sp_nexthop_group_vr_entry *
3195 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3196 				       const struct mlxsw_sp_fib *fib)
3197 {
3198 	struct mlxsw_sp_nexthop_group_vr_key key;
3199 
3200 	memset(&key, 0, sizeof(key));
3201 	key.vr_id = fib->vr->id;
3202 	key.proto = fib->proto;
3203 	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3204 				      mlxsw_sp_nexthop_group_vr_ht_params);
3205 }
3206 
3207 static int
3208 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3209 				       const struct mlxsw_sp_fib *fib)
3210 {
3211 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3212 	int err;
3213 
3214 	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3215 	if (!vr_entry)
3216 		return -ENOMEM;
3217 
3218 	vr_entry->key.vr_id = fib->vr->id;
3219 	vr_entry->key.proto = fib->proto;
3220 	refcount_set(&vr_entry->ref_count, 1);
3221 
3222 	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3223 				     mlxsw_sp_nexthop_group_vr_ht_params);
3224 	if (err)
3225 		goto err_hashtable_insert;
3226 
3227 	list_add(&vr_entry->list, &nh_grp->vr_list);
3228 
3229 	return 0;
3230 
3231 err_hashtable_insert:
3232 	kfree(vr_entry);
3233 	return err;
3234 }
3235 
3236 static void
3237 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3238 					struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3239 {
3240 	list_del(&vr_entry->list);
3241 	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3242 			       mlxsw_sp_nexthop_group_vr_ht_params);
3243 	kfree(vr_entry);
3244 }
3245 
3246 static int
3247 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3248 			       const struct mlxsw_sp_fib *fib)
3249 {
3250 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3251 
3252 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3253 	if (vr_entry) {
3254 		refcount_inc(&vr_entry->ref_count);
3255 		return 0;
3256 	}
3257 
3258 	return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3259 }
3260 
3261 static void
3262 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3263 				 const struct mlxsw_sp_fib *fib)
3264 {
3265 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3266 
3267 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3268 	if (WARN_ON_ONCE(!vr_entry))
3269 		return;
3270 
3271 	if (!refcount_dec_and_test(&vr_entry->ref_count))
3272 		return;
3273 
3274 	mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3275 }
3276 
3277 struct mlxsw_sp_nexthop_group_cmp_arg {
3278 	enum mlxsw_sp_nexthop_group_type type;
3279 	union {
3280 		struct fib_info *fi;
3281 		struct mlxsw_sp_fib6_entry *fib6_entry;
3282 		u32 id;
3283 	};
3284 };
3285 
3286 static bool
3287 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3288 				    const struct in6_addr *gw, int ifindex,
3289 				    int weight)
3290 {
3291 	int i;
3292 
3293 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3294 		const struct mlxsw_sp_nexthop *nh;
3295 
3296 		nh = &nh_grp->nhgi->nexthops[i];
3297 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3298 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3299 			return true;
3300 	}
3301 
3302 	return false;
3303 }
3304 
3305 static bool
3306 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3307 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
3308 {
3309 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3310 
3311 	if (nh_grp->nhgi->count != fib6_entry->nrt6)
3312 		return false;
3313 
3314 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3315 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3316 		struct in6_addr *gw;
3317 		int ifindex, weight;
3318 
3319 		ifindex = fib6_nh->fib_nh_dev->ifindex;
3320 		weight = fib6_nh->fib_nh_weight;
3321 		gw = &fib6_nh->fib_nh_gw6;
3322 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3323 							 weight))
3324 			return false;
3325 	}
3326 
3327 	return true;
3328 }
3329 
3330 static int
3331 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3332 {
3333 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3334 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3335 
3336 	if (nh_grp->type != cmp_arg->type)
3337 		return 1;
3338 
3339 	switch (cmp_arg->type) {
3340 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3341 		return cmp_arg->fi != nh_grp->ipv4.fi;
3342 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3343 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3344 						    cmp_arg->fib6_entry);
3345 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3346 		return cmp_arg->id != nh_grp->obj.id;
3347 	default:
3348 		WARN_ON(1);
3349 		return 1;
3350 	}
3351 }
3352 
3353 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3354 {
3355 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
3356 	const struct mlxsw_sp_nexthop *nh;
3357 	struct fib_info *fi;
3358 	unsigned int val;
3359 	int i;
3360 
3361 	switch (nh_grp->type) {
3362 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3363 		fi = nh_grp->ipv4.fi;
3364 		return jhash(&fi, sizeof(fi), seed);
3365 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3366 		val = nh_grp->nhgi->count;
3367 		for (i = 0; i < nh_grp->nhgi->count; i++) {
3368 			nh = &nh_grp->nhgi->nexthops[i];
3369 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3370 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3371 		}
3372 		return jhash(&val, sizeof(val), seed);
3373 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3374 		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3375 	default:
3376 		WARN_ON(1);
3377 		return 0;
3378 	}
3379 }
3380 
3381 static u32
3382 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3383 {
3384 	unsigned int val = fib6_entry->nrt6;
3385 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3386 
3387 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3388 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3389 		struct net_device *dev = fib6_nh->fib_nh_dev;
3390 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3391 
3392 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3393 		val ^= jhash(gw, sizeof(*gw), seed);
3394 	}
3395 
3396 	return jhash(&val, sizeof(val), seed);
3397 }
3398 
3399 static u32
3400 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3401 {
3402 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3403 
3404 	switch (cmp_arg->type) {
3405 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3406 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3407 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3408 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3409 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3410 		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3411 	default:
3412 		WARN_ON(1);
3413 		return 0;
3414 	}
3415 }
3416 
3417 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3418 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3419 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3420 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3421 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3422 };
3423 
3424 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3425 					 struct mlxsw_sp_nexthop_group *nh_grp)
3426 {
3427 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3428 	    !nh_grp->nhgi->gateway)
3429 		return 0;
3430 
3431 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3432 				      &nh_grp->ht_node,
3433 				      mlxsw_sp_nexthop_group_ht_params);
3434 }
3435 
3436 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3437 					  struct mlxsw_sp_nexthop_group *nh_grp)
3438 {
3439 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3440 	    !nh_grp->nhgi->gateway)
3441 		return;
3442 
3443 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3444 			       &nh_grp->ht_node,
3445 			       mlxsw_sp_nexthop_group_ht_params);
3446 }
3447 
3448 static struct mlxsw_sp_nexthop_group *
3449 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3450 			       struct fib_info *fi)
3451 {
3452 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3453 
3454 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3455 	cmp_arg.fi = fi;
3456 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3457 				      &cmp_arg,
3458 				      mlxsw_sp_nexthop_group_ht_params);
3459 }
3460 
3461 static struct mlxsw_sp_nexthop_group *
3462 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3463 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3464 {
3465 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3466 
3467 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3468 	cmp_arg.fib6_entry = fib6_entry;
3469 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3470 				      &cmp_arg,
3471 				      mlxsw_sp_nexthop_group_ht_params);
3472 }
3473 
3474 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3475 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3476 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3477 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3478 };
3479 
3480 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3481 				   struct mlxsw_sp_nexthop *nh)
3482 {
3483 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3484 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3485 }
3486 
3487 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3488 				    struct mlxsw_sp_nexthop *nh)
3489 {
3490 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3491 			       mlxsw_sp_nexthop_ht_params);
3492 }
3493 
3494 static struct mlxsw_sp_nexthop *
3495 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3496 			struct mlxsw_sp_nexthop_key key)
3497 {
3498 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3499 				      mlxsw_sp_nexthop_ht_params);
3500 }
3501 
3502 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3503 					     enum mlxsw_sp_l3proto proto,
3504 					     u16 vr_id,
3505 					     u32 adj_index, u16 ecmp_size,
3506 					     u32 new_adj_index,
3507 					     u16 new_ecmp_size)
3508 {
3509 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3510 
3511 	mlxsw_reg_raleu_pack(raleu_pl,
3512 			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3513 			     adj_index, ecmp_size, new_adj_index,
3514 			     new_ecmp_size);
3515 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3516 }
3517 
3518 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3519 					  struct mlxsw_sp_nexthop_group *nh_grp,
3520 					  u32 old_adj_index, u16 old_ecmp_size)
3521 {
3522 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3523 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3524 	int err;
3525 
3526 	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3527 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3528 							vr_entry->key.proto,
3529 							vr_entry->key.vr_id,
3530 							old_adj_index,
3531 							old_ecmp_size,
3532 							nhgi->adj_index,
3533 							nhgi->ecmp_size);
3534 		if (err)
3535 			goto err_mass_update_vr;
3536 	}
3537 	return 0;
3538 
3539 err_mass_update_vr:
3540 	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3541 		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3542 						  vr_entry->key.vr_id,
3543 						  nhgi->adj_index,
3544 						  nhgi->ecmp_size,
3545 						  old_adj_index, old_ecmp_size);
3546 	return err;
3547 }
3548 
3549 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3550 					 u32 adj_index,
3551 					 struct mlxsw_sp_nexthop *nh,
3552 					 bool force, char *ratr_pl)
3553 {
3554 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3555 	enum mlxsw_reg_ratr_op op;
3556 	u16 rif_index;
3557 
3558 	rif_index = nh->rif ? nh->rif->rif_index :
3559 			      mlxsw_sp->router->lb_rif_index;
3560 	op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3561 		     MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3562 	mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3563 			    adj_index, rif_index);
3564 	switch (nh->action) {
3565 	case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3566 		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3567 		break;
3568 	case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3569 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3570 					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3571 		break;
3572 	case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3573 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3574 					       MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3575 		mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3576 		break;
3577 	default:
3578 		WARN_ON_ONCE(1);
3579 		return -EINVAL;
3580 	}
3581 	if (nh->counter_valid)
3582 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3583 	else
3584 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3585 
3586 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3587 }
3588 
3589 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3590 				struct mlxsw_sp_nexthop *nh, bool force,
3591 				char *ratr_pl)
3592 {
3593 	int i;
3594 
3595 	for (i = 0; i < nh->num_adj_entries; i++) {
3596 		int err;
3597 
3598 		err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3599 						    nh, force, ratr_pl);
3600 		if (err)
3601 			return err;
3602 	}
3603 
3604 	return 0;
3605 }
3606 
3607 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3608 					  u32 adj_index,
3609 					  struct mlxsw_sp_nexthop *nh,
3610 					  bool force, char *ratr_pl)
3611 {
3612 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3613 
3614 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3615 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3616 					force, ratr_pl);
3617 }
3618 
3619 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3620 					u32 adj_index,
3621 					struct mlxsw_sp_nexthop *nh, bool force,
3622 					char *ratr_pl)
3623 {
3624 	int i;
3625 
3626 	for (i = 0; i < nh->num_adj_entries; i++) {
3627 		int err;
3628 
3629 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3630 						     nh, force, ratr_pl);
3631 		if (err)
3632 			return err;
3633 	}
3634 
3635 	return 0;
3636 }
3637 
3638 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3639 				   struct mlxsw_sp_nexthop *nh, bool force,
3640 				   char *ratr_pl)
3641 {
3642 	/* When action is discard or trap, the nexthop must be
3643 	 * programmed as an Ethernet nexthop.
3644 	 */
3645 	if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3646 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3647 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3648 		return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3649 						   force, ratr_pl);
3650 	else
3651 		return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3652 						    force, ratr_pl);
3653 }
3654 
3655 static int
3656 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3657 			      struct mlxsw_sp_nexthop_group_info *nhgi,
3658 			      bool reallocate)
3659 {
3660 	char ratr_pl[MLXSW_REG_RATR_LEN];
3661 	u32 adj_index = nhgi->adj_index; /* base */
3662 	struct mlxsw_sp_nexthop *nh;
3663 	int i;
3664 
3665 	for (i = 0; i < nhgi->count; i++) {
3666 		nh = &nhgi->nexthops[i];
3667 
3668 		if (!nh->should_offload) {
3669 			nh->offloaded = 0;
3670 			continue;
3671 		}
3672 
3673 		if (nh->update || reallocate) {
3674 			int err = 0;
3675 
3676 			err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3677 						      true, ratr_pl);
3678 			if (err)
3679 				return err;
3680 			nh->update = 0;
3681 			nh->offloaded = 1;
3682 		}
3683 		adj_index += nh->num_adj_entries;
3684 	}
3685 	return 0;
3686 }
3687 
3688 static int
3689 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3690 				    struct mlxsw_sp_nexthop_group *nh_grp)
3691 {
3692 	struct mlxsw_sp_fib_entry *fib_entry;
3693 	int err;
3694 
3695 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3696 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3697 		if (err)
3698 			return err;
3699 	}
3700 	return 0;
3701 }
3702 
3703 struct mlxsw_sp_adj_grp_size_range {
3704 	u16 start; /* Inclusive */
3705 	u16 end; /* Inclusive */
3706 };
3707 
3708 /* Ordered by range start value */
3709 static const struct mlxsw_sp_adj_grp_size_range
3710 mlxsw_sp1_adj_grp_size_ranges[] = {
3711 	{ .start = 1, .end = 64 },
3712 	{ .start = 512, .end = 512 },
3713 	{ .start = 1024, .end = 1024 },
3714 	{ .start = 2048, .end = 2048 },
3715 	{ .start = 4096, .end = 4096 },
3716 };
3717 
3718 /* Ordered by range start value */
3719 static const struct mlxsw_sp_adj_grp_size_range
3720 mlxsw_sp2_adj_grp_size_ranges[] = {
3721 	{ .start = 1, .end = 128 },
3722 	{ .start = 256, .end = 256 },
3723 	{ .start = 512, .end = 512 },
3724 	{ .start = 1024, .end = 1024 },
3725 	{ .start = 2048, .end = 2048 },
3726 	{ .start = 4096, .end = 4096 },
3727 };
3728 
3729 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3730 					   u16 *p_adj_grp_size)
3731 {
3732 	int i;
3733 
3734 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3735 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3736 
3737 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3738 
3739 		if (*p_adj_grp_size >= size_range->start &&
3740 		    *p_adj_grp_size <= size_range->end)
3741 			return;
3742 
3743 		if (*p_adj_grp_size <= size_range->end) {
3744 			*p_adj_grp_size = size_range->end;
3745 			return;
3746 		}
3747 	}
3748 }
3749 
3750 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3751 					     u16 *p_adj_grp_size,
3752 					     unsigned int alloc_size)
3753 {
3754 	int i;
3755 
3756 	for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3757 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3758 
3759 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3760 
3761 		if (alloc_size >= size_range->end) {
3762 			*p_adj_grp_size = size_range->end;
3763 			return;
3764 		}
3765 	}
3766 }
3767 
3768 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3769 				     u16 *p_adj_grp_size)
3770 {
3771 	unsigned int alloc_size;
3772 	int err;
3773 
3774 	/* Round up the requested group size to the next size supported
3775 	 * by the device and make sure the request can be satisfied.
3776 	 */
3777 	mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3778 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3779 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3780 					      *p_adj_grp_size, &alloc_size);
3781 	if (err)
3782 		return err;
3783 	/* It is possible the allocation results in more allocated
3784 	 * entries than requested. Try to use as much of them as
3785 	 * possible.
3786 	 */
3787 	mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3788 
3789 	return 0;
3790 }
3791 
3792 static void
3793 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3794 {
3795 	int i, g = 0, sum_norm_weight = 0;
3796 	struct mlxsw_sp_nexthop *nh;
3797 
3798 	for (i = 0; i < nhgi->count; i++) {
3799 		nh = &nhgi->nexthops[i];
3800 
3801 		if (!nh->should_offload)
3802 			continue;
3803 		if (g > 0)
3804 			g = gcd(nh->nh_weight, g);
3805 		else
3806 			g = nh->nh_weight;
3807 	}
3808 
3809 	for (i = 0; i < nhgi->count; i++) {
3810 		nh = &nhgi->nexthops[i];
3811 
3812 		if (!nh->should_offload)
3813 			continue;
3814 		nh->norm_nh_weight = nh->nh_weight / g;
3815 		sum_norm_weight += nh->norm_nh_weight;
3816 	}
3817 
3818 	nhgi->sum_norm_weight = sum_norm_weight;
3819 }
3820 
3821 static void
3822 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3823 {
3824 	int i, weight = 0, lower_bound = 0;
3825 	int total = nhgi->sum_norm_weight;
3826 	u16 ecmp_size = nhgi->ecmp_size;
3827 
3828 	for (i = 0; i < nhgi->count; i++) {
3829 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3830 		int upper_bound;
3831 
3832 		if (!nh->should_offload)
3833 			continue;
3834 		weight += nh->norm_nh_weight;
3835 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3836 		nh->num_adj_entries = upper_bound - lower_bound;
3837 		lower_bound = upper_bound;
3838 	}
3839 }
3840 
3841 static struct mlxsw_sp_nexthop *
3842 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3843 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3844 
3845 static void
3846 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3847 					struct mlxsw_sp_nexthop_group *nh_grp)
3848 {
3849 	int i;
3850 
3851 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3852 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3853 
3854 		if (nh->offloaded)
3855 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3856 		else
3857 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3858 	}
3859 }
3860 
3861 static void
3862 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3863 					  struct mlxsw_sp_fib6_entry *fib6_entry)
3864 {
3865 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3866 
3867 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3868 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3869 		struct mlxsw_sp_nexthop *nh;
3870 
3871 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3872 		if (nh && nh->offloaded)
3873 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3874 		else
3875 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3876 	}
3877 }
3878 
3879 static void
3880 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3881 					struct mlxsw_sp_nexthop_group *nh_grp)
3882 {
3883 	struct mlxsw_sp_fib6_entry *fib6_entry;
3884 
3885 	/* Unfortunately, in IPv6 the route and the nexthop are described by
3886 	 * the same struct, so we need to iterate over all the routes using the
3887 	 * nexthop group and set / clear the offload indication for them.
3888 	 */
3889 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3890 			    common.nexthop_group_node)
3891 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3892 }
3893 
3894 static void
3895 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3896 					const struct mlxsw_sp_nexthop *nh,
3897 					u16 bucket_index)
3898 {
3899 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3900 	bool offload = false, trap = false;
3901 
3902 	if (nh->offloaded) {
3903 		if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3904 			trap = true;
3905 		else
3906 			offload = true;
3907 	}
3908 	nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3909 				    bucket_index, offload, trap);
3910 }
3911 
3912 static void
3913 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3914 					   struct mlxsw_sp_nexthop_group *nh_grp)
3915 {
3916 	int i;
3917 
3918 	/* Do not update the flags if the nexthop group is being destroyed
3919 	 * since:
3920 	 * 1. The nexthop objects is being deleted, in which case the flags are
3921 	 * irrelevant.
3922 	 * 2. The nexthop group was replaced by a newer group, in which case
3923 	 * the flags of the nexthop object were already updated based on the
3924 	 * new group.
3925 	 */
3926 	if (nh_grp->can_destroy)
3927 		return;
3928 
3929 	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3930 			     nh_grp->nhgi->adj_index_valid, false);
3931 
3932 	/* Update flags of individual nexthop buckets in case of a resilient
3933 	 * nexthop group.
3934 	 */
3935 	if (!nh_grp->nhgi->is_resilient)
3936 		return;
3937 
3938 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3939 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3940 
3941 		mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3942 	}
3943 }
3944 
3945 static void
3946 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3947 				       struct mlxsw_sp_nexthop_group *nh_grp)
3948 {
3949 	switch (nh_grp->type) {
3950 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3951 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3952 		break;
3953 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3954 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3955 		break;
3956 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3957 		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3958 		break;
3959 	}
3960 }
3961 
3962 static int
3963 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3964 			       struct mlxsw_sp_nexthop_group *nh_grp)
3965 {
3966 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3967 	u16 ecmp_size, old_ecmp_size;
3968 	struct mlxsw_sp_nexthop *nh;
3969 	bool offload_change = false;
3970 	u32 adj_index;
3971 	bool old_adj_index_valid;
3972 	u32 old_adj_index;
3973 	int i, err2, err;
3974 
3975 	if (!nhgi->gateway)
3976 		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3977 
3978 	for (i = 0; i < nhgi->count; i++) {
3979 		nh = &nhgi->nexthops[i];
3980 
3981 		if (nh->should_offload != nh->offloaded) {
3982 			offload_change = true;
3983 			if (nh->should_offload)
3984 				nh->update = 1;
3985 		}
3986 	}
3987 	if (!offload_change) {
3988 		/* Nothing was added or removed, so no need to reallocate. Just
3989 		 * update MAC on existing adjacency indexes.
3990 		 */
3991 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3992 		if (err) {
3993 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3994 			goto set_trap;
3995 		}
3996 		/* Flags of individual nexthop buckets might need to be
3997 		 * updated.
3998 		 */
3999 		mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4000 		return 0;
4001 	}
4002 	mlxsw_sp_nexthop_group_normalize(nhgi);
4003 	if (!nhgi->sum_norm_weight) {
4004 		/* No neigh of this group is connected so we just set
4005 		 * the trap and let everthing flow through kernel.
4006 		 */
4007 		err = 0;
4008 		goto set_trap;
4009 	}
4010 
4011 	ecmp_size = nhgi->sum_norm_weight;
4012 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4013 	if (err)
4014 		/* No valid allocation size available. */
4015 		goto set_trap;
4016 
4017 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4018 				  ecmp_size, &adj_index);
4019 	if (err) {
4020 		/* We ran out of KVD linear space, just set the
4021 		 * trap and let everything flow through kernel.
4022 		 */
4023 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4024 		goto set_trap;
4025 	}
4026 	old_adj_index_valid = nhgi->adj_index_valid;
4027 	old_adj_index = nhgi->adj_index;
4028 	old_ecmp_size = nhgi->ecmp_size;
4029 	nhgi->adj_index_valid = 1;
4030 	nhgi->adj_index = adj_index;
4031 	nhgi->ecmp_size = ecmp_size;
4032 	mlxsw_sp_nexthop_group_rebalance(nhgi);
4033 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4034 	if (err) {
4035 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4036 		goto set_trap;
4037 	}
4038 
4039 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4040 
4041 	if (!old_adj_index_valid) {
4042 		/* The trap was set for fib entries, so we have to call
4043 		 * fib entry update to unset it and use adjacency index.
4044 		 */
4045 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4046 		if (err) {
4047 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4048 			goto set_trap;
4049 		}
4050 		return 0;
4051 	}
4052 
4053 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4054 					     old_adj_index, old_ecmp_size);
4055 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4056 			   old_ecmp_size, old_adj_index);
4057 	if (err) {
4058 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4059 		goto set_trap;
4060 	}
4061 
4062 	return 0;
4063 
4064 set_trap:
4065 	old_adj_index_valid = nhgi->adj_index_valid;
4066 	nhgi->adj_index_valid = 0;
4067 	for (i = 0; i < nhgi->count; i++) {
4068 		nh = &nhgi->nexthops[i];
4069 		nh->offloaded = 0;
4070 	}
4071 	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4072 	if (err2)
4073 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4074 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4075 	if (old_adj_index_valid)
4076 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4077 				   nhgi->ecmp_size, nhgi->adj_index);
4078 	return err;
4079 }
4080 
4081 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4082 					    bool removing)
4083 {
4084 	if (!removing) {
4085 		nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4086 		nh->should_offload = 1;
4087 	} else if (nh->nhgi->is_resilient) {
4088 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4089 		nh->should_offload = 1;
4090 	} else {
4091 		nh->should_offload = 0;
4092 	}
4093 	nh->update = 1;
4094 }
4095 
4096 static int
4097 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4098 				    struct mlxsw_sp_neigh_entry *neigh_entry)
4099 {
4100 	struct neighbour *n, *old_n = neigh_entry->key.n;
4101 	struct mlxsw_sp_nexthop *nh;
4102 	bool entry_connected;
4103 	u8 nud_state, dead;
4104 	int err;
4105 
4106 	nh = list_first_entry(&neigh_entry->nexthop_list,
4107 			      struct mlxsw_sp_nexthop, neigh_list_node);
4108 
4109 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4110 	if (!n) {
4111 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4112 		if (IS_ERR(n))
4113 			return PTR_ERR(n);
4114 		neigh_event_send(n, NULL);
4115 	}
4116 
4117 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4118 	neigh_entry->key.n = n;
4119 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4120 	if (err)
4121 		goto err_neigh_entry_insert;
4122 
4123 	read_lock_bh(&n->lock);
4124 	nud_state = n->nud_state;
4125 	dead = n->dead;
4126 	read_unlock_bh(&n->lock);
4127 	entry_connected = nud_state & NUD_VALID && !dead;
4128 
4129 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4130 			    neigh_list_node) {
4131 		neigh_release(old_n);
4132 		neigh_clone(n);
4133 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4134 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4135 	}
4136 
4137 	neigh_release(n);
4138 
4139 	return 0;
4140 
4141 err_neigh_entry_insert:
4142 	neigh_entry->key.n = old_n;
4143 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4144 	neigh_release(n);
4145 	return err;
4146 }
4147 
4148 static void
4149 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4150 			      struct mlxsw_sp_neigh_entry *neigh_entry,
4151 			      bool removing, bool dead)
4152 {
4153 	struct mlxsw_sp_nexthop *nh;
4154 
4155 	if (list_empty(&neigh_entry->nexthop_list))
4156 		return;
4157 
4158 	if (dead) {
4159 		int err;
4160 
4161 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4162 							  neigh_entry);
4163 		if (err)
4164 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4165 		return;
4166 	}
4167 
4168 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4169 			    neigh_list_node) {
4170 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4171 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4172 	}
4173 }
4174 
4175 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
4176 				      struct mlxsw_sp_rif *rif)
4177 {
4178 	if (nh->rif)
4179 		return;
4180 
4181 	nh->rif = rif;
4182 	list_add(&nh->rif_list_node, &rif->nexthop_list);
4183 }
4184 
4185 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
4186 {
4187 	if (!nh->rif)
4188 		return;
4189 
4190 	list_del(&nh->rif_list_node);
4191 	nh->rif = NULL;
4192 }
4193 
4194 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4195 				       struct mlxsw_sp_nexthop *nh)
4196 {
4197 	struct mlxsw_sp_neigh_entry *neigh_entry;
4198 	struct neighbour *n;
4199 	u8 nud_state, dead;
4200 	int err;
4201 
4202 	if (!nh->nhgi->gateway || nh->neigh_entry)
4203 		return 0;
4204 
4205 	/* Take a reference of neigh here ensuring that neigh would
4206 	 * not be destructed before the nexthop entry is finished.
4207 	 * The reference is taken either in neigh_lookup() or
4208 	 * in neigh_create() in case n is not found.
4209 	 */
4210 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4211 	if (!n) {
4212 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4213 		if (IS_ERR(n))
4214 			return PTR_ERR(n);
4215 		neigh_event_send(n, NULL);
4216 	}
4217 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4218 	if (!neigh_entry) {
4219 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4220 		if (IS_ERR(neigh_entry)) {
4221 			err = -EINVAL;
4222 			goto err_neigh_entry_create;
4223 		}
4224 	}
4225 
4226 	/* If that is the first nexthop connected to that neigh, add to
4227 	 * nexthop_neighs_list
4228 	 */
4229 	if (list_empty(&neigh_entry->nexthop_list))
4230 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4231 			      &mlxsw_sp->router->nexthop_neighs_list);
4232 
4233 	nh->neigh_entry = neigh_entry;
4234 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4235 	read_lock_bh(&n->lock);
4236 	nud_state = n->nud_state;
4237 	dead = n->dead;
4238 	read_unlock_bh(&n->lock);
4239 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4240 
4241 	return 0;
4242 
4243 err_neigh_entry_create:
4244 	neigh_release(n);
4245 	return err;
4246 }
4247 
4248 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4249 					struct mlxsw_sp_nexthop *nh)
4250 {
4251 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4252 	struct neighbour *n;
4253 
4254 	if (!neigh_entry)
4255 		return;
4256 	n = neigh_entry->key.n;
4257 
4258 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4259 	list_del(&nh->neigh_list_node);
4260 	nh->neigh_entry = NULL;
4261 
4262 	/* If that is the last nexthop connected to that neigh, remove from
4263 	 * nexthop_neighs_list
4264 	 */
4265 	if (list_empty(&neigh_entry->nexthop_list))
4266 		list_del(&neigh_entry->nexthop_neighs_list_node);
4267 
4268 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4269 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4270 
4271 	neigh_release(n);
4272 }
4273 
4274 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4275 {
4276 	struct net_device *ul_dev;
4277 	bool is_up;
4278 
4279 	rcu_read_lock();
4280 	ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4281 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4282 	rcu_read_unlock();
4283 
4284 	return is_up;
4285 }
4286 
4287 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4288 				       struct mlxsw_sp_nexthop *nh,
4289 				       struct mlxsw_sp_ipip_entry *ipip_entry)
4290 {
4291 	bool removing;
4292 
4293 	if (!nh->nhgi->gateway || nh->ipip_entry)
4294 		return;
4295 
4296 	nh->ipip_entry = ipip_entry;
4297 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4298 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
4299 	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4300 }
4301 
4302 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4303 				       struct mlxsw_sp_nexthop *nh)
4304 {
4305 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4306 
4307 	if (!ipip_entry)
4308 		return;
4309 
4310 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4311 	nh->ipip_entry = NULL;
4312 }
4313 
4314 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4315 					const struct fib_nh *fib_nh,
4316 					enum mlxsw_sp_ipip_type *p_ipipt)
4317 {
4318 	struct net_device *dev = fib_nh->fib_nh_dev;
4319 
4320 	return dev &&
4321 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4322 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4323 }
4324 
4325 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4326 				      struct mlxsw_sp_nexthop *nh,
4327 				      const struct net_device *dev)
4328 {
4329 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4330 	struct mlxsw_sp_ipip_entry *ipip_entry;
4331 	struct mlxsw_sp_rif *rif;
4332 	int err;
4333 
4334 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4335 	if (ipip_entry) {
4336 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4337 		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4338 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4339 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4340 			return 0;
4341 		}
4342 	}
4343 
4344 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4345 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4346 	if (!rif)
4347 		return 0;
4348 
4349 	mlxsw_sp_nexthop_rif_init(nh, rif);
4350 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4351 	if (err)
4352 		goto err_neigh_init;
4353 
4354 	return 0;
4355 
4356 err_neigh_init:
4357 	mlxsw_sp_nexthop_rif_fini(nh);
4358 	return err;
4359 }
4360 
4361 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4362 				       struct mlxsw_sp_nexthop *nh)
4363 {
4364 	switch (nh->type) {
4365 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4366 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4367 		mlxsw_sp_nexthop_rif_fini(nh);
4368 		break;
4369 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4370 		mlxsw_sp_nexthop_rif_fini(nh);
4371 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4372 		break;
4373 	}
4374 }
4375 
4376 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4377 				  struct mlxsw_sp_nexthop_group *nh_grp,
4378 				  struct mlxsw_sp_nexthop *nh,
4379 				  struct fib_nh *fib_nh)
4380 {
4381 	struct net_device *dev = fib_nh->fib_nh_dev;
4382 	struct in_device *in_dev;
4383 	int err;
4384 
4385 	nh->nhgi = nh_grp->nhgi;
4386 	nh->key.fib_nh = fib_nh;
4387 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4388 	nh->nh_weight = fib_nh->fib_nh_weight;
4389 #else
4390 	nh->nh_weight = 1;
4391 #endif
4392 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4393 	nh->neigh_tbl = &arp_tbl;
4394 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4395 	if (err)
4396 		return err;
4397 
4398 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4399 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4400 
4401 	if (!dev)
4402 		return 0;
4403 	nh->ifindex = dev->ifindex;
4404 
4405 	rcu_read_lock();
4406 	in_dev = __in_dev_get_rcu(dev);
4407 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4408 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4409 		rcu_read_unlock();
4410 		return 0;
4411 	}
4412 	rcu_read_unlock();
4413 
4414 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4415 	if (err)
4416 		goto err_nexthop_neigh_init;
4417 
4418 	return 0;
4419 
4420 err_nexthop_neigh_init:
4421 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4422 	return err;
4423 }
4424 
4425 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4426 				   struct mlxsw_sp_nexthop *nh)
4427 {
4428 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4429 	list_del(&nh->router_list_node);
4430 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4431 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4432 }
4433 
4434 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4435 				    unsigned long event, struct fib_nh *fib_nh)
4436 {
4437 	struct mlxsw_sp_nexthop_key key;
4438 	struct mlxsw_sp_nexthop *nh;
4439 
4440 	key.fib_nh = fib_nh;
4441 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4442 	if (!nh)
4443 		return;
4444 
4445 	switch (event) {
4446 	case FIB_EVENT_NH_ADD:
4447 		mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4448 		break;
4449 	case FIB_EVENT_NH_DEL:
4450 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4451 		break;
4452 	}
4453 
4454 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4455 }
4456 
4457 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4458 					struct mlxsw_sp_rif *rif)
4459 {
4460 	struct mlxsw_sp_nexthop *nh;
4461 	bool removing;
4462 
4463 	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4464 		switch (nh->type) {
4465 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
4466 			removing = false;
4467 			break;
4468 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4469 			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4470 			break;
4471 		default:
4472 			WARN_ON(1);
4473 			continue;
4474 		}
4475 
4476 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4477 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4478 	}
4479 }
4480 
4481 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4482 					 struct mlxsw_sp_rif *old_rif,
4483 					 struct mlxsw_sp_rif *new_rif)
4484 {
4485 	struct mlxsw_sp_nexthop *nh;
4486 
4487 	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4488 	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4489 		nh->rif = new_rif;
4490 	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4491 }
4492 
4493 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4494 					   struct mlxsw_sp_rif *rif)
4495 {
4496 	struct mlxsw_sp_nexthop *nh, *tmp;
4497 
4498 	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4499 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4500 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4501 	}
4502 }
4503 
4504 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4505 {
4506 	enum mlxsw_reg_ratr_trap_action trap_action;
4507 	char ratr_pl[MLXSW_REG_RATR_LEN];
4508 	int err;
4509 
4510 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4511 				  &mlxsw_sp->router->adj_trap_index);
4512 	if (err)
4513 		return err;
4514 
4515 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4516 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4517 			    MLXSW_REG_RATR_TYPE_ETHERNET,
4518 			    mlxsw_sp->router->adj_trap_index,
4519 			    mlxsw_sp->router->lb_rif_index);
4520 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4521 	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4522 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4523 	if (err)
4524 		goto err_ratr_write;
4525 
4526 	return 0;
4527 
4528 err_ratr_write:
4529 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4530 			   mlxsw_sp->router->adj_trap_index);
4531 	return err;
4532 }
4533 
4534 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4535 {
4536 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4537 			   mlxsw_sp->router->adj_trap_index);
4538 }
4539 
4540 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4541 {
4542 	int err;
4543 
4544 	if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4545 		return 0;
4546 
4547 	err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4548 	if (err)
4549 		return err;
4550 
4551 	refcount_set(&mlxsw_sp->router->num_groups, 1);
4552 
4553 	return 0;
4554 }
4555 
4556 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4557 {
4558 	if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4559 		return;
4560 
4561 	mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4562 }
4563 
4564 static void
4565 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4566 			     const struct mlxsw_sp_nexthop_group *nh_grp,
4567 			     unsigned long *activity)
4568 {
4569 	char *ratrad_pl;
4570 	int i, err;
4571 
4572 	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4573 	if (!ratrad_pl)
4574 		return;
4575 
4576 	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4577 			      nh_grp->nhgi->count);
4578 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4579 	if (err)
4580 		goto out;
4581 
4582 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4583 		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4584 			continue;
4585 		bitmap_set(activity, i, 1);
4586 	}
4587 
4588 out:
4589 	kfree(ratrad_pl);
4590 }
4591 
4592 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4593 
4594 static void
4595 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4596 				const struct mlxsw_sp_nexthop_group *nh_grp)
4597 {
4598 	unsigned long *activity;
4599 
4600 	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4601 	if (!activity)
4602 		return;
4603 
4604 	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4605 	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4606 					nh_grp->nhgi->count, activity);
4607 
4608 	bitmap_free(activity);
4609 }
4610 
4611 static void
4612 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4613 {
4614 	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4615 
4616 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4617 			       msecs_to_jiffies(interval));
4618 }
4619 
4620 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4621 {
4622 	struct mlxsw_sp_nexthop_group_info *nhgi;
4623 	struct mlxsw_sp_router *router;
4624 	bool reschedule = false;
4625 
4626 	router = container_of(work, struct mlxsw_sp_router,
4627 			      nh_grp_activity_dw.work);
4628 
4629 	mutex_lock(&router->lock);
4630 
4631 	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4632 		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4633 		reschedule = true;
4634 	}
4635 
4636 	mutex_unlock(&router->lock);
4637 
4638 	if (!reschedule)
4639 		return;
4640 	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4641 }
4642 
4643 static int
4644 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4645 				     const struct nh_notifier_single_info *nh,
4646 				     struct netlink_ext_ack *extack)
4647 {
4648 	int err = -EINVAL;
4649 
4650 	if (nh->is_fdb)
4651 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4652 	else if (nh->has_encap)
4653 		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4654 	else
4655 		err = 0;
4656 
4657 	return err;
4658 }
4659 
4660 static int
4661 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4662 					  const struct nh_notifier_single_info *nh,
4663 					  struct netlink_ext_ack *extack)
4664 {
4665 	int err;
4666 
4667 	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4668 	if (err)
4669 		return err;
4670 
4671 	/* Device only nexthops with an IPIP device are programmed as
4672 	 * encapsulating adjacency entries.
4673 	 */
4674 	if (!nh->gw_family && !nh->is_reject &&
4675 	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4676 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4677 		return -EINVAL;
4678 	}
4679 
4680 	return 0;
4681 }
4682 
4683 static int
4684 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4685 				    const struct nh_notifier_grp_info *nh_grp,
4686 				    struct netlink_ext_ack *extack)
4687 {
4688 	int i;
4689 
4690 	if (nh_grp->is_fdb) {
4691 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4692 		return -EINVAL;
4693 	}
4694 
4695 	for (i = 0; i < nh_grp->num_nh; i++) {
4696 		const struct nh_notifier_single_info *nh;
4697 		int err;
4698 
4699 		nh = &nh_grp->nh_entries[i].nh;
4700 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4701 								extack);
4702 		if (err)
4703 			return err;
4704 	}
4705 
4706 	return 0;
4707 }
4708 
4709 static int
4710 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4711 					     const struct nh_notifier_res_table_info *nh_res_table,
4712 					     struct netlink_ext_ack *extack)
4713 {
4714 	unsigned int alloc_size;
4715 	bool valid_size = false;
4716 	int err, i;
4717 
4718 	if (nh_res_table->num_nh_buckets < 32) {
4719 		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4720 		return -EINVAL;
4721 	}
4722 
4723 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4724 		const struct mlxsw_sp_adj_grp_size_range *size_range;
4725 
4726 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4727 
4728 		if (nh_res_table->num_nh_buckets >= size_range->start &&
4729 		    nh_res_table->num_nh_buckets <= size_range->end) {
4730 			valid_size = true;
4731 			break;
4732 		}
4733 	}
4734 
4735 	if (!valid_size) {
4736 		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4737 		return -EINVAL;
4738 	}
4739 
4740 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4741 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4742 					      nh_res_table->num_nh_buckets,
4743 					      &alloc_size);
4744 	if (err || nh_res_table->num_nh_buckets != alloc_size) {
4745 		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4746 		return -EINVAL;
4747 	}
4748 
4749 	return 0;
4750 }
4751 
4752 static int
4753 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4754 					const struct nh_notifier_res_table_info *nh_res_table,
4755 					struct netlink_ext_ack *extack)
4756 {
4757 	int err;
4758 	u16 i;
4759 
4760 	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4761 							   nh_res_table,
4762 							   extack);
4763 	if (err)
4764 		return err;
4765 
4766 	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4767 		const struct nh_notifier_single_info *nh;
4768 		int err;
4769 
4770 		nh = &nh_res_table->nhs[i];
4771 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4772 								extack);
4773 		if (err)
4774 			return err;
4775 	}
4776 
4777 	return 0;
4778 }
4779 
4780 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4781 					 unsigned long event,
4782 					 struct nh_notifier_info *info)
4783 {
4784 	struct nh_notifier_single_info *nh;
4785 
4786 	if (event != NEXTHOP_EVENT_REPLACE &&
4787 	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4788 	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
4789 		return 0;
4790 
4791 	switch (info->type) {
4792 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4793 		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4794 							    info->extack);
4795 	case NH_NOTIFIER_INFO_TYPE_GRP:
4796 		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4797 							   info->nh_grp,
4798 							   info->extack);
4799 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4800 		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4801 							       info->nh_res_table,
4802 							       info->extack);
4803 	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4804 		nh = &info->nh_res_bucket->new_nh;
4805 		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4806 								 info->extack);
4807 	default:
4808 		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4809 		return -EOPNOTSUPP;
4810 	}
4811 }
4812 
4813 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4814 					    const struct nh_notifier_info *info)
4815 {
4816 	const struct net_device *dev;
4817 
4818 	switch (info->type) {
4819 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4820 		dev = info->nh->dev;
4821 		return info->nh->gw_family || info->nh->is_reject ||
4822 		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4823 	case NH_NOTIFIER_INFO_TYPE_GRP:
4824 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4825 		/* Already validated earlier. */
4826 		return true;
4827 	default:
4828 		return false;
4829 	}
4830 }
4831 
4832 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4833 						struct mlxsw_sp_nexthop *nh)
4834 {
4835 	u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
4836 
4837 	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4838 	nh->should_offload = 1;
4839 	/* While nexthops that discard packets do not forward packets
4840 	 * via an egress RIF, they still need to be programmed using a
4841 	 * valid RIF, so use the loopback RIF created during init.
4842 	 */
4843 	nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
4844 }
4845 
4846 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4847 						struct mlxsw_sp_nexthop *nh)
4848 {
4849 	nh->rif = NULL;
4850 	nh->should_offload = 0;
4851 }
4852 
4853 static int
4854 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4855 			  struct mlxsw_sp_nexthop_group *nh_grp,
4856 			  struct mlxsw_sp_nexthop *nh,
4857 			  struct nh_notifier_single_info *nh_obj, int weight)
4858 {
4859 	struct net_device *dev = nh_obj->dev;
4860 	int err;
4861 
4862 	nh->nhgi = nh_grp->nhgi;
4863 	nh->nh_weight = weight;
4864 
4865 	switch (nh_obj->gw_family) {
4866 	case AF_INET:
4867 		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4868 		nh->neigh_tbl = &arp_tbl;
4869 		break;
4870 	case AF_INET6:
4871 		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
4872 #if IS_ENABLED(CONFIG_IPV6)
4873 		nh->neigh_tbl = &nd_tbl;
4874 #endif
4875 		break;
4876 	}
4877 
4878 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4879 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4880 	nh->ifindex = dev->ifindex;
4881 
4882 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4883 	if (err)
4884 		goto err_type_init;
4885 
4886 	if (nh_obj->is_reject)
4887 		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
4888 
4889 	/* In a resilient nexthop group, all the nexthops must be written to
4890 	 * the adjacency table. Even if they do not have a valid neighbour or
4891 	 * RIF.
4892 	 */
4893 	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
4894 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4895 		nh->should_offload = 1;
4896 	}
4897 
4898 	return 0;
4899 
4900 err_type_init:
4901 	list_del(&nh->router_list_node);
4902 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4903 	return err;
4904 }
4905 
4906 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
4907 				      struct mlxsw_sp_nexthop *nh)
4908 {
4909 	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
4910 		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
4911 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4912 	list_del(&nh->router_list_node);
4913 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4914 	nh->should_offload = 0;
4915 }
4916 
4917 static int
4918 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
4919 				     struct mlxsw_sp_nexthop_group *nh_grp,
4920 				     struct nh_notifier_info *info)
4921 {
4922 	struct mlxsw_sp_nexthop_group_info *nhgi;
4923 	struct mlxsw_sp_nexthop *nh;
4924 	bool is_resilient = false;
4925 	unsigned int nhs;
4926 	int err, i;
4927 
4928 	switch (info->type) {
4929 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4930 		nhs = 1;
4931 		break;
4932 	case NH_NOTIFIER_INFO_TYPE_GRP:
4933 		nhs = info->nh_grp->num_nh;
4934 		break;
4935 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4936 		nhs = info->nh_res_table->num_nh_buckets;
4937 		is_resilient = true;
4938 		break;
4939 	default:
4940 		return -EINVAL;
4941 	}
4942 
4943 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4944 	if (!nhgi)
4945 		return -ENOMEM;
4946 	nh_grp->nhgi = nhgi;
4947 	nhgi->nh_grp = nh_grp;
4948 	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
4949 	nhgi->is_resilient = is_resilient;
4950 	nhgi->count = nhs;
4951 	for (i = 0; i < nhgi->count; i++) {
4952 		struct nh_notifier_single_info *nh_obj;
4953 		int weight;
4954 
4955 		nh = &nhgi->nexthops[i];
4956 		switch (info->type) {
4957 		case NH_NOTIFIER_INFO_TYPE_SINGLE:
4958 			nh_obj = info->nh;
4959 			weight = 1;
4960 			break;
4961 		case NH_NOTIFIER_INFO_TYPE_GRP:
4962 			nh_obj = &info->nh_grp->nh_entries[i].nh;
4963 			weight = info->nh_grp->nh_entries[i].weight;
4964 			break;
4965 		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4966 			nh_obj = &info->nh_res_table->nhs[i];
4967 			weight = 1;
4968 			break;
4969 		default:
4970 			err = -EINVAL;
4971 			goto err_nexthop_obj_init;
4972 		}
4973 		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
4974 						weight);
4975 		if (err)
4976 			goto err_nexthop_obj_init;
4977 	}
4978 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
4979 	if (err)
4980 		goto err_group_inc;
4981 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4982 	if (err) {
4983 		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
4984 		goto err_group_refresh;
4985 	}
4986 
4987 	/* Add resilient nexthop groups to a list so that the activity of their
4988 	 * nexthop buckets will be periodically queried and cleared.
4989 	 */
4990 	if (nhgi->is_resilient) {
4991 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4992 			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
4993 		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
4994 	}
4995 
4996 	return 0;
4997 
4998 err_group_refresh:
4999 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5000 err_group_inc:
5001 	i = nhgi->count;
5002 err_nexthop_obj_init:
5003 	for (i--; i >= 0; i--) {
5004 		nh = &nhgi->nexthops[i];
5005 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5006 	}
5007 	kfree(nhgi);
5008 	return err;
5009 }
5010 
5011 static void
5012 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5013 				     struct mlxsw_sp_nexthop_group *nh_grp)
5014 {
5015 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5016 	struct mlxsw_sp_router *router = mlxsw_sp->router;
5017 	int i;
5018 
5019 	if (nhgi->is_resilient) {
5020 		list_del(&nhgi->list);
5021 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5022 			cancel_delayed_work(&router->nh_grp_activity_dw);
5023 	}
5024 
5025 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5026 	for (i = nhgi->count - 1; i >= 0; i--) {
5027 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5028 
5029 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5030 	}
5031 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5032 	WARN_ON_ONCE(nhgi->adj_index_valid);
5033 	kfree(nhgi);
5034 }
5035 
5036 static struct mlxsw_sp_nexthop_group *
5037 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
5038 				  struct nh_notifier_info *info)
5039 {
5040 	struct mlxsw_sp_nexthop_group *nh_grp;
5041 	int err;
5042 
5043 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5044 	if (!nh_grp)
5045 		return ERR_PTR(-ENOMEM);
5046 	INIT_LIST_HEAD(&nh_grp->vr_list);
5047 	err = rhashtable_init(&nh_grp->vr_ht,
5048 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5049 	if (err)
5050 		goto err_nexthop_group_vr_ht_init;
5051 	INIT_LIST_HEAD(&nh_grp->fib_list);
5052 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5053 	nh_grp->obj.id = info->id;
5054 
5055 	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
5056 	if (err)
5057 		goto err_nexthop_group_info_init;
5058 
5059 	nh_grp->can_destroy = false;
5060 
5061 	return nh_grp;
5062 
5063 err_nexthop_group_info_init:
5064 	rhashtable_destroy(&nh_grp->vr_ht);
5065 err_nexthop_group_vr_ht_init:
5066 	kfree(nh_grp);
5067 	return ERR_PTR(err);
5068 }
5069 
5070 static void
5071 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5072 				   struct mlxsw_sp_nexthop_group *nh_grp)
5073 {
5074 	if (!nh_grp->can_destroy)
5075 		return;
5076 	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5077 	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5078 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5079 	rhashtable_destroy(&nh_grp->vr_ht);
5080 	kfree(nh_grp);
5081 }
5082 
5083 static struct mlxsw_sp_nexthop_group *
5084 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5085 {
5086 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5087 
5088 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5089 	cmp_arg.id = id;
5090 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5091 				      &cmp_arg,
5092 				      mlxsw_sp_nexthop_group_ht_params);
5093 }
5094 
5095 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5096 					  struct mlxsw_sp_nexthop_group *nh_grp)
5097 {
5098 	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5099 }
5100 
5101 static int
5102 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5103 				   struct mlxsw_sp_nexthop_group *nh_grp,
5104 				   struct mlxsw_sp_nexthop_group *old_nh_grp,
5105 				   struct netlink_ext_ack *extack)
5106 {
5107 	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5108 	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5109 	int err;
5110 
5111 	old_nh_grp->nhgi = new_nhgi;
5112 	new_nhgi->nh_grp = old_nh_grp;
5113 	nh_grp->nhgi = old_nhgi;
5114 	old_nhgi->nh_grp = nh_grp;
5115 
5116 	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5117 		/* Both the old adjacency index and the new one are valid.
5118 		 * Routes are currently using the old one. Tell the device to
5119 		 * replace the old adjacency index with the new one.
5120 		 */
5121 		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5122 						     old_nhgi->adj_index,
5123 						     old_nhgi->ecmp_size);
5124 		if (err) {
5125 			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5126 			goto err_out;
5127 		}
5128 	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5129 		/* The old adjacency index is valid, while the new one is not.
5130 		 * Iterate over all the routes using the group and change them
5131 		 * to trap packets to the CPU.
5132 		 */
5133 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5134 		if (err) {
5135 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5136 			goto err_out;
5137 		}
5138 	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5139 		/* The old adjacency index is invalid, while the new one is.
5140 		 * Iterate over all the routes using the group and change them
5141 		 * to forward packets using the new valid index.
5142 		 */
5143 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5144 		if (err) {
5145 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5146 			goto err_out;
5147 		}
5148 	}
5149 
5150 	/* Make sure the flags are set / cleared based on the new nexthop group
5151 	 * information.
5152 	 */
5153 	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5154 
5155 	/* At this point 'nh_grp' is just a shell that is not used by anyone
5156 	 * and its nexthop group info is the old info that was just replaced
5157 	 * with the new one. Remove it.
5158 	 */
5159 	nh_grp->can_destroy = true;
5160 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5161 
5162 	return 0;
5163 
5164 err_out:
5165 	old_nhgi->nh_grp = old_nh_grp;
5166 	nh_grp->nhgi = new_nhgi;
5167 	new_nhgi->nh_grp = nh_grp;
5168 	old_nh_grp->nhgi = old_nhgi;
5169 	return err;
5170 }
5171 
5172 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5173 				    struct nh_notifier_info *info)
5174 {
5175 	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5176 	struct netlink_ext_ack *extack = info->extack;
5177 	int err;
5178 
5179 	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5180 	if (IS_ERR(nh_grp))
5181 		return PTR_ERR(nh_grp);
5182 
5183 	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5184 	if (!old_nh_grp)
5185 		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5186 	else
5187 		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5188 							 old_nh_grp, extack);
5189 
5190 	if (err) {
5191 		nh_grp->can_destroy = true;
5192 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5193 	}
5194 
5195 	return err;
5196 }
5197 
5198 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5199 				     struct nh_notifier_info *info)
5200 {
5201 	struct mlxsw_sp_nexthop_group *nh_grp;
5202 
5203 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5204 	if (!nh_grp)
5205 		return;
5206 
5207 	nh_grp->can_destroy = true;
5208 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5209 
5210 	/* If the group still has routes using it, then defer the delete
5211 	 * operation until the last route using it is deleted.
5212 	 */
5213 	if (!list_empty(&nh_grp->fib_list))
5214 		return;
5215 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5216 }
5217 
5218 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5219 					     u32 adj_index, char *ratr_pl)
5220 {
5221 	MLXSW_REG_ZERO(ratr, ratr_pl);
5222 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5223 	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5224 	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5225 
5226 	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5227 }
5228 
5229 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5230 {
5231 	/* Clear the opcode and activity on both the old and new payload as
5232 	 * they are irrelevant for the comparison.
5233 	 */
5234 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5235 	mlxsw_reg_ratr_a_set(ratr_pl, 0);
5236 	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5237 	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5238 
5239 	/* If the contents of the adjacency entry are consistent with the
5240 	 * replacement request, then replacement was successful.
5241 	 */
5242 	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5243 		return 0;
5244 
5245 	return -EINVAL;
5246 }
5247 
5248 static int
5249 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5250 				       struct mlxsw_sp_nexthop *nh,
5251 				       struct nh_notifier_info *info)
5252 {
5253 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5254 	struct netlink_ext_ack *extack = info->extack;
5255 	bool force = info->nh_res_bucket->force;
5256 	char ratr_pl_new[MLXSW_REG_RATR_LEN];
5257 	char ratr_pl[MLXSW_REG_RATR_LEN];
5258 	u32 adj_index;
5259 	int err;
5260 
5261 	/* No point in trying an atomic replacement if the idle timer interval
5262 	 * is smaller than the interval in which we query and clear activity.
5263 	 */
5264 	if (!force && info->nh_res_bucket->idle_timer_ms <
5265 	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5266 		force = true;
5267 
5268 	adj_index = nh->nhgi->adj_index + bucket_index;
5269 	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5270 	if (err) {
5271 		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5272 		return err;
5273 	}
5274 
5275 	if (!force) {
5276 		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5277 							ratr_pl_new);
5278 		if (err) {
5279 			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5280 			return err;
5281 		}
5282 
5283 		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5284 		if (err) {
5285 			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5286 			return err;
5287 		}
5288 	}
5289 
5290 	nh->update = 0;
5291 	nh->offloaded = 1;
5292 	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5293 
5294 	return 0;
5295 }
5296 
5297 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5298 					       struct nh_notifier_info *info)
5299 {
5300 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5301 	struct netlink_ext_ack *extack = info->extack;
5302 	struct mlxsw_sp_nexthop_group_info *nhgi;
5303 	struct nh_notifier_single_info *nh_obj;
5304 	struct mlxsw_sp_nexthop_group *nh_grp;
5305 	struct mlxsw_sp_nexthop *nh;
5306 	int err;
5307 
5308 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5309 	if (!nh_grp) {
5310 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5311 		return -EINVAL;
5312 	}
5313 
5314 	nhgi = nh_grp->nhgi;
5315 
5316 	if (bucket_index >= nhgi->count) {
5317 		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5318 		return -EINVAL;
5319 	}
5320 
5321 	nh = &nhgi->nexthops[bucket_index];
5322 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5323 
5324 	nh_obj = &info->nh_res_bucket->new_nh;
5325 	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5326 	if (err) {
5327 		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5328 		goto err_nexthop_obj_init;
5329 	}
5330 
5331 	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5332 	if (err)
5333 		goto err_nexthop_obj_bucket_adj_update;
5334 
5335 	return 0;
5336 
5337 err_nexthop_obj_bucket_adj_update:
5338 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5339 err_nexthop_obj_init:
5340 	nh_obj = &info->nh_res_bucket->old_nh;
5341 	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5342 	/* The old adjacency entry was not overwritten */
5343 	nh->update = 0;
5344 	nh->offloaded = 1;
5345 	return err;
5346 }
5347 
5348 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5349 				      unsigned long event, void *ptr)
5350 {
5351 	struct nh_notifier_info *info = ptr;
5352 	struct mlxsw_sp_router *router;
5353 	int err = 0;
5354 
5355 	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5356 	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5357 	if (err)
5358 		goto out;
5359 
5360 	mutex_lock(&router->lock);
5361 
5362 	switch (event) {
5363 	case NEXTHOP_EVENT_REPLACE:
5364 		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5365 		break;
5366 	case NEXTHOP_EVENT_DEL:
5367 		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5368 		break;
5369 	case NEXTHOP_EVENT_BUCKET_REPLACE:
5370 		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5371 							  info);
5372 		break;
5373 	default:
5374 		break;
5375 	}
5376 
5377 	mutex_unlock(&router->lock);
5378 
5379 out:
5380 	return notifier_from_errno(err);
5381 }
5382 
5383 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5384 				   struct fib_info *fi)
5385 {
5386 	const struct fib_nh *nh = fib_info_nh(fi, 0);
5387 
5388 	return nh->fib_nh_scope == RT_SCOPE_LINK ||
5389 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5390 }
5391 
5392 static int
5393 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5394 				  struct mlxsw_sp_nexthop_group *nh_grp)
5395 {
5396 	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5397 	struct mlxsw_sp_nexthop_group_info *nhgi;
5398 	struct mlxsw_sp_nexthop *nh;
5399 	int err, i;
5400 
5401 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5402 	if (!nhgi)
5403 		return -ENOMEM;
5404 	nh_grp->nhgi = nhgi;
5405 	nhgi->nh_grp = nh_grp;
5406 	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5407 	nhgi->count = nhs;
5408 	for (i = 0; i < nhgi->count; i++) {
5409 		struct fib_nh *fib_nh;
5410 
5411 		nh = &nhgi->nexthops[i];
5412 		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5413 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5414 		if (err)
5415 			goto err_nexthop4_init;
5416 	}
5417 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5418 	if (err)
5419 		goto err_group_inc;
5420 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5421 	if (err)
5422 		goto err_group_refresh;
5423 
5424 	return 0;
5425 
5426 err_group_refresh:
5427 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5428 err_group_inc:
5429 	i = nhgi->count;
5430 err_nexthop4_init:
5431 	for (i--; i >= 0; i--) {
5432 		nh = &nhgi->nexthops[i];
5433 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5434 	}
5435 	kfree(nhgi);
5436 	return err;
5437 }
5438 
5439 static void
5440 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5441 				  struct mlxsw_sp_nexthop_group *nh_grp)
5442 {
5443 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5444 	int i;
5445 
5446 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5447 	for (i = nhgi->count - 1; i >= 0; i--) {
5448 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5449 
5450 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5451 	}
5452 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5453 	WARN_ON_ONCE(nhgi->adj_index_valid);
5454 	kfree(nhgi);
5455 }
5456 
5457 static struct mlxsw_sp_nexthop_group *
5458 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5459 {
5460 	struct mlxsw_sp_nexthop_group *nh_grp;
5461 	int err;
5462 
5463 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5464 	if (!nh_grp)
5465 		return ERR_PTR(-ENOMEM);
5466 	INIT_LIST_HEAD(&nh_grp->vr_list);
5467 	err = rhashtable_init(&nh_grp->vr_ht,
5468 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5469 	if (err)
5470 		goto err_nexthop_group_vr_ht_init;
5471 	INIT_LIST_HEAD(&nh_grp->fib_list);
5472 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5473 	nh_grp->ipv4.fi = fi;
5474 	fib_info_hold(fi);
5475 
5476 	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5477 	if (err)
5478 		goto err_nexthop_group_info_init;
5479 
5480 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5481 	if (err)
5482 		goto err_nexthop_group_insert;
5483 
5484 	nh_grp->can_destroy = true;
5485 
5486 	return nh_grp;
5487 
5488 err_nexthop_group_insert:
5489 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5490 err_nexthop_group_info_init:
5491 	fib_info_put(fi);
5492 	rhashtable_destroy(&nh_grp->vr_ht);
5493 err_nexthop_group_vr_ht_init:
5494 	kfree(nh_grp);
5495 	return ERR_PTR(err);
5496 }
5497 
5498 static void
5499 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5500 				struct mlxsw_sp_nexthop_group *nh_grp)
5501 {
5502 	if (!nh_grp->can_destroy)
5503 		return;
5504 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5505 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5506 	fib_info_put(nh_grp->ipv4.fi);
5507 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5508 	rhashtable_destroy(&nh_grp->vr_ht);
5509 	kfree(nh_grp);
5510 }
5511 
5512 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5513 				       struct mlxsw_sp_fib_entry *fib_entry,
5514 				       struct fib_info *fi)
5515 {
5516 	struct mlxsw_sp_nexthop_group *nh_grp;
5517 
5518 	if (fi->nh) {
5519 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5520 							   fi->nh->id);
5521 		if (WARN_ON_ONCE(!nh_grp))
5522 			return -EINVAL;
5523 		goto out;
5524 	}
5525 
5526 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5527 	if (!nh_grp) {
5528 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5529 		if (IS_ERR(nh_grp))
5530 			return PTR_ERR(nh_grp);
5531 	}
5532 out:
5533 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5534 	fib_entry->nh_group = nh_grp;
5535 	return 0;
5536 }
5537 
5538 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5539 					struct mlxsw_sp_fib_entry *fib_entry)
5540 {
5541 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5542 
5543 	list_del(&fib_entry->nexthop_group_node);
5544 	if (!list_empty(&nh_grp->fib_list))
5545 		return;
5546 
5547 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5548 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5549 		return;
5550 	}
5551 
5552 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5553 }
5554 
5555 static bool
5556 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5557 {
5558 	struct mlxsw_sp_fib4_entry *fib4_entry;
5559 
5560 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5561 				  common);
5562 	return !fib4_entry->tos;
5563 }
5564 
5565 static bool
5566 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5567 {
5568 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5569 
5570 	switch (fib_entry->fib_node->fib->proto) {
5571 	case MLXSW_SP_L3_PROTO_IPV4:
5572 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5573 			return false;
5574 		break;
5575 	case MLXSW_SP_L3_PROTO_IPV6:
5576 		break;
5577 	}
5578 
5579 	switch (fib_entry->type) {
5580 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5581 		return !!nh_group->nhgi->adj_index_valid;
5582 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5583 		return !!nh_group->nhgi->nh_rif;
5584 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5585 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5586 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5587 		return true;
5588 	default:
5589 		return false;
5590 	}
5591 }
5592 
5593 static struct mlxsw_sp_nexthop *
5594 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5595 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5596 {
5597 	int i;
5598 
5599 	for (i = 0; i < nh_grp->nhgi->count; i++) {
5600 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5601 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5602 
5603 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
5604 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5605 				    &rt->fib6_nh->fib_nh_gw6))
5606 			return nh;
5607 	}
5608 
5609 	return NULL;
5610 }
5611 
5612 static void
5613 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5614 				      struct fib_entry_notifier_info *fen_info)
5615 {
5616 	u32 *p_dst = (u32 *) &fen_info->dst;
5617 	struct fib_rt_info fri;
5618 
5619 	fri.fi = fen_info->fi;
5620 	fri.tb_id = fen_info->tb_id;
5621 	fri.dst = cpu_to_be32(*p_dst);
5622 	fri.dst_len = fen_info->dst_len;
5623 	fri.tos = fen_info->tos;
5624 	fri.type = fen_info->type;
5625 	fri.offload = false;
5626 	fri.trap = false;
5627 	fri.offload_failed = true;
5628 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5629 }
5630 
5631 static void
5632 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5633 				 struct mlxsw_sp_fib_entry *fib_entry)
5634 {
5635 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5636 	int dst_len = fib_entry->fib_node->key.prefix_len;
5637 	struct mlxsw_sp_fib4_entry *fib4_entry;
5638 	struct fib_rt_info fri;
5639 	bool should_offload;
5640 
5641 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5642 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5643 				  common);
5644 	fri.fi = fib4_entry->fi;
5645 	fri.tb_id = fib4_entry->tb_id;
5646 	fri.dst = cpu_to_be32(*p_dst);
5647 	fri.dst_len = dst_len;
5648 	fri.tos = fib4_entry->tos;
5649 	fri.type = fib4_entry->type;
5650 	fri.offload = should_offload;
5651 	fri.trap = !should_offload;
5652 	fri.offload_failed = false;
5653 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5654 }
5655 
5656 static void
5657 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5658 				   struct mlxsw_sp_fib_entry *fib_entry)
5659 {
5660 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5661 	int dst_len = fib_entry->fib_node->key.prefix_len;
5662 	struct mlxsw_sp_fib4_entry *fib4_entry;
5663 	struct fib_rt_info fri;
5664 
5665 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5666 				  common);
5667 	fri.fi = fib4_entry->fi;
5668 	fri.tb_id = fib4_entry->tb_id;
5669 	fri.dst = cpu_to_be32(*p_dst);
5670 	fri.dst_len = dst_len;
5671 	fri.tos = fib4_entry->tos;
5672 	fri.type = fib4_entry->type;
5673 	fri.offload = false;
5674 	fri.trap = false;
5675 	fri.offload_failed = false;
5676 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5677 }
5678 
5679 #if IS_ENABLED(CONFIG_IPV6)
5680 static void
5681 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5682 				      struct fib6_info **rt_arr,
5683 				      unsigned int nrt6)
5684 {
5685 	int i;
5686 
5687 	/* In IPv6 a multipath route is represented using multiple routes, so
5688 	 * we need to set the flags on all of them.
5689 	 */
5690 	for (i = 0; i < nrt6; i++)
5691 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5692 				       false, false, true);
5693 }
5694 #else
5695 static void
5696 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5697 				      struct fib6_info **rt_arr,
5698 				      unsigned int nrt6)
5699 {
5700 }
5701 #endif
5702 
5703 #if IS_ENABLED(CONFIG_IPV6)
5704 static void
5705 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5706 				 struct mlxsw_sp_fib_entry *fib_entry)
5707 {
5708 	struct mlxsw_sp_fib6_entry *fib6_entry;
5709 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5710 	bool should_offload;
5711 
5712 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5713 
5714 	/* In IPv6 a multipath route is represented using multiple routes, so
5715 	 * we need to set the flags on all of them.
5716 	 */
5717 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5718 				  common);
5719 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5720 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5721 				       should_offload, !should_offload, false);
5722 }
5723 #else
5724 static void
5725 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5726 				 struct mlxsw_sp_fib_entry *fib_entry)
5727 {
5728 }
5729 #endif
5730 
5731 #if IS_ENABLED(CONFIG_IPV6)
5732 static void
5733 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5734 				   struct mlxsw_sp_fib_entry *fib_entry)
5735 {
5736 	struct mlxsw_sp_fib6_entry *fib6_entry;
5737 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5738 
5739 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5740 				  common);
5741 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5742 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5743 				       false, false, false);
5744 }
5745 #else
5746 static void
5747 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5748 				   struct mlxsw_sp_fib_entry *fib_entry)
5749 {
5750 }
5751 #endif
5752 
5753 static void
5754 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5755 				struct mlxsw_sp_fib_entry *fib_entry)
5756 {
5757 	switch (fib_entry->fib_node->fib->proto) {
5758 	case MLXSW_SP_L3_PROTO_IPV4:
5759 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5760 		break;
5761 	case MLXSW_SP_L3_PROTO_IPV6:
5762 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5763 		break;
5764 	}
5765 }
5766 
5767 static void
5768 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5769 				  struct mlxsw_sp_fib_entry *fib_entry)
5770 {
5771 	switch (fib_entry->fib_node->fib->proto) {
5772 	case MLXSW_SP_L3_PROTO_IPV4:
5773 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5774 		break;
5775 	case MLXSW_SP_L3_PROTO_IPV6:
5776 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5777 		break;
5778 	}
5779 }
5780 
5781 static void
5782 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5783 				    struct mlxsw_sp_fib_entry *fib_entry,
5784 				    enum mlxsw_sp_fib_entry_op op)
5785 {
5786 	switch (op) {
5787 	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5788 	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5789 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5790 		break;
5791 	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5792 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5793 		break;
5794 	default:
5795 		break;
5796 	}
5797 }
5798 
5799 struct mlxsw_sp_fib_entry_op_ctx_basic {
5800 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5801 };
5802 
5803 static void
5804 mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5805 					enum mlxsw_sp_l3proto proto,
5806 					enum mlxsw_sp_fib_entry_op op,
5807 					u16 virtual_router, u8 prefix_len,
5808 					unsigned char *addr,
5809 					struct mlxsw_sp_fib_entry_priv *priv)
5810 {
5811 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5812 	enum mlxsw_reg_ralxx_protocol ralxx_proto;
5813 	char *ralue_pl = op_ctx_basic->ralue_pl;
5814 	enum mlxsw_reg_ralue_op ralue_op;
5815 
5816 	ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
5817 
5818 	switch (op) {
5819 	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5820 	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5821 		ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
5822 		break;
5823 	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5824 		ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
5825 		break;
5826 	default:
5827 		WARN_ON_ONCE(1);
5828 		return;
5829 	}
5830 
5831 	switch (proto) {
5832 	case MLXSW_SP_L3_PROTO_IPV4:
5833 		mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
5834 				      virtual_router, prefix_len, (u32 *) addr);
5835 		break;
5836 	case MLXSW_SP_L3_PROTO_IPV6:
5837 		mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
5838 				      virtual_router, prefix_len, addr);
5839 		break;
5840 	}
5841 }
5842 
5843 static void
5844 mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5845 						   enum mlxsw_reg_ralue_trap_action trap_action,
5846 						   u16 trap_id, u32 adjacency_index, u16 ecmp_size)
5847 {
5848 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5849 
5850 	mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
5851 					trap_id, adjacency_index, ecmp_size);
5852 }
5853 
5854 static void
5855 mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5856 						  enum mlxsw_reg_ralue_trap_action trap_action,
5857 						  u16 trap_id, u16 local_erif)
5858 {
5859 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5860 
5861 	mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
5862 				       trap_id, local_erif);
5863 }
5864 
5865 static void
5866 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
5867 {
5868 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5869 
5870 	mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
5871 }
5872 
5873 static void
5874 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5875 						      u32 tunnel_ptr)
5876 {
5877 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5878 
5879 	mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
5880 }
5881 
5882 static int
5883 mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5884 					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5885 					  bool *postponed_for_bulk)
5886 {
5887 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5888 
5889 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5890 			       op_ctx_basic->ralue_pl);
5891 }
5892 
5893 static bool
5894 mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
5895 {
5896 	return true;
5897 }
5898 
5899 static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5900 				    struct mlxsw_sp_fib_entry *fib_entry,
5901 				    enum mlxsw_sp_fib_entry_op op)
5902 {
5903 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5904 
5905 	mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
5906 	fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
5907 				    fib_entry->fib_node->key.prefix_len,
5908 				    fib_entry->fib_node->key.addr,
5909 				    fib_entry->priv);
5910 }
5911 
5912 static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5913 				     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5914 				     const struct mlxsw_sp_router_ll_ops *ll_ops)
5915 {
5916 	bool postponed_for_bulk = false;
5917 	int err;
5918 
5919 	err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
5920 	if (!postponed_for_bulk)
5921 		mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
5922 	return err;
5923 }
5924 
5925 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5926 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5927 					struct mlxsw_sp_fib_entry *fib_entry,
5928 					enum mlxsw_sp_fib_entry_op op)
5929 {
5930 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5931 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5932 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5933 	enum mlxsw_reg_ralue_trap_action trap_action;
5934 	u16 trap_id = 0;
5935 	u32 adjacency_index = 0;
5936 	u16 ecmp_size = 0;
5937 
5938 	/* In case the nexthop group adjacency index is valid, use it
5939 	 * with provided ECMP size. Otherwise, setup trap and pass
5940 	 * traffic to kernel.
5941 	 */
5942 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5943 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5944 		adjacency_index = nhgi->adj_index;
5945 		ecmp_size = nhgi->ecmp_size;
5946 	} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
5947 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5948 		adjacency_index = mlxsw_sp->router->adj_trap_index;
5949 		ecmp_size = 1;
5950 	} else {
5951 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5952 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5953 	}
5954 
5955 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5956 	ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
5957 					  adjacency_index, ecmp_size);
5958 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5959 }
5960 
5961 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5962 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5963 				       struct mlxsw_sp_fib_entry *fib_entry,
5964 				       enum mlxsw_sp_fib_entry_op op)
5965 {
5966 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5967 	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
5968 	enum mlxsw_reg_ralue_trap_action trap_action;
5969 	u16 trap_id = 0;
5970 	u16 rif_index = 0;
5971 
5972 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5973 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5974 		rif_index = rif->rif_index;
5975 	} else {
5976 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5977 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5978 	}
5979 
5980 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5981 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
5982 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5983 }
5984 
5985 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
5986 				      struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5987 				      struct mlxsw_sp_fib_entry *fib_entry,
5988 				      enum mlxsw_sp_fib_entry_op op)
5989 {
5990 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5991 
5992 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5993 	ll_ops->fib_entry_act_ip2me_pack(op_ctx);
5994 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5995 }
5996 
5997 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
5998 					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5999 					   struct mlxsw_sp_fib_entry *fib_entry,
6000 					   enum mlxsw_sp_fib_entry_op op)
6001 {
6002 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6003 	enum mlxsw_reg_ralue_trap_action trap_action;
6004 
6005 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
6006 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6007 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
6008 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6009 }
6010 
6011 static int
6012 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
6013 				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6014 				  struct mlxsw_sp_fib_entry *fib_entry,
6015 				  enum mlxsw_sp_fib_entry_op op)
6016 {
6017 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6018 	enum mlxsw_reg_ralue_trap_action trap_action;
6019 	u16 trap_id;
6020 
6021 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6022 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
6023 
6024 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6025 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
6026 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6027 }
6028 
6029 static int
6030 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
6031 				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6032 				 struct mlxsw_sp_fib_entry *fib_entry,
6033 				 enum mlxsw_sp_fib_entry_op op)
6034 {
6035 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6036 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
6037 	const struct mlxsw_sp_ipip_ops *ipip_ops;
6038 	int err;
6039 
6040 	if (WARN_ON(!ipip_entry))
6041 		return -EINVAL;
6042 
6043 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
6044 	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
6045 				     fib_entry->decap.tunnel_index);
6046 	if (err)
6047 		return err;
6048 
6049 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6050 	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
6051 					     fib_entry->decap.tunnel_index);
6052 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6053 }
6054 
6055 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
6056 					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6057 					   struct mlxsw_sp_fib_entry *fib_entry,
6058 					   enum mlxsw_sp_fib_entry_op op)
6059 {
6060 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6061 
6062 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6063 	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
6064 					     fib_entry->decap.tunnel_index);
6065 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6066 }
6067 
6068 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6069 				   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6070 				   struct mlxsw_sp_fib_entry *fib_entry,
6071 				   enum mlxsw_sp_fib_entry_op op)
6072 {
6073 	switch (fib_entry->type) {
6074 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6075 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
6076 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6077 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
6078 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6079 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
6080 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6081 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
6082 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6083 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
6084 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6085 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
6086 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6087 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
6088 	}
6089 	return -EINVAL;
6090 }
6091 
6092 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6093 				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6094 				 struct mlxsw_sp_fib_entry *fib_entry,
6095 				 enum mlxsw_sp_fib_entry_op op)
6096 {
6097 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
6098 
6099 	if (err)
6100 		return err;
6101 
6102 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6103 
6104 	return err;
6105 }
6106 
6107 static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6108 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6109 				       struct mlxsw_sp_fib_entry *fib_entry,
6110 				       bool is_new)
6111 {
6112 	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6113 				     is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
6114 					      MLXSW_SP_FIB_ENTRY_OP_UPDATE);
6115 }
6116 
6117 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6118 				     struct mlxsw_sp_fib_entry *fib_entry)
6119 {
6120 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6121 
6122 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6123 	return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
6124 }
6125 
6126 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6127 				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6128 				  struct mlxsw_sp_fib_entry *fib_entry)
6129 {
6130 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6131 
6132 	if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
6133 		return 0;
6134 	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6135 				     MLXSW_SP_FIB_ENTRY_OP_DELETE);
6136 }
6137 
6138 static int
6139 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6140 			     const struct fib_entry_notifier_info *fen_info,
6141 			     struct mlxsw_sp_fib_entry *fib_entry)
6142 {
6143 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6144 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6145 	struct mlxsw_sp_router *router = mlxsw_sp->router;
6146 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6147 	int ifindex = nhgi->nexthops[0].ifindex;
6148 	struct mlxsw_sp_ipip_entry *ipip_entry;
6149 
6150 	switch (fen_info->type) {
6151 	case RTN_LOCAL:
6152 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6153 							       MLXSW_SP_L3_PROTO_IPV4, dip);
6154 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6155 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6156 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6157 							     fib_entry,
6158 							     ipip_entry);
6159 		}
6160 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6161 						 MLXSW_SP_L3_PROTO_IPV4,
6162 						 &dip)) {
6163 			u32 tunnel_index;
6164 
6165 			tunnel_index = router->nve_decap_config.tunnel_index;
6166 			fib_entry->decap.tunnel_index = tunnel_index;
6167 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6168 			return 0;
6169 		}
6170 		fallthrough;
6171 	case RTN_BROADCAST:
6172 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6173 		return 0;
6174 	case RTN_BLACKHOLE:
6175 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6176 		return 0;
6177 	case RTN_UNREACHABLE:
6178 	case RTN_PROHIBIT:
6179 		/* Packets hitting these routes need to be trapped, but
6180 		 * can do so with a lower priority than packets directed
6181 		 * at the host, so use action type local instead of trap.
6182 		 */
6183 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6184 		return 0;
6185 	case RTN_UNICAST:
6186 		if (nhgi->gateway)
6187 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6188 		else
6189 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6190 		return 0;
6191 	default:
6192 		return -EINVAL;
6193 	}
6194 }
6195 
6196 static void
6197 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6198 			      struct mlxsw_sp_fib_entry *fib_entry)
6199 {
6200 	switch (fib_entry->type) {
6201 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6202 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6203 		break;
6204 	default:
6205 		break;
6206 	}
6207 }
6208 
6209 static void
6210 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6211 			       struct mlxsw_sp_fib4_entry *fib4_entry)
6212 {
6213 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6214 }
6215 
6216 static struct mlxsw_sp_fib4_entry *
6217 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6218 			   struct mlxsw_sp_fib_node *fib_node,
6219 			   const struct fib_entry_notifier_info *fen_info)
6220 {
6221 	struct mlxsw_sp_fib4_entry *fib4_entry;
6222 	struct mlxsw_sp_fib_entry *fib_entry;
6223 	int err;
6224 
6225 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6226 	if (!fib4_entry)
6227 		return ERR_PTR(-ENOMEM);
6228 	fib_entry = &fib4_entry->common;
6229 
6230 	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
6231 	if (IS_ERR(fib_entry->priv)) {
6232 		err = PTR_ERR(fib_entry->priv);
6233 		goto err_fib_entry_priv_create;
6234 	}
6235 
6236 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6237 	if (err)
6238 		goto err_nexthop4_group_get;
6239 
6240 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6241 					     fib_node->fib);
6242 	if (err)
6243 		goto err_nexthop_group_vr_link;
6244 
6245 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6246 	if (err)
6247 		goto err_fib4_entry_type_set;
6248 
6249 	fib4_entry->fi = fen_info->fi;
6250 	fib_info_hold(fib4_entry->fi);
6251 	fib4_entry->tb_id = fen_info->tb_id;
6252 	fib4_entry->type = fen_info->type;
6253 	fib4_entry->tos = fen_info->tos;
6254 
6255 	fib_entry->fib_node = fib_node;
6256 
6257 	return fib4_entry;
6258 
6259 err_fib4_entry_type_set:
6260 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6261 err_nexthop_group_vr_link:
6262 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6263 err_nexthop4_group_get:
6264 	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
6265 err_fib_entry_priv_create:
6266 	kfree(fib4_entry);
6267 	return ERR_PTR(err);
6268 }
6269 
6270 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6271 					struct mlxsw_sp_fib4_entry *fib4_entry)
6272 {
6273 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6274 
6275 	fib_info_put(fib4_entry->fi);
6276 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6277 	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6278 					 fib_node->fib);
6279 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6280 	mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
6281 	kfree(fib4_entry);
6282 }
6283 
6284 static struct mlxsw_sp_fib4_entry *
6285 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6286 			   const struct fib_entry_notifier_info *fen_info)
6287 {
6288 	struct mlxsw_sp_fib4_entry *fib4_entry;
6289 	struct mlxsw_sp_fib_node *fib_node;
6290 	struct mlxsw_sp_fib *fib;
6291 	struct mlxsw_sp_vr *vr;
6292 
6293 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6294 	if (!vr)
6295 		return NULL;
6296 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6297 
6298 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6299 					    sizeof(fen_info->dst),
6300 					    fen_info->dst_len);
6301 	if (!fib_node)
6302 		return NULL;
6303 
6304 	fib4_entry = container_of(fib_node->fib_entry,
6305 				  struct mlxsw_sp_fib4_entry, common);
6306 	if (fib4_entry->tb_id == fen_info->tb_id &&
6307 	    fib4_entry->tos == fen_info->tos &&
6308 	    fib4_entry->type == fen_info->type &&
6309 	    fib4_entry->fi == fen_info->fi)
6310 		return fib4_entry;
6311 
6312 	return NULL;
6313 }
6314 
6315 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6316 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6317 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6318 	.key_len = sizeof(struct mlxsw_sp_fib_key),
6319 	.automatic_shrinking = true,
6320 };
6321 
6322 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6323 				    struct mlxsw_sp_fib_node *fib_node)
6324 {
6325 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6326 				      mlxsw_sp_fib_ht_params);
6327 }
6328 
6329 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6330 				     struct mlxsw_sp_fib_node *fib_node)
6331 {
6332 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6333 			       mlxsw_sp_fib_ht_params);
6334 }
6335 
6336 static struct mlxsw_sp_fib_node *
6337 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6338 			 size_t addr_len, unsigned char prefix_len)
6339 {
6340 	struct mlxsw_sp_fib_key key;
6341 
6342 	memset(&key, 0, sizeof(key));
6343 	memcpy(key.addr, addr, addr_len);
6344 	key.prefix_len = prefix_len;
6345 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6346 }
6347 
6348 static struct mlxsw_sp_fib_node *
6349 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6350 			 size_t addr_len, unsigned char prefix_len)
6351 {
6352 	struct mlxsw_sp_fib_node *fib_node;
6353 
6354 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6355 	if (!fib_node)
6356 		return NULL;
6357 
6358 	list_add(&fib_node->list, &fib->node_list);
6359 	memcpy(fib_node->key.addr, addr, addr_len);
6360 	fib_node->key.prefix_len = prefix_len;
6361 
6362 	return fib_node;
6363 }
6364 
6365 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6366 {
6367 	list_del(&fib_node->list);
6368 	kfree(fib_node);
6369 }
6370 
6371 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6372 				      struct mlxsw_sp_fib_node *fib_node)
6373 {
6374 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6375 	struct mlxsw_sp_fib *fib = fib_node->fib;
6376 	struct mlxsw_sp_lpm_tree *lpm_tree;
6377 	int err;
6378 
6379 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6380 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6381 		goto out;
6382 
6383 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6384 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6385 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6386 					 fib->proto);
6387 	if (IS_ERR(lpm_tree))
6388 		return PTR_ERR(lpm_tree);
6389 
6390 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6391 	if (err)
6392 		goto err_lpm_tree_replace;
6393 
6394 out:
6395 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6396 	return 0;
6397 
6398 err_lpm_tree_replace:
6399 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6400 	return err;
6401 }
6402 
6403 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6404 					 struct mlxsw_sp_fib_node *fib_node)
6405 {
6406 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6407 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6408 	struct mlxsw_sp_fib *fib = fib_node->fib;
6409 	int err;
6410 
6411 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6412 		return;
6413 	/* Try to construct a new LPM tree from the current prefix usage
6414 	 * minus the unused one. If we fail, continue using the old one.
6415 	 */
6416 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6417 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6418 				    fib_node->key.prefix_len);
6419 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6420 					 fib->proto);
6421 	if (IS_ERR(lpm_tree))
6422 		return;
6423 
6424 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6425 	if (err)
6426 		goto err_lpm_tree_replace;
6427 
6428 	return;
6429 
6430 err_lpm_tree_replace:
6431 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6432 }
6433 
6434 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6435 				  struct mlxsw_sp_fib_node *fib_node,
6436 				  struct mlxsw_sp_fib *fib)
6437 {
6438 	int err;
6439 
6440 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
6441 	if (err)
6442 		return err;
6443 	fib_node->fib = fib;
6444 
6445 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6446 	if (err)
6447 		goto err_fib_lpm_tree_link;
6448 
6449 	return 0;
6450 
6451 err_fib_lpm_tree_link:
6452 	fib_node->fib = NULL;
6453 	mlxsw_sp_fib_node_remove(fib, fib_node);
6454 	return err;
6455 }
6456 
6457 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6458 				   struct mlxsw_sp_fib_node *fib_node)
6459 {
6460 	struct mlxsw_sp_fib *fib = fib_node->fib;
6461 
6462 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6463 	fib_node->fib = NULL;
6464 	mlxsw_sp_fib_node_remove(fib, fib_node);
6465 }
6466 
6467 static struct mlxsw_sp_fib_node *
6468 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6469 		      size_t addr_len, unsigned char prefix_len,
6470 		      enum mlxsw_sp_l3proto proto)
6471 {
6472 	struct mlxsw_sp_fib_node *fib_node;
6473 	struct mlxsw_sp_fib *fib;
6474 	struct mlxsw_sp_vr *vr;
6475 	int err;
6476 
6477 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6478 	if (IS_ERR(vr))
6479 		return ERR_CAST(vr);
6480 	fib = mlxsw_sp_vr_fib(vr, proto);
6481 
6482 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6483 	if (fib_node)
6484 		return fib_node;
6485 
6486 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6487 	if (!fib_node) {
6488 		err = -ENOMEM;
6489 		goto err_fib_node_create;
6490 	}
6491 
6492 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6493 	if (err)
6494 		goto err_fib_node_init;
6495 
6496 	return fib_node;
6497 
6498 err_fib_node_init:
6499 	mlxsw_sp_fib_node_destroy(fib_node);
6500 err_fib_node_create:
6501 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6502 	return ERR_PTR(err);
6503 }
6504 
6505 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6506 				  struct mlxsw_sp_fib_node *fib_node)
6507 {
6508 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6509 
6510 	if (fib_node->fib_entry)
6511 		return;
6512 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6513 	mlxsw_sp_fib_node_destroy(fib_node);
6514 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6515 }
6516 
6517 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6518 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6519 					struct mlxsw_sp_fib_entry *fib_entry)
6520 {
6521 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6522 	bool is_new = !fib_node->fib_entry;
6523 	int err;
6524 
6525 	fib_node->fib_entry = fib_entry;
6526 
6527 	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
6528 	if (err)
6529 		goto err_fib_entry_update;
6530 
6531 	return 0;
6532 
6533 err_fib_entry_update:
6534 	fib_node->fib_entry = NULL;
6535 	return err;
6536 }
6537 
6538 static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6539 					    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6540 					    struct mlxsw_sp_fib_entry *fib_entry)
6541 {
6542 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6543 	int err;
6544 
6545 	err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
6546 	fib_node->fib_entry = NULL;
6547 	return err;
6548 }
6549 
6550 static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6551 					   struct mlxsw_sp_fib_entry *fib_entry)
6552 {
6553 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6554 
6555 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6556 	__mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
6557 }
6558 
6559 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6560 {
6561 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6562 	struct mlxsw_sp_fib4_entry *fib4_replaced;
6563 
6564 	if (!fib_node->fib_entry)
6565 		return true;
6566 
6567 	fib4_replaced = container_of(fib_node->fib_entry,
6568 				     struct mlxsw_sp_fib4_entry, common);
6569 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6570 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
6571 		return false;
6572 
6573 	return true;
6574 }
6575 
6576 static int
6577 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6578 			     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6579 			     const struct fib_entry_notifier_info *fen_info)
6580 {
6581 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6582 	struct mlxsw_sp_fib_entry *replaced;
6583 	struct mlxsw_sp_fib_node *fib_node;
6584 	int err;
6585 
6586 	if (fen_info->fi->nh &&
6587 	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6588 		return 0;
6589 
6590 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6591 					 &fen_info->dst, sizeof(fen_info->dst),
6592 					 fen_info->dst_len,
6593 					 MLXSW_SP_L3_PROTO_IPV4);
6594 	if (IS_ERR(fib_node)) {
6595 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6596 		return PTR_ERR(fib_node);
6597 	}
6598 
6599 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6600 	if (IS_ERR(fib4_entry)) {
6601 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6602 		err = PTR_ERR(fib4_entry);
6603 		goto err_fib4_entry_create;
6604 	}
6605 
6606 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6607 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6608 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6609 		return 0;
6610 	}
6611 
6612 	replaced = fib_node->fib_entry;
6613 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
6614 	if (err) {
6615 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6616 		goto err_fib_node_entry_link;
6617 	}
6618 
6619 	/* Nothing to replace */
6620 	if (!replaced)
6621 		return 0;
6622 
6623 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6624 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6625 				     common);
6626 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6627 
6628 	return 0;
6629 
6630 err_fib_node_entry_link:
6631 	fib_node->fib_entry = replaced;
6632 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6633 err_fib4_entry_create:
6634 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6635 	return err;
6636 }
6637 
6638 static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6639 				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6640 				    struct fib_entry_notifier_info *fen_info)
6641 {
6642 	struct mlxsw_sp_fib4_entry *fib4_entry;
6643 	struct mlxsw_sp_fib_node *fib_node;
6644 	int err;
6645 
6646 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6647 	if (!fib4_entry)
6648 		return 0;
6649 	fib_node = fib4_entry->common.fib_node;
6650 
6651 	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
6652 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6653 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6654 	return err;
6655 }
6656 
6657 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6658 {
6659 	/* Multicast routes aren't supported, so ignore them. Neighbour
6660 	 * Discovery packets are specifically trapped.
6661 	 */
6662 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6663 		return true;
6664 
6665 	/* Cloned routes are irrelevant in the forwarding path. */
6666 	if (rt->fib6_flags & RTF_CACHE)
6667 		return true;
6668 
6669 	return false;
6670 }
6671 
6672 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6673 {
6674 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6675 
6676 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6677 	if (!mlxsw_sp_rt6)
6678 		return ERR_PTR(-ENOMEM);
6679 
6680 	/* In case of route replace, replaced route is deleted with
6681 	 * no notification. Take reference to prevent accessing freed
6682 	 * memory.
6683 	 */
6684 	mlxsw_sp_rt6->rt = rt;
6685 	fib6_info_hold(rt);
6686 
6687 	return mlxsw_sp_rt6;
6688 }
6689 
6690 #if IS_ENABLED(CONFIG_IPV6)
6691 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6692 {
6693 	fib6_info_release(rt);
6694 }
6695 #else
6696 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6697 {
6698 }
6699 #endif
6700 
6701 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6702 {
6703 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6704 
6705 	if (!mlxsw_sp_rt6->rt->nh)
6706 		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6707 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6708 	kfree(mlxsw_sp_rt6);
6709 }
6710 
6711 static struct fib6_info *
6712 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6713 {
6714 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6715 				list)->rt;
6716 }
6717 
6718 static struct mlxsw_sp_rt6 *
6719 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6720 			    const struct fib6_info *rt)
6721 {
6722 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6723 
6724 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6725 		if (mlxsw_sp_rt6->rt == rt)
6726 			return mlxsw_sp_rt6;
6727 	}
6728 
6729 	return NULL;
6730 }
6731 
6732 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6733 					const struct fib6_info *rt,
6734 					enum mlxsw_sp_ipip_type *ret)
6735 {
6736 	return rt->fib6_nh->fib_nh_dev &&
6737 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6738 }
6739 
6740 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6741 				  struct mlxsw_sp_nexthop_group *nh_grp,
6742 				  struct mlxsw_sp_nexthop *nh,
6743 				  const struct fib6_info *rt)
6744 {
6745 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6746 
6747 	nh->nhgi = nh_grp->nhgi;
6748 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6749 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6750 #if IS_ENABLED(CONFIG_IPV6)
6751 	nh->neigh_tbl = &nd_tbl;
6752 #endif
6753 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6754 
6755 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6756 
6757 	if (!dev)
6758 		return 0;
6759 	nh->ifindex = dev->ifindex;
6760 
6761 	return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6762 }
6763 
6764 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6765 				   struct mlxsw_sp_nexthop *nh)
6766 {
6767 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6768 	list_del(&nh->router_list_node);
6769 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6770 }
6771 
6772 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6773 				    const struct fib6_info *rt)
6774 {
6775 	return rt->fib6_nh->fib_nh_gw_family ||
6776 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6777 }
6778 
6779 static int
6780 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6781 				  struct mlxsw_sp_nexthop_group *nh_grp,
6782 				  struct mlxsw_sp_fib6_entry *fib6_entry)
6783 {
6784 	struct mlxsw_sp_nexthop_group_info *nhgi;
6785 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6786 	struct mlxsw_sp_nexthop *nh;
6787 	int err, i;
6788 
6789 	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6790 		       GFP_KERNEL);
6791 	if (!nhgi)
6792 		return -ENOMEM;
6793 	nh_grp->nhgi = nhgi;
6794 	nhgi->nh_grp = nh_grp;
6795 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6796 					struct mlxsw_sp_rt6, list);
6797 	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6798 	nhgi->count = fib6_entry->nrt6;
6799 	for (i = 0; i < nhgi->count; i++) {
6800 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
6801 
6802 		nh = &nhgi->nexthops[i];
6803 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6804 		if (err)
6805 			goto err_nexthop6_init;
6806 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6807 	}
6808 	nh_grp->nhgi = nhgi;
6809 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
6810 	if (err)
6811 		goto err_group_inc;
6812 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6813 	if (err)
6814 		goto err_group_refresh;
6815 
6816 	return 0;
6817 
6818 err_group_refresh:
6819 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6820 err_group_inc:
6821 	i = nhgi->count;
6822 err_nexthop6_init:
6823 	for (i--; i >= 0; i--) {
6824 		nh = &nhgi->nexthops[i];
6825 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6826 	}
6827 	kfree(nhgi);
6828 	return err;
6829 }
6830 
6831 static void
6832 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6833 				  struct mlxsw_sp_nexthop_group *nh_grp)
6834 {
6835 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6836 	int i;
6837 
6838 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6839 	for (i = nhgi->count - 1; i >= 0; i--) {
6840 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6841 
6842 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6843 	}
6844 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6845 	WARN_ON_ONCE(nhgi->adj_index_valid);
6846 	kfree(nhgi);
6847 }
6848 
6849 static struct mlxsw_sp_nexthop_group *
6850 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6851 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6852 {
6853 	struct mlxsw_sp_nexthop_group *nh_grp;
6854 	int err;
6855 
6856 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6857 	if (!nh_grp)
6858 		return ERR_PTR(-ENOMEM);
6859 	INIT_LIST_HEAD(&nh_grp->vr_list);
6860 	err = rhashtable_init(&nh_grp->vr_ht,
6861 			      &mlxsw_sp_nexthop_group_vr_ht_params);
6862 	if (err)
6863 		goto err_nexthop_group_vr_ht_init;
6864 	INIT_LIST_HEAD(&nh_grp->fib_list);
6865 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6866 
6867 	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6868 	if (err)
6869 		goto err_nexthop_group_info_init;
6870 
6871 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6872 	if (err)
6873 		goto err_nexthop_group_insert;
6874 
6875 	nh_grp->can_destroy = true;
6876 
6877 	return nh_grp;
6878 
6879 err_nexthop_group_insert:
6880 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6881 err_nexthop_group_info_init:
6882 	rhashtable_destroy(&nh_grp->vr_ht);
6883 err_nexthop_group_vr_ht_init:
6884 	kfree(nh_grp);
6885 	return ERR_PTR(err);
6886 }
6887 
6888 static void
6889 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6890 				struct mlxsw_sp_nexthop_group *nh_grp)
6891 {
6892 	if (!nh_grp->can_destroy)
6893 		return;
6894 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6895 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6896 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6897 	rhashtable_destroy(&nh_grp->vr_ht);
6898 	kfree(nh_grp);
6899 }
6900 
6901 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6902 				       struct mlxsw_sp_fib6_entry *fib6_entry)
6903 {
6904 	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6905 	struct mlxsw_sp_nexthop_group *nh_grp;
6906 
6907 	if (rt->nh) {
6908 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6909 							   rt->nh->id);
6910 		if (WARN_ON_ONCE(!nh_grp))
6911 			return -EINVAL;
6912 		goto out;
6913 	}
6914 
6915 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6916 	if (!nh_grp) {
6917 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6918 		if (IS_ERR(nh_grp))
6919 			return PTR_ERR(nh_grp);
6920 	}
6921 
6922 	/* The route and the nexthop are described by the same struct, so we
6923 	 * need to the update the nexthop offload indication for the new route.
6924 	 */
6925 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6926 
6927 out:
6928 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6929 		      &nh_grp->fib_list);
6930 	fib6_entry->common.nh_group = nh_grp;
6931 
6932 	return 0;
6933 }
6934 
6935 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6936 					struct mlxsw_sp_fib_entry *fib_entry)
6937 {
6938 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6939 
6940 	list_del(&fib_entry->nexthop_group_node);
6941 	if (!list_empty(&nh_grp->fib_list))
6942 		return;
6943 
6944 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6945 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6946 		return;
6947 	}
6948 
6949 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
6950 }
6951 
6952 static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6953 					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6954 					  struct mlxsw_sp_fib6_entry *fib6_entry)
6955 {
6956 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6957 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6958 	int err;
6959 
6960 	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6961 	fib6_entry->common.nh_group = NULL;
6962 	list_del(&fib6_entry->common.nexthop_group_node);
6963 
6964 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6965 	if (err)
6966 		goto err_nexthop6_group_get;
6967 
6968 	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6969 					     fib_node->fib);
6970 	if (err)
6971 		goto err_nexthop_group_vr_link;
6972 
6973 	/* In case this entry is offloaded, then the adjacency index
6974 	 * currently associated with it in the device's table is that
6975 	 * of the old group. Start using the new one instead.
6976 	 */
6977 	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
6978 					  &fib6_entry->common, false);
6979 	if (err)
6980 		goto err_fib_entry_update;
6981 
6982 	if (list_empty(&old_nh_grp->fib_list))
6983 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6984 
6985 	return 0;
6986 
6987 err_fib_entry_update:
6988 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6989 					 fib_node->fib);
6990 err_nexthop_group_vr_link:
6991 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6992 err_nexthop6_group_get:
6993 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6994 		      &old_nh_grp->fib_list);
6995 	fib6_entry->common.nh_group = old_nh_grp;
6996 	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6997 	return err;
6998 }
6999 
7000 static int
7001 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
7002 				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7003 				struct mlxsw_sp_fib6_entry *fib6_entry,
7004 				struct fib6_info **rt_arr, unsigned int nrt6)
7005 {
7006 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7007 	int err, i;
7008 
7009 	for (i = 0; i < nrt6; i++) {
7010 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7011 		if (IS_ERR(mlxsw_sp_rt6)) {
7012 			err = PTR_ERR(mlxsw_sp_rt6);
7013 			goto err_rt6_create;
7014 		}
7015 
7016 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7017 		fib6_entry->nrt6++;
7018 	}
7019 
7020 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
7021 	if (err)
7022 		goto err_nexthop6_group_update;
7023 
7024 	return 0;
7025 
7026 err_nexthop6_group_update:
7027 	i = nrt6;
7028 err_rt6_create:
7029 	for (i--; i >= 0; i--) {
7030 		fib6_entry->nrt6--;
7031 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7032 					       struct mlxsw_sp_rt6, list);
7033 		list_del(&mlxsw_sp_rt6->list);
7034 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7035 	}
7036 	return err;
7037 }
7038 
7039 static void
7040 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
7041 				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7042 				struct mlxsw_sp_fib6_entry *fib6_entry,
7043 				struct fib6_info **rt_arr, unsigned int nrt6)
7044 {
7045 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7046 	int i;
7047 
7048 	for (i = 0; i < nrt6; i++) {
7049 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
7050 							   rt_arr[i]);
7051 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
7052 			continue;
7053 
7054 		fib6_entry->nrt6--;
7055 		list_del(&mlxsw_sp_rt6->list);
7056 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7057 	}
7058 
7059 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
7060 }
7061 
7062 static int
7063 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
7064 				   struct mlxsw_sp_fib_entry *fib_entry,
7065 				   const struct fib6_info *rt)
7066 {
7067 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
7068 	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
7069 	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
7070 	struct mlxsw_sp_router *router = mlxsw_sp->router;
7071 	int ifindex = nhgi->nexthops[0].ifindex;
7072 	struct mlxsw_sp_ipip_entry *ipip_entry;
7073 
7074 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7075 	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
7076 						       MLXSW_SP_L3_PROTO_IPV6,
7077 						       dip);
7078 
7079 	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
7080 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
7081 		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
7082 						     ipip_entry);
7083 	}
7084 	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
7085 					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
7086 		u32 tunnel_index;
7087 
7088 		tunnel_index = router->nve_decap_config.tunnel_index;
7089 		fib_entry->decap.tunnel_index = tunnel_index;
7090 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
7091 	}
7092 
7093 	return 0;
7094 }
7095 
7096 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
7097 					struct mlxsw_sp_fib_entry *fib_entry,
7098 					const struct fib6_info *rt)
7099 {
7100 	if (rt->fib6_flags & RTF_LOCAL)
7101 		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
7102 							  rt);
7103 	if (rt->fib6_flags & RTF_ANYCAST)
7104 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7105 	else if (rt->fib6_type == RTN_BLACKHOLE)
7106 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
7107 	else if (rt->fib6_flags & RTF_REJECT)
7108 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
7109 	else if (fib_entry->nh_group->nhgi->gateway)
7110 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
7111 	else
7112 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
7113 
7114 	return 0;
7115 }
7116 
7117 static void
7118 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
7119 {
7120 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
7121 
7122 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
7123 				 list) {
7124 		fib6_entry->nrt6--;
7125 		list_del(&mlxsw_sp_rt6->list);
7126 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7127 	}
7128 }
7129 
7130 static struct mlxsw_sp_fib6_entry *
7131 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
7132 			   struct mlxsw_sp_fib_node *fib_node,
7133 			   struct fib6_info **rt_arr, unsigned int nrt6)
7134 {
7135 	struct mlxsw_sp_fib6_entry *fib6_entry;
7136 	struct mlxsw_sp_fib_entry *fib_entry;
7137 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7138 	int err, i;
7139 
7140 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
7141 	if (!fib6_entry)
7142 		return ERR_PTR(-ENOMEM);
7143 	fib_entry = &fib6_entry->common;
7144 
7145 	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
7146 	if (IS_ERR(fib_entry->priv)) {
7147 		err = PTR_ERR(fib_entry->priv);
7148 		goto err_fib_entry_priv_create;
7149 	}
7150 
7151 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
7152 
7153 	for (i = 0; i < nrt6; i++) {
7154 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7155 		if (IS_ERR(mlxsw_sp_rt6)) {
7156 			err = PTR_ERR(mlxsw_sp_rt6);
7157 			goto err_rt6_create;
7158 		}
7159 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7160 		fib6_entry->nrt6++;
7161 	}
7162 
7163 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7164 	if (err)
7165 		goto err_nexthop6_group_get;
7166 
7167 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
7168 					     fib_node->fib);
7169 	if (err)
7170 		goto err_nexthop_group_vr_link;
7171 
7172 	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
7173 	if (err)
7174 		goto err_fib6_entry_type_set;
7175 
7176 	fib_entry->fib_node = fib_node;
7177 
7178 	return fib6_entry;
7179 
7180 err_fib6_entry_type_set:
7181 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
7182 err_nexthop_group_vr_link:
7183 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
7184 err_nexthop6_group_get:
7185 	i = nrt6;
7186 err_rt6_create:
7187 	for (i--; i >= 0; i--) {
7188 		fib6_entry->nrt6--;
7189 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7190 					       struct mlxsw_sp_rt6, list);
7191 		list_del(&mlxsw_sp_rt6->list);
7192 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7193 	}
7194 	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
7195 err_fib_entry_priv_create:
7196 	kfree(fib6_entry);
7197 	return ERR_PTR(err);
7198 }
7199 
7200 static void
7201 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
7202 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7203 {
7204 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
7205 }
7206 
7207 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
7208 					struct mlxsw_sp_fib6_entry *fib6_entry)
7209 {
7210 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7211 
7212 	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
7213 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7214 					 fib_node->fib);
7215 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7216 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7217 	WARN_ON(fib6_entry->nrt6);
7218 	mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
7219 	kfree(fib6_entry);
7220 }
7221 
7222 static struct mlxsw_sp_fib6_entry *
7223 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7224 			   const struct fib6_info *rt)
7225 {
7226 	struct mlxsw_sp_fib6_entry *fib6_entry;
7227 	struct mlxsw_sp_fib_node *fib_node;
7228 	struct mlxsw_sp_fib *fib;
7229 	struct fib6_info *cmp_rt;
7230 	struct mlxsw_sp_vr *vr;
7231 
7232 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7233 	if (!vr)
7234 		return NULL;
7235 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7236 
7237 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7238 					    sizeof(rt->fib6_dst.addr),
7239 					    rt->fib6_dst.plen);
7240 	if (!fib_node)
7241 		return NULL;
7242 
7243 	fib6_entry = container_of(fib_node->fib_entry,
7244 				  struct mlxsw_sp_fib6_entry, common);
7245 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7246 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7247 	    rt->fib6_metric == cmp_rt->fib6_metric &&
7248 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7249 		return fib6_entry;
7250 
7251 	return NULL;
7252 }
7253 
7254 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7255 {
7256 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7257 	struct mlxsw_sp_fib6_entry *fib6_replaced;
7258 	struct fib6_info *rt, *rt_replaced;
7259 
7260 	if (!fib_node->fib_entry)
7261 		return true;
7262 
7263 	fib6_replaced = container_of(fib_node->fib_entry,
7264 				     struct mlxsw_sp_fib6_entry,
7265 				     common);
7266 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7267 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7268 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7269 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7270 		return false;
7271 
7272 	return true;
7273 }
7274 
7275 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7276 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7277 					struct fib6_info **rt_arr, unsigned int nrt6)
7278 {
7279 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7280 	struct mlxsw_sp_fib_entry *replaced;
7281 	struct mlxsw_sp_fib_node *fib_node;
7282 	struct fib6_info *rt = rt_arr[0];
7283 	int err;
7284 
7285 	if (rt->fib6_src.plen)
7286 		return -EINVAL;
7287 
7288 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7289 		return 0;
7290 
7291 	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7292 		return 0;
7293 
7294 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7295 					 &rt->fib6_dst.addr,
7296 					 sizeof(rt->fib6_dst.addr),
7297 					 rt->fib6_dst.plen,
7298 					 MLXSW_SP_L3_PROTO_IPV6);
7299 	if (IS_ERR(fib_node))
7300 		return PTR_ERR(fib_node);
7301 
7302 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7303 						nrt6);
7304 	if (IS_ERR(fib6_entry)) {
7305 		err = PTR_ERR(fib6_entry);
7306 		goto err_fib6_entry_create;
7307 	}
7308 
7309 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7310 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7311 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7312 		return 0;
7313 	}
7314 
7315 	replaced = fib_node->fib_entry;
7316 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
7317 	if (err)
7318 		goto err_fib_node_entry_link;
7319 
7320 	/* Nothing to replace */
7321 	if (!replaced)
7322 		return 0;
7323 
7324 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7325 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7326 				     common);
7327 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7328 
7329 	return 0;
7330 
7331 err_fib_node_entry_link:
7332 	fib_node->fib_entry = replaced;
7333 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7334 err_fib6_entry_create:
7335 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7336 	return err;
7337 }
7338 
7339 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7340 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7341 				       struct fib6_info **rt_arr, unsigned int nrt6)
7342 {
7343 	struct mlxsw_sp_fib6_entry *fib6_entry;
7344 	struct mlxsw_sp_fib_node *fib_node;
7345 	struct fib6_info *rt = rt_arr[0];
7346 	int err;
7347 
7348 	if (rt->fib6_src.plen)
7349 		return -EINVAL;
7350 
7351 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7352 		return 0;
7353 
7354 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7355 					 &rt->fib6_dst.addr,
7356 					 sizeof(rt->fib6_dst.addr),
7357 					 rt->fib6_dst.plen,
7358 					 MLXSW_SP_L3_PROTO_IPV6);
7359 	if (IS_ERR(fib_node))
7360 		return PTR_ERR(fib_node);
7361 
7362 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7363 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7364 		return -EINVAL;
7365 	}
7366 
7367 	fib6_entry = container_of(fib_node->fib_entry,
7368 				  struct mlxsw_sp_fib6_entry, common);
7369 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
7370 	if (err)
7371 		goto err_fib6_entry_nexthop_add;
7372 
7373 	return 0;
7374 
7375 err_fib6_entry_nexthop_add:
7376 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7377 	return err;
7378 }
7379 
7380 static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7381 				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7382 				    struct fib6_info **rt_arr, unsigned int nrt6)
7383 {
7384 	struct mlxsw_sp_fib6_entry *fib6_entry;
7385 	struct mlxsw_sp_fib_node *fib_node;
7386 	struct fib6_info *rt = rt_arr[0];
7387 	int err;
7388 
7389 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7390 		return 0;
7391 
7392 	/* Multipath routes are first added to the FIB trie and only then
7393 	 * notified. If we vetoed the addition, we will get a delete
7394 	 * notification for a route we do not have. Therefore, do not warn if
7395 	 * route was not found.
7396 	 */
7397 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7398 	if (!fib6_entry)
7399 		return 0;
7400 
7401 	/* If not all the nexthops are deleted, then only reduce the nexthop
7402 	 * group.
7403 	 */
7404 	if (nrt6 != fib6_entry->nrt6) {
7405 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
7406 		return 0;
7407 	}
7408 
7409 	fib_node = fib6_entry->common.fib_node;
7410 
7411 	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
7412 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7413 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7414 	return err;
7415 }
7416 
7417 static struct mlxsw_sp_mr_table *
7418 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7419 {
7420 	if (family == RTNL_FAMILY_IPMR)
7421 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7422 	else
7423 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7424 }
7425 
7426 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7427 				     struct mfc_entry_notifier_info *men_info,
7428 				     bool replace)
7429 {
7430 	struct mlxsw_sp_mr_table *mrt;
7431 	struct mlxsw_sp_vr *vr;
7432 
7433 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7434 	if (IS_ERR(vr))
7435 		return PTR_ERR(vr);
7436 
7437 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7438 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7439 }
7440 
7441 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7442 				      struct mfc_entry_notifier_info *men_info)
7443 {
7444 	struct mlxsw_sp_mr_table *mrt;
7445 	struct mlxsw_sp_vr *vr;
7446 
7447 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7448 	if (WARN_ON(!vr))
7449 		return;
7450 
7451 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7452 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7453 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7454 }
7455 
7456 static int
7457 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7458 			      struct vif_entry_notifier_info *ven_info)
7459 {
7460 	struct mlxsw_sp_mr_table *mrt;
7461 	struct mlxsw_sp_rif *rif;
7462 	struct mlxsw_sp_vr *vr;
7463 
7464 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7465 	if (IS_ERR(vr))
7466 		return PTR_ERR(vr);
7467 
7468 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7469 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7470 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7471 				   ven_info->vif_index,
7472 				   ven_info->vif_flags, rif);
7473 }
7474 
7475 static void
7476 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7477 			      struct vif_entry_notifier_info *ven_info)
7478 {
7479 	struct mlxsw_sp_mr_table *mrt;
7480 	struct mlxsw_sp_vr *vr;
7481 
7482 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7483 	if (WARN_ON(!vr))
7484 		return;
7485 
7486 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7487 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7488 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7489 }
7490 
7491 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7492 				     struct mlxsw_sp_fib_node *fib_node)
7493 {
7494 	struct mlxsw_sp_fib4_entry *fib4_entry;
7495 
7496 	fib4_entry = container_of(fib_node->fib_entry,
7497 				  struct mlxsw_sp_fib4_entry, common);
7498 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7499 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7500 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7501 }
7502 
7503 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7504 				     struct mlxsw_sp_fib_node *fib_node)
7505 {
7506 	struct mlxsw_sp_fib6_entry *fib6_entry;
7507 
7508 	fib6_entry = container_of(fib_node->fib_entry,
7509 				  struct mlxsw_sp_fib6_entry, common);
7510 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7511 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7512 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7513 }
7514 
7515 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7516 				    struct mlxsw_sp_fib_node *fib_node)
7517 {
7518 	switch (fib_node->fib->proto) {
7519 	case MLXSW_SP_L3_PROTO_IPV4:
7520 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7521 		break;
7522 	case MLXSW_SP_L3_PROTO_IPV6:
7523 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7524 		break;
7525 	}
7526 }
7527 
7528 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7529 				  struct mlxsw_sp_vr *vr,
7530 				  enum mlxsw_sp_l3proto proto)
7531 {
7532 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7533 	struct mlxsw_sp_fib_node *fib_node, *tmp;
7534 
7535 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7536 		bool do_break = &tmp->list == &fib->node_list;
7537 
7538 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7539 		if (do_break)
7540 			break;
7541 	}
7542 }
7543 
7544 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7545 {
7546 	int i, j;
7547 
7548 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
7549 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7550 
7551 		if (!mlxsw_sp_vr_is_used(vr))
7552 			continue;
7553 
7554 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7555 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7556 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7557 
7558 		/* If virtual router was only used for IPv4, then it's no
7559 		 * longer used.
7560 		 */
7561 		if (!mlxsw_sp_vr_is_used(vr))
7562 			continue;
7563 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7564 	}
7565 }
7566 
7567 struct mlxsw_sp_fib6_event {
7568 	struct fib6_info **rt_arr;
7569 	unsigned int nrt6;
7570 };
7571 
7572 struct mlxsw_sp_fib_event {
7573 	struct list_head list; /* node in fib queue */
7574 	union {
7575 		struct mlxsw_sp_fib6_event fib6_event;
7576 		struct fib_entry_notifier_info fen_info;
7577 		struct fib_rule_notifier_info fr_info;
7578 		struct fib_nh_notifier_info fnh_info;
7579 		struct mfc_entry_notifier_info men_info;
7580 		struct vif_entry_notifier_info ven_info;
7581 	};
7582 	struct mlxsw_sp *mlxsw_sp;
7583 	unsigned long event;
7584 	int family;
7585 };
7586 
7587 static int
7588 mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
7589 				struct fib6_entry_notifier_info *fen6_info)
7590 {
7591 	struct fib6_info *rt = fen6_info->rt;
7592 	struct fib6_info **rt_arr;
7593 	struct fib6_info *iter;
7594 	unsigned int nrt6;
7595 	int i = 0;
7596 
7597 	nrt6 = fen6_info->nsiblings + 1;
7598 
7599 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7600 	if (!rt_arr)
7601 		return -ENOMEM;
7602 
7603 	fib6_event->rt_arr = rt_arr;
7604 	fib6_event->nrt6 = nrt6;
7605 
7606 	rt_arr[0] = rt;
7607 	fib6_info_hold(rt);
7608 
7609 	if (!fen6_info->nsiblings)
7610 		return 0;
7611 
7612 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7613 		if (i == fen6_info->nsiblings)
7614 			break;
7615 
7616 		rt_arr[i + 1] = iter;
7617 		fib6_info_hold(iter);
7618 		i++;
7619 	}
7620 	WARN_ON_ONCE(i != fen6_info->nsiblings);
7621 
7622 	return 0;
7623 }
7624 
7625 static void
7626 mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
7627 {
7628 	int i;
7629 
7630 	for (i = 0; i < fib6_event->nrt6; i++)
7631 		mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
7632 	kfree(fib6_event->rt_arr);
7633 }
7634 
7635 static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
7636 					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7637 					       struct mlxsw_sp_fib_event *fib_event)
7638 {
7639 	int err;
7640 
7641 	mlxsw_sp_span_respin(mlxsw_sp);
7642 
7643 	switch (fib_event->event) {
7644 	case FIB_EVENT_ENTRY_REPLACE:
7645 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
7646 		if (err) {
7647 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7648 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7649 			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7650 							      &fib_event->fen_info);
7651 		}
7652 		fib_info_put(fib_event->fen_info.fi);
7653 		break;
7654 	case FIB_EVENT_ENTRY_DEL:
7655 		err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
7656 		if (err)
7657 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7658 		fib_info_put(fib_event->fen_info.fi);
7659 		break;
7660 	case FIB_EVENT_NH_ADD:
7661 	case FIB_EVENT_NH_DEL:
7662 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
7663 		fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
7664 		break;
7665 	}
7666 }
7667 
7668 static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
7669 					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7670 					       struct mlxsw_sp_fib_event *fib_event)
7671 {
7672 	struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
7673 	int err;
7674 
7675 	mlxsw_sp_span_respin(mlxsw_sp);
7676 
7677 	switch (fib_event->event) {
7678 	case FIB_EVENT_ENTRY_REPLACE:
7679 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7680 						   fib_event->fib6_event.nrt6);
7681 		if (err) {
7682 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7683 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7684 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7685 							      fib6_event->rt_arr,
7686 							      fib6_event->nrt6);
7687 		}
7688 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7689 		break;
7690 	case FIB_EVENT_ENTRY_APPEND:
7691 		err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7692 						  fib_event->fib6_event.nrt6);
7693 		if (err) {
7694 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7695 			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7696 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7697 							      fib6_event->rt_arr,
7698 							      fib6_event->nrt6);
7699 		}
7700 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7701 		break;
7702 	case FIB_EVENT_ENTRY_DEL:
7703 		err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7704 					       fib_event->fib6_event.nrt6);
7705 		if (err)
7706 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7707 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7708 		break;
7709 	}
7710 }
7711 
7712 static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
7713 						struct mlxsw_sp_fib_event *fib_event)
7714 {
7715 	bool replace;
7716 	int err;
7717 
7718 	rtnl_lock();
7719 	mutex_lock(&mlxsw_sp->router->lock);
7720 	switch (fib_event->event) {
7721 	case FIB_EVENT_ENTRY_REPLACE:
7722 	case FIB_EVENT_ENTRY_ADD:
7723 		replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
7724 
7725 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
7726 		if (err)
7727 			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7728 		mr_cache_put(fib_event->men_info.mfc);
7729 		break;
7730 	case FIB_EVENT_ENTRY_DEL:
7731 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
7732 		mr_cache_put(fib_event->men_info.mfc);
7733 		break;
7734 	case FIB_EVENT_VIF_ADD:
7735 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7736 						    &fib_event->ven_info);
7737 		if (err)
7738 			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7739 		dev_put(fib_event->ven_info.dev);
7740 		break;
7741 	case FIB_EVENT_VIF_DEL:
7742 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
7743 		dev_put(fib_event->ven_info.dev);
7744 		break;
7745 	}
7746 	mutex_unlock(&mlxsw_sp->router->lock);
7747 	rtnl_unlock();
7748 }
7749 
7750 static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
7751 {
7752 	struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
7753 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
7754 	struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
7755 	struct mlxsw_sp_fib_event *next_fib_event;
7756 	struct mlxsw_sp_fib_event *fib_event;
7757 	int last_family = AF_UNSPEC;
7758 	LIST_HEAD(fib_event_queue);
7759 
7760 	spin_lock_bh(&router->fib_event_queue_lock);
7761 	list_splice_init(&router->fib_event_queue, &fib_event_queue);
7762 	spin_unlock_bh(&router->fib_event_queue_lock);
7763 
7764 	/* Router lock is held here to make sure per-instance
7765 	 * operation context is not used in between FIB4/6 events
7766 	 * processing.
7767 	 */
7768 	mutex_lock(&router->lock);
7769 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
7770 	list_for_each_entry_safe(fib_event, next_fib_event,
7771 				 &fib_event_queue, list) {
7772 		/* Check if the next entry in the queue exists and it is
7773 		 * of the same type (family and event) as the currect one.
7774 		 * In that case it is permitted to do the bulking
7775 		 * of multiple FIB entries to a single register write.
7776 		 */
7777 		op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
7778 				  fib_event->family == next_fib_event->family &&
7779 				  fib_event->event == next_fib_event->event;
7780 		op_ctx->event = fib_event->event;
7781 
7782 		/* In case family of this and the previous entry are different, context
7783 		 * reinitialization is going to be needed now, indicate that.
7784 		 * Note that since last_family is initialized to AF_UNSPEC, this is always
7785 		 * going to happen for the first entry processed in the work.
7786 		 */
7787 		if (fib_event->family != last_family)
7788 			op_ctx->initialized = false;
7789 
7790 		switch (fib_event->family) {
7791 		case AF_INET:
7792 			mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
7793 							   fib_event);
7794 			break;
7795 		case AF_INET6:
7796 			mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
7797 							   fib_event);
7798 			break;
7799 		case RTNL_FAMILY_IP6MR:
7800 		case RTNL_FAMILY_IPMR:
7801 			/* Unlock here as inside FIBMR the lock is taken again
7802 			 * under RTNL. The per-instance operation context
7803 			 * is not used by FIBMR.
7804 			 */
7805 			mutex_unlock(&router->lock);
7806 			mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
7807 							    fib_event);
7808 			mutex_lock(&router->lock);
7809 			break;
7810 		default:
7811 			WARN_ON_ONCE(1);
7812 		}
7813 		last_family = fib_event->family;
7814 		kfree(fib_event);
7815 		cond_resched();
7816 	}
7817 	WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
7818 	mutex_unlock(&router->lock);
7819 }
7820 
7821 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
7822 				       struct fib_notifier_info *info)
7823 {
7824 	struct fib_entry_notifier_info *fen_info;
7825 	struct fib_nh_notifier_info *fnh_info;
7826 
7827 	switch (fib_event->event) {
7828 	case FIB_EVENT_ENTRY_REPLACE:
7829 	case FIB_EVENT_ENTRY_DEL:
7830 		fen_info = container_of(info, struct fib_entry_notifier_info,
7831 					info);
7832 		fib_event->fen_info = *fen_info;
7833 		/* Take reference on fib_info to prevent it from being
7834 		 * freed while event is queued. Release it afterwards.
7835 		 */
7836 		fib_info_hold(fib_event->fen_info.fi);
7837 		break;
7838 	case FIB_EVENT_NH_ADD:
7839 	case FIB_EVENT_NH_DEL:
7840 		fnh_info = container_of(info, struct fib_nh_notifier_info,
7841 					info);
7842 		fib_event->fnh_info = *fnh_info;
7843 		fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
7844 		break;
7845 	}
7846 }
7847 
7848 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
7849 				      struct fib_notifier_info *info)
7850 {
7851 	struct fib6_entry_notifier_info *fen6_info;
7852 	int err;
7853 
7854 	switch (fib_event->event) {
7855 	case FIB_EVENT_ENTRY_REPLACE:
7856 	case FIB_EVENT_ENTRY_APPEND:
7857 	case FIB_EVENT_ENTRY_DEL:
7858 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
7859 					 info);
7860 		err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
7861 						      fen6_info);
7862 		if (err)
7863 			return err;
7864 		break;
7865 	}
7866 
7867 	return 0;
7868 }
7869 
7870 static void
7871 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
7872 			    struct fib_notifier_info *info)
7873 {
7874 	switch (fib_event->event) {
7875 	case FIB_EVENT_ENTRY_REPLACE:
7876 	case FIB_EVENT_ENTRY_ADD:
7877 	case FIB_EVENT_ENTRY_DEL:
7878 		memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
7879 		mr_cache_hold(fib_event->men_info.mfc);
7880 		break;
7881 	case FIB_EVENT_VIF_ADD:
7882 	case FIB_EVENT_VIF_DEL:
7883 		memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
7884 		dev_hold(fib_event->ven_info.dev);
7885 		break;
7886 	}
7887 }
7888 
7889 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7890 					  struct fib_notifier_info *info,
7891 					  struct mlxsw_sp *mlxsw_sp)
7892 {
7893 	struct netlink_ext_ack *extack = info->extack;
7894 	struct fib_rule_notifier_info *fr_info;
7895 	struct fib_rule *rule;
7896 	int err = 0;
7897 
7898 	/* nothing to do at the moment */
7899 	if (event == FIB_EVENT_RULE_DEL)
7900 		return 0;
7901 
7902 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
7903 	rule = fr_info->rule;
7904 
7905 	/* Rule only affects locally generated traffic */
7906 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7907 		return 0;
7908 
7909 	switch (info->family) {
7910 	case AF_INET:
7911 		if (!fib4_rule_default(rule) && !rule->l3mdev)
7912 			err = -EOPNOTSUPP;
7913 		break;
7914 	case AF_INET6:
7915 		if (!fib6_rule_default(rule) && !rule->l3mdev)
7916 			err = -EOPNOTSUPP;
7917 		break;
7918 	case RTNL_FAMILY_IPMR:
7919 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
7920 			err = -EOPNOTSUPP;
7921 		break;
7922 	case RTNL_FAMILY_IP6MR:
7923 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7924 			err = -EOPNOTSUPP;
7925 		break;
7926 	}
7927 
7928 	if (err < 0)
7929 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7930 
7931 	return err;
7932 }
7933 
7934 /* Called with rcu_read_lock() */
7935 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7936 				     unsigned long event, void *ptr)
7937 {
7938 	struct mlxsw_sp_fib_event *fib_event;
7939 	struct fib_notifier_info *info = ptr;
7940 	struct mlxsw_sp_router *router;
7941 	int err;
7942 
7943 	if ((info->family != AF_INET && info->family != AF_INET6 &&
7944 	     info->family != RTNL_FAMILY_IPMR &&
7945 	     info->family != RTNL_FAMILY_IP6MR))
7946 		return NOTIFY_DONE;
7947 
7948 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7949 
7950 	switch (event) {
7951 	case FIB_EVENT_RULE_ADD:
7952 	case FIB_EVENT_RULE_DEL:
7953 		err = mlxsw_sp_router_fib_rule_event(event, info,
7954 						     router->mlxsw_sp);
7955 		return notifier_from_errno(err);
7956 	case FIB_EVENT_ENTRY_ADD:
7957 	case FIB_EVENT_ENTRY_REPLACE:
7958 	case FIB_EVENT_ENTRY_APPEND:
7959 		if (info->family == AF_INET) {
7960 			struct fib_entry_notifier_info *fen_info = ptr;
7961 
7962 			if (fen_info->fi->fib_nh_is_v6) {
7963 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7964 				return notifier_from_errno(-EINVAL);
7965 			}
7966 		}
7967 		break;
7968 	}
7969 
7970 	fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
7971 	if (!fib_event)
7972 		return NOTIFY_BAD;
7973 
7974 	fib_event->mlxsw_sp = router->mlxsw_sp;
7975 	fib_event->event = event;
7976 	fib_event->family = info->family;
7977 
7978 	switch (info->family) {
7979 	case AF_INET:
7980 		mlxsw_sp_router_fib4_event(fib_event, info);
7981 		break;
7982 	case AF_INET6:
7983 		err = mlxsw_sp_router_fib6_event(fib_event, info);
7984 		if (err)
7985 			goto err_fib_event;
7986 		break;
7987 	case RTNL_FAMILY_IP6MR:
7988 	case RTNL_FAMILY_IPMR:
7989 		mlxsw_sp_router_fibmr_event(fib_event, info);
7990 		break;
7991 	}
7992 
7993 	/* Enqueue the event and trigger the work */
7994 	spin_lock_bh(&router->fib_event_queue_lock);
7995 	list_add_tail(&fib_event->list, &router->fib_event_queue);
7996 	spin_unlock_bh(&router->fib_event_queue_lock);
7997 	mlxsw_core_schedule_work(&router->fib_event_work);
7998 
7999 	return NOTIFY_DONE;
8000 
8001 err_fib_event:
8002 	kfree(fib_event);
8003 	return NOTIFY_BAD;
8004 }
8005 
8006 static struct mlxsw_sp_rif *
8007 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
8008 			 const struct net_device *dev)
8009 {
8010 	int i;
8011 
8012 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
8013 		if (mlxsw_sp->router->rifs[i] &&
8014 		    mlxsw_sp->router->rifs[i]->dev == dev)
8015 			return mlxsw_sp->router->rifs[i];
8016 
8017 	return NULL;
8018 }
8019 
8020 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
8021 			 const struct net_device *dev)
8022 {
8023 	struct mlxsw_sp_rif *rif;
8024 
8025 	mutex_lock(&mlxsw_sp->router->lock);
8026 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8027 	mutex_unlock(&mlxsw_sp->router->lock);
8028 
8029 	return rif;
8030 }
8031 
8032 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
8033 {
8034 	struct mlxsw_sp_rif *rif;
8035 	u16 vid = 0;
8036 
8037 	mutex_lock(&mlxsw_sp->router->lock);
8038 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8039 	if (!rif)
8040 		goto out;
8041 
8042 	/* We only return the VID for VLAN RIFs. Otherwise we return an
8043 	 * invalid value (0).
8044 	 */
8045 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
8046 		goto out;
8047 
8048 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
8049 
8050 out:
8051 	mutex_unlock(&mlxsw_sp->router->lock);
8052 	return vid;
8053 }
8054 
8055 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
8056 {
8057 	char ritr_pl[MLXSW_REG_RITR_LEN];
8058 	int err;
8059 
8060 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
8061 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8062 	if (err)
8063 		return err;
8064 
8065 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
8066 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8067 }
8068 
8069 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
8070 					  struct mlxsw_sp_rif *rif)
8071 {
8072 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
8073 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
8074 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8075 }
8076 
8077 static bool
8078 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
8079 			   unsigned long event)
8080 {
8081 	struct inet6_dev *inet6_dev;
8082 	bool addr_list_empty = true;
8083 	struct in_device *idev;
8084 
8085 	switch (event) {
8086 	case NETDEV_UP:
8087 		return rif == NULL;
8088 	case NETDEV_DOWN:
8089 		rcu_read_lock();
8090 		idev = __in_dev_get_rcu(dev);
8091 		if (idev && idev->ifa_list)
8092 			addr_list_empty = false;
8093 
8094 		inet6_dev = __in6_dev_get(dev);
8095 		if (addr_list_empty && inet6_dev &&
8096 		    !list_empty(&inet6_dev->addr_list))
8097 			addr_list_empty = false;
8098 		rcu_read_unlock();
8099 
8100 		/* macvlans do not have a RIF, but rather piggy back on the
8101 		 * RIF of their lower device.
8102 		 */
8103 		if (netif_is_macvlan(dev) && addr_list_empty)
8104 			return true;
8105 
8106 		if (rif && addr_list_empty &&
8107 		    !netif_is_l3_slave(rif->dev))
8108 			return true;
8109 		/* It is possible we already removed the RIF ourselves
8110 		 * if it was assigned to a netdev that is now a bridge
8111 		 * or LAG slave.
8112 		 */
8113 		return false;
8114 	}
8115 
8116 	return false;
8117 }
8118 
8119 static enum mlxsw_sp_rif_type
8120 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8121 		      const struct net_device *dev)
8122 {
8123 	enum mlxsw_sp_fid_type type;
8124 
8125 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8126 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
8127 
8128 	/* Otherwise RIF type is derived from the type of the underlying FID. */
8129 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8130 		type = MLXSW_SP_FID_TYPE_8021Q;
8131 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8132 		type = MLXSW_SP_FID_TYPE_8021Q;
8133 	else if (netif_is_bridge_master(dev))
8134 		type = MLXSW_SP_FID_TYPE_8021D;
8135 	else
8136 		type = MLXSW_SP_FID_TYPE_RFID;
8137 
8138 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8139 }
8140 
8141 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
8142 {
8143 	int i;
8144 
8145 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
8146 		if (!mlxsw_sp->router->rifs[i]) {
8147 			*p_rif_index = i;
8148 			return 0;
8149 		}
8150 	}
8151 
8152 	return -ENOBUFS;
8153 }
8154 
8155 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8156 					       u16 vr_id,
8157 					       struct net_device *l3_dev)
8158 {
8159 	struct mlxsw_sp_rif *rif;
8160 
8161 	rif = kzalloc(rif_size, GFP_KERNEL);
8162 	if (!rif)
8163 		return NULL;
8164 
8165 	INIT_LIST_HEAD(&rif->nexthop_list);
8166 	INIT_LIST_HEAD(&rif->neigh_list);
8167 	if (l3_dev) {
8168 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
8169 		rif->mtu = l3_dev->mtu;
8170 		rif->dev = l3_dev;
8171 	}
8172 	rif->vr_id = vr_id;
8173 	rif->rif_index = rif_index;
8174 
8175 	return rif;
8176 }
8177 
8178 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8179 					   u16 rif_index)
8180 {
8181 	return mlxsw_sp->router->rifs[rif_index];
8182 }
8183 
8184 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8185 {
8186 	return rif->rif_index;
8187 }
8188 
8189 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8190 {
8191 	return lb_rif->common.rif_index;
8192 }
8193 
8194 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8195 {
8196 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
8197 	struct mlxsw_sp_vr *ul_vr;
8198 
8199 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8200 	if (WARN_ON(IS_ERR(ul_vr)))
8201 		return 0;
8202 
8203 	return ul_vr->id;
8204 }
8205 
8206 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8207 {
8208 	return lb_rif->ul_rif_id;
8209 }
8210 
8211 static bool
8212 mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
8213 {
8214 	return mlxsw_sp_rif_counter_valid_get(rif,
8215 					      MLXSW_SP_RIF_COUNTER_EGRESS) &&
8216 	       mlxsw_sp_rif_counter_valid_get(rif,
8217 					      MLXSW_SP_RIF_COUNTER_INGRESS);
8218 }
8219 
8220 static int
8221 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
8222 {
8223 	int err;
8224 
8225 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8226 	if (err)
8227 		return err;
8228 
8229 	/* Clear stale data. */
8230 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8231 					       MLXSW_SP_RIF_COUNTER_INGRESS,
8232 					       NULL);
8233 	if (err)
8234 		goto err_clear_ingress;
8235 
8236 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8237 	if (err)
8238 		goto err_alloc_egress;
8239 
8240 	/* Clear stale data. */
8241 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8242 					       MLXSW_SP_RIF_COUNTER_EGRESS,
8243 					       NULL);
8244 	if (err)
8245 		goto err_clear_egress;
8246 
8247 	return 0;
8248 
8249 err_clear_egress:
8250 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8251 err_alloc_egress:
8252 err_clear_ingress:
8253 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8254 	return err;
8255 }
8256 
8257 static void
8258 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
8259 {
8260 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8261 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8262 }
8263 
8264 static void
8265 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
8266 					  struct netdev_notifier_offload_xstats_info *info)
8267 {
8268 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8269 		return;
8270 	netdev_offload_xstats_report_used(info->report_used);
8271 }
8272 
8273 static int
8274 mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
8275 				    struct rtnl_hw_stats64 *p_stats)
8276 {
8277 	struct mlxsw_sp_rif_counter_set_basic ingress;
8278 	struct mlxsw_sp_rif_counter_set_basic egress;
8279 	int err;
8280 
8281 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8282 					       MLXSW_SP_RIF_COUNTER_INGRESS,
8283 					       &ingress);
8284 	if (err)
8285 		return err;
8286 
8287 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8288 					       MLXSW_SP_RIF_COUNTER_EGRESS,
8289 					       &egress);
8290 	if (err)
8291 		return err;
8292 
8293 #define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX)		\
8294 		((SET.good_unicast_ ## SFX) +		\
8295 		 (SET.good_multicast_ ## SFX) +		\
8296 		 (SET.good_broadcast_ ## SFX))
8297 
8298 	p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
8299 	p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
8300 	p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
8301 	p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
8302 	p_stats->rx_errors = ingress.error_packets;
8303 	p_stats->tx_errors = egress.error_packets;
8304 	p_stats->rx_dropped = ingress.discard_packets;
8305 	p_stats->tx_dropped = egress.discard_packets;
8306 	p_stats->multicast = ingress.good_multicast_packets +
8307 			     ingress.good_broadcast_packets;
8308 
8309 #undef MLXSW_SP_ROUTER_ALL_GOOD
8310 
8311 	return 0;
8312 }
8313 
8314 static int
8315 mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
8316 					   struct netdev_notifier_offload_xstats_info *info)
8317 {
8318 	struct rtnl_hw_stats64 stats = {};
8319 	int err;
8320 
8321 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8322 		return 0;
8323 
8324 	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
8325 	if (err)
8326 		return err;
8327 
8328 	netdev_offload_xstats_report_delta(info->report_delta, &stats);
8329 	return 0;
8330 }
8331 
8332 struct mlxsw_sp_router_hwstats_notify_work {
8333 	struct work_struct work;
8334 	struct net_device *dev;
8335 };
8336 
8337 static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
8338 {
8339 	struct mlxsw_sp_router_hwstats_notify_work *hws_work =
8340 		container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
8341 			     work);
8342 
8343 	rtnl_lock();
8344 	rtnl_offload_xstats_notify(hws_work->dev);
8345 	rtnl_unlock();
8346 	dev_put(hws_work->dev);
8347 	kfree(hws_work);
8348 }
8349 
8350 static void
8351 mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
8352 {
8353 	struct mlxsw_sp_router_hwstats_notify_work *hws_work;
8354 
8355 	/* To collect notification payload, the core ends up sending another
8356 	 * notifier block message, which would deadlock on the attempt to
8357 	 * acquire the router lock again. Just postpone the notification until
8358 	 * later.
8359 	 */
8360 
8361 	hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
8362 	if (!hws_work)
8363 		return;
8364 
8365 	INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
8366 	dev_hold(dev);
8367 	hws_work->dev = dev;
8368 	mlxsw_core_schedule_work(&hws_work->work);
8369 }
8370 
8371 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8372 {
8373 	return rif->dev->ifindex;
8374 }
8375 
8376 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
8377 {
8378 	return rif->dev;
8379 }
8380 
8381 static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
8382 {
8383 	struct rtnl_hw_stats64 stats = {};
8384 
8385 	if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
8386 		netdev_offload_xstats_push_delta(rif->dev,
8387 						 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8388 						 &stats);
8389 }
8390 
8391 static struct mlxsw_sp_rif *
8392 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8393 		    const struct mlxsw_sp_rif_params *params,
8394 		    struct netlink_ext_ack *extack)
8395 {
8396 	u32 tb_id = l3mdev_fib_table(params->dev);
8397 	const struct mlxsw_sp_rif_ops *ops;
8398 	struct mlxsw_sp_fid *fid = NULL;
8399 	enum mlxsw_sp_rif_type type;
8400 	struct mlxsw_sp_rif *rif;
8401 	struct mlxsw_sp_vr *vr;
8402 	u16 rif_index;
8403 	int i, err;
8404 
8405 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8406 	ops = mlxsw_sp->router->rif_ops_arr[type];
8407 
8408 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8409 	if (IS_ERR(vr))
8410 		return ERR_CAST(vr);
8411 	vr->rif_count++;
8412 
8413 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
8414 	if (err) {
8415 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8416 		goto err_rif_index_alloc;
8417 	}
8418 
8419 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
8420 	if (!rif) {
8421 		err = -ENOMEM;
8422 		goto err_rif_alloc;
8423 	}
8424 	dev_hold(rif->dev);
8425 	mlxsw_sp->router->rifs[rif_index] = rif;
8426 	rif->mlxsw_sp = mlxsw_sp;
8427 	rif->ops = ops;
8428 
8429 	if (ops->fid_get) {
8430 		fid = ops->fid_get(rif, extack);
8431 		if (IS_ERR(fid)) {
8432 			err = PTR_ERR(fid);
8433 			goto err_fid_get;
8434 		}
8435 		rif->fid = fid;
8436 	}
8437 
8438 	if (ops->setup)
8439 		ops->setup(rif, params);
8440 
8441 	err = ops->configure(rif, extack);
8442 	if (err)
8443 		goto err_configure;
8444 
8445 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8446 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8447 		if (err)
8448 			goto err_mr_rif_add;
8449 	}
8450 
8451 	if (netdev_offload_xstats_enabled(rif->dev,
8452 					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8453 		err = mlxsw_sp_router_port_l3_stats_enable(rif);
8454 		if (err)
8455 			goto err_stats_enable;
8456 		mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
8457 	} else {
8458 		mlxsw_sp_rif_counters_alloc(rif);
8459 	}
8460 
8461 	return rif;
8462 
8463 err_stats_enable:
8464 err_mr_rif_add:
8465 	for (i--; i >= 0; i--)
8466 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8467 	ops->deconfigure(rif);
8468 err_configure:
8469 	if (fid)
8470 		mlxsw_sp_fid_put(fid);
8471 err_fid_get:
8472 	mlxsw_sp->router->rifs[rif_index] = NULL;
8473 	dev_put(rif->dev);
8474 	kfree(rif);
8475 err_rif_alloc:
8476 err_rif_index_alloc:
8477 	vr->rif_count--;
8478 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8479 	return ERR_PTR(err);
8480 }
8481 
8482 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8483 {
8484 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
8485 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8486 	struct mlxsw_sp_fid *fid = rif->fid;
8487 	struct mlxsw_sp_vr *vr;
8488 	int i;
8489 
8490 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8491 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
8492 
8493 	if (netdev_offload_xstats_enabled(rif->dev,
8494 					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8495 		mlxsw_sp_rif_push_l3_stats(rif);
8496 		mlxsw_sp_router_port_l3_stats_disable(rif);
8497 		mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
8498 	} else {
8499 		mlxsw_sp_rif_counters_free(rif);
8500 	}
8501 
8502 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8503 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8504 	ops->deconfigure(rif);
8505 	if (fid)
8506 		/* Loopback RIFs are not associated with a FID. */
8507 		mlxsw_sp_fid_put(fid);
8508 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8509 	dev_put(rif->dev);
8510 	kfree(rif);
8511 	vr->rif_count--;
8512 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8513 }
8514 
8515 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8516 				 struct net_device *dev)
8517 {
8518 	struct mlxsw_sp_rif *rif;
8519 
8520 	mutex_lock(&mlxsw_sp->router->lock);
8521 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8522 	if (!rif)
8523 		goto out;
8524 	mlxsw_sp_rif_destroy(rif);
8525 out:
8526 	mutex_unlock(&mlxsw_sp->router->lock);
8527 }
8528 
8529 static void
8530 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8531 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8532 {
8533 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8534 
8535 	params->vid = mlxsw_sp_port_vlan->vid;
8536 	params->lag = mlxsw_sp_port->lagged;
8537 	if (params->lag)
8538 		params->lag_id = mlxsw_sp_port->lag_id;
8539 	else
8540 		params->system_port = mlxsw_sp_port->local_port;
8541 }
8542 
8543 static struct mlxsw_sp_rif_subport *
8544 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8545 {
8546 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
8547 }
8548 
8549 static struct mlxsw_sp_rif *
8550 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8551 			 const struct mlxsw_sp_rif_params *params,
8552 			 struct netlink_ext_ack *extack)
8553 {
8554 	struct mlxsw_sp_rif_subport *rif_subport;
8555 	struct mlxsw_sp_rif *rif;
8556 
8557 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8558 	if (!rif)
8559 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8560 
8561 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8562 	refcount_inc(&rif_subport->ref_count);
8563 	return rif;
8564 }
8565 
8566 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8567 {
8568 	struct mlxsw_sp_rif_subport *rif_subport;
8569 
8570 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8571 	if (!refcount_dec_and_test(&rif_subport->ref_count))
8572 		return;
8573 
8574 	mlxsw_sp_rif_destroy(rif);
8575 }
8576 
8577 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8578 						struct mlxsw_sp_rif_mac_profile *profile,
8579 						struct netlink_ext_ack *extack)
8580 {
8581 	u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8582 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8583 	int id;
8584 
8585 	id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8586 		       max_rif_mac_profiles, GFP_KERNEL);
8587 
8588 	if (id >= 0) {
8589 		profile->id = id;
8590 		return 0;
8591 	}
8592 
8593 	if (id == -ENOSPC)
8594 		NL_SET_ERR_MSG_MOD(extack,
8595 				   "Exceeded number of supported router interface MAC profiles");
8596 
8597 	return id;
8598 }
8599 
8600 static struct mlxsw_sp_rif_mac_profile *
8601 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8602 {
8603 	struct mlxsw_sp_rif_mac_profile *profile;
8604 
8605 	profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8606 			     mac_profile);
8607 	WARN_ON(!profile);
8608 	return profile;
8609 }
8610 
8611 static struct mlxsw_sp_rif_mac_profile *
8612 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8613 {
8614 	struct mlxsw_sp_rif_mac_profile *profile;
8615 
8616 	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8617 	if (!profile)
8618 		return NULL;
8619 
8620 	ether_addr_copy(profile->mac_prefix, mac);
8621 	refcount_set(&profile->ref_count, 1);
8622 	return profile;
8623 }
8624 
8625 static struct mlxsw_sp_rif_mac_profile *
8626 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8627 {
8628 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8629 	struct mlxsw_sp_rif_mac_profile *profile;
8630 	int id;
8631 
8632 	idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8633 		if (ether_addr_equal_masked(profile->mac_prefix, mac,
8634 					    mlxsw_sp->mac_mask))
8635 			return profile;
8636 	}
8637 
8638 	return NULL;
8639 }
8640 
8641 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
8642 {
8643 	const struct mlxsw_sp *mlxsw_sp = priv;
8644 
8645 	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
8646 }
8647 
8648 static struct mlxsw_sp_rif_mac_profile *
8649 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8650 				struct netlink_ext_ack *extack)
8651 {
8652 	struct mlxsw_sp_rif_mac_profile *profile;
8653 	int err;
8654 
8655 	profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8656 	if (!profile)
8657 		return ERR_PTR(-ENOMEM);
8658 
8659 	err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
8660 	if (err)
8661 		goto profile_index_alloc_err;
8662 
8663 	atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
8664 	return profile;
8665 
8666 profile_index_alloc_err:
8667 	kfree(profile);
8668 	return ERR_PTR(err);
8669 }
8670 
8671 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
8672 					     u8 mac_profile)
8673 {
8674 	struct mlxsw_sp_rif_mac_profile *profile;
8675 
8676 	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
8677 	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
8678 	kfree(profile);
8679 }
8680 
8681 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
8682 					const char *mac, u8 *p_mac_profile,
8683 					struct netlink_ext_ack *extack)
8684 {
8685 	struct mlxsw_sp_rif_mac_profile *profile;
8686 
8687 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
8688 	if (profile) {
8689 		refcount_inc(&profile->ref_count);
8690 		goto out;
8691 	}
8692 
8693 	profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
8694 	if (IS_ERR(profile))
8695 		return PTR_ERR(profile);
8696 
8697 out:
8698 	*p_mac_profile = profile->id;
8699 	return 0;
8700 }
8701 
8702 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
8703 					 u8 mac_profile)
8704 {
8705 	struct mlxsw_sp_rif_mac_profile *profile;
8706 
8707 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8708 			   mac_profile);
8709 	if (WARN_ON(!profile))
8710 		return;
8711 
8712 	if (!refcount_dec_and_test(&profile->ref_count))
8713 		return;
8714 
8715 	mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
8716 }
8717 
8718 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
8719 {
8720 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8721 	struct mlxsw_sp_rif_mac_profile *profile;
8722 
8723 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8724 			   rif->mac_profile_id);
8725 	if (WARN_ON(!profile))
8726 		return false;
8727 
8728 	return refcount_read(&profile->ref_count) > 1;
8729 }
8730 
8731 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
8732 					 const char *new_mac)
8733 {
8734 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8735 	struct mlxsw_sp_rif_mac_profile *profile;
8736 
8737 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8738 			   rif->mac_profile_id);
8739 	if (WARN_ON(!profile))
8740 		return -EINVAL;
8741 
8742 	ether_addr_copy(profile->mac_prefix, new_mac);
8743 	return 0;
8744 }
8745 
8746 static int
8747 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
8748 				 struct mlxsw_sp_rif *rif,
8749 				 const char *new_mac,
8750 				 struct netlink_ext_ack *extack)
8751 {
8752 	u8 mac_profile;
8753 	int err;
8754 
8755 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
8756 	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
8757 		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
8758 
8759 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
8760 					   &mac_profile, extack);
8761 	if (err)
8762 		return err;
8763 
8764 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
8765 	rif->mac_profile_id = mac_profile;
8766 	return 0;
8767 }
8768 
8769 static int
8770 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8771 				 struct net_device *l3_dev,
8772 				 struct netlink_ext_ack *extack)
8773 {
8774 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8775 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8776 	struct mlxsw_sp_rif_params params = {
8777 		.dev = l3_dev,
8778 	};
8779 	u16 vid = mlxsw_sp_port_vlan->vid;
8780 	struct mlxsw_sp_rif *rif;
8781 	struct mlxsw_sp_fid *fid;
8782 	int err;
8783 
8784 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8785 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8786 	if (IS_ERR(rif))
8787 		return PTR_ERR(rif);
8788 
8789 	/* FID was already created, just take a reference */
8790 	fid = rif->ops->fid_get(rif, extack);
8791 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8792 	if (err)
8793 		goto err_fid_port_vid_map;
8794 
8795 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8796 	if (err)
8797 		goto err_port_vid_learning_set;
8798 
8799 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8800 					BR_STATE_FORWARDING);
8801 	if (err)
8802 		goto err_port_vid_stp_set;
8803 
8804 	mlxsw_sp_port_vlan->fid = fid;
8805 
8806 	return 0;
8807 
8808 err_port_vid_stp_set:
8809 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8810 err_port_vid_learning_set:
8811 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8812 err_fid_port_vid_map:
8813 	mlxsw_sp_fid_put(fid);
8814 	mlxsw_sp_rif_subport_put(rif);
8815 	return err;
8816 }
8817 
8818 static void
8819 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8820 {
8821 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8822 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8823 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8824 	u16 vid = mlxsw_sp_port_vlan->vid;
8825 
8826 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8827 		return;
8828 
8829 	mlxsw_sp_port_vlan->fid = NULL;
8830 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8831 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8832 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8833 	mlxsw_sp_fid_put(fid);
8834 	mlxsw_sp_rif_subport_put(rif);
8835 }
8836 
8837 int
8838 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8839 			       struct net_device *l3_dev,
8840 			       struct netlink_ext_ack *extack)
8841 {
8842 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8843 	struct mlxsw_sp_rif *rif;
8844 	int err = 0;
8845 
8846 	mutex_lock(&mlxsw_sp->router->lock);
8847 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8848 	if (!rif)
8849 		goto out;
8850 
8851 	err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8852 					       extack);
8853 out:
8854 	mutex_unlock(&mlxsw_sp->router->lock);
8855 	return err;
8856 }
8857 
8858 void
8859 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8860 {
8861 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8862 
8863 	mutex_lock(&mlxsw_sp->router->lock);
8864 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8865 	mutex_unlock(&mlxsw_sp->router->lock);
8866 }
8867 
8868 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8869 					     struct net_device *port_dev,
8870 					     unsigned long event, u16 vid,
8871 					     struct netlink_ext_ack *extack)
8872 {
8873 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8874 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8875 
8876 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8877 	if (WARN_ON(!mlxsw_sp_port_vlan))
8878 		return -EINVAL;
8879 
8880 	switch (event) {
8881 	case NETDEV_UP:
8882 		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
8883 							l3_dev, extack);
8884 	case NETDEV_DOWN:
8885 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8886 		break;
8887 	}
8888 
8889 	return 0;
8890 }
8891 
8892 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
8893 					unsigned long event,
8894 					struct netlink_ext_ack *extack)
8895 {
8896 	if (netif_is_bridge_port(port_dev) ||
8897 	    netif_is_lag_port(port_dev) ||
8898 	    netif_is_ovs_port(port_dev))
8899 		return 0;
8900 
8901 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
8902 						 MLXSW_SP_DEFAULT_VID, extack);
8903 }
8904 
8905 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
8906 					 struct net_device *lag_dev,
8907 					 unsigned long event, u16 vid,
8908 					 struct netlink_ext_ack *extack)
8909 {
8910 	struct net_device *port_dev;
8911 	struct list_head *iter;
8912 	int err;
8913 
8914 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
8915 		if (mlxsw_sp_port_dev_check(port_dev)) {
8916 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
8917 								port_dev,
8918 								event, vid,
8919 								extack);
8920 			if (err)
8921 				return err;
8922 		}
8923 	}
8924 
8925 	return 0;
8926 }
8927 
8928 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
8929 				       unsigned long event,
8930 				       struct netlink_ext_ack *extack)
8931 {
8932 	if (netif_is_bridge_port(lag_dev))
8933 		return 0;
8934 
8935 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
8936 					     MLXSW_SP_DEFAULT_VID, extack);
8937 }
8938 
8939 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8940 					  struct net_device *l3_dev,
8941 					  unsigned long event,
8942 					  struct netlink_ext_ack *extack)
8943 {
8944 	struct mlxsw_sp_rif_params params = {
8945 		.dev = l3_dev,
8946 	};
8947 	struct mlxsw_sp_rif *rif;
8948 
8949 	switch (event) {
8950 	case NETDEV_UP:
8951 		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
8952 			u16 proto;
8953 
8954 			br_vlan_get_proto(l3_dev, &proto);
8955 			if (proto == ETH_P_8021AD) {
8956 				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
8957 				return -EOPNOTSUPP;
8958 			}
8959 		}
8960 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8961 		if (IS_ERR(rif))
8962 			return PTR_ERR(rif);
8963 		break;
8964 	case NETDEV_DOWN:
8965 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8966 		mlxsw_sp_rif_destroy(rif);
8967 		break;
8968 	}
8969 
8970 	return 0;
8971 }
8972 
8973 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8974 					struct net_device *vlan_dev,
8975 					unsigned long event,
8976 					struct netlink_ext_ack *extack)
8977 {
8978 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8979 	u16 vid = vlan_dev_vlan_id(vlan_dev);
8980 
8981 	if (netif_is_bridge_port(vlan_dev))
8982 		return 0;
8983 
8984 	if (mlxsw_sp_port_dev_check(real_dev))
8985 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
8986 							 event, vid, extack);
8987 	else if (netif_is_lag_master(real_dev))
8988 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
8989 						     vid, extack);
8990 	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
8991 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
8992 						      extack);
8993 
8994 	return 0;
8995 }
8996 
8997 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
8998 {
8999 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
9000 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9001 
9002 	return ether_addr_equal_masked(mac, vrrp4, mask);
9003 }
9004 
9005 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
9006 {
9007 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
9008 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9009 
9010 	return ether_addr_equal_masked(mac, vrrp6, mask);
9011 }
9012 
9013 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9014 				const u8 *mac, bool adding)
9015 {
9016 	char ritr_pl[MLXSW_REG_RITR_LEN];
9017 	u8 vrrp_id = adding ? mac[5] : 0;
9018 	int err;
9019 
9020 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
9021 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
9022 		return 0;
9023 
9024 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9025 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9026 	if (err)
9027 		return err;
9028 
9029 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
9030 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
9031 	else
9032 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
9033 
9034 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9035 }
9036 
9037 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
9038 				    const struct net_device *macvlan_dev,
9039 				    struct netlink_ext_ack *extack)
9040 {
9041 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9042 	struct mlxsw_sp_rif *rif;
9043 	int err;
9044 
9045 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9046 	if (!rif) {
9047 		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
9048 		return -EOPNOTSUPP;
9049 	}
9050 
9051 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9052 				  mlxsw_sp_fid_index(rif->fid), true);
9053 	if (err)
9054 		return err;
9055 
9056 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
9057 				   macvlan_dev->dev_addr, true);
9058 	if (err)
9059 		goto err_rif_vrrp_add;
9060 
9061 	/* Make sure the bridge driver does not have this MAC pointing at
9062 	 * some other port.
9063 	 */
9064 	if (rif->ops->fdb_del)
9065 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
9066 
9067 	return 0;
9068 
9069 err_rif_vrrp_add:
9070 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9071 			    mlxsw_sp_fid_index(rif->fid), false);
9072 	return err;
9073 }
9074 
9075 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9076 				       const struct net_device *macvlan_dev)
9077 {
9078 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9079 	struct mlxsw_sp_rif *rif;
9080 
9081 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9082 	/* If we do not have a RIF, then we already took care of
9083 	 * removing the macvlan's MAC during RIF deletion.
9084 	 */
9085 	if (!rif)
9086 		return;
9087 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
9088 			     false);
9089 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9090 			    mlxsw_sp_fid_index(rif->fid), false);
9091 }
9092 
9093 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9094 			      const struct net_device *macvlan_dev)
9095 {
9096 	mutex_lock(&mlxsw_sp->router->lock);
9097 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9098 	mutex_unlock(&mlxsw_sp->router->lock);
9099 }
9100 
9101 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
9102 					   struct net_device *macvlan_dev,
9103 					   unsigned long event,
9104 					   struct netlink_ext_ack *extack)
9105 {
9106 	switch (event) {
9107 	case NETDEV_UP:
9108 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
9109 	case NETDEV_DOWN:
9110 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9111 		break;
9112 	}
9113 
9114 	return 0;
9115 }
9116 
9117 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
9118 				     struct net_device *dev,
9119 				     unsigned long event,
9120 				     struct netlink_ext_ack *extack)
9121 {
9122 	if (mlxsw_sp_port_dev_check(dev))
9123 		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
9124 	else if (netif_is_lag_master(dev))
9125 		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
9126 	else if (netif_is_bridge_master(dev))
9127 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
9128 						      extack);
9129 	else if (is_vlan_dev(dev))
9130 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
9131 						    extack);
9132 	else if (netif_is_macvlan(dev))
9133 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
9134 						       extack);
9135 	else
9136 		return 0;
9137 }
9138 
9139 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
9140 				   unsigned long event, void *ptr)
9141 {
9142 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
9143 	struct net_device *dev = ifa->ifa_dev->dev;
9144 	struct mlxsw_sp_router *router;
9145 	struct mlxsw_sp_rif *rif;
9146 	int err = 0;
9147 
9148 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
9149 	if (event == NETDEV_UP)
9150 		return NOTIFY_DONE;
9151 
9152 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
9153 	mutex_lock(&router->lock);
9154 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
9155 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9156 		goto out;
9157 
9158 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
9159 out:
9160 	mutex_unlock(&router->lock);
9161 	return notifier_from_errno(err);
9162 }
9163 
9164 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
9165 				  unsigned long event, void *ptr)
9166 {
9167 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
9168 	struct net_device *dev = ivi->ivi_dev->dev;
9169 	struct mlxsw_sp *mlxsw_sp;
9170 	struct mlxsw_sp_rif *rif;
9171 	int err = 0;
9172 
9173 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9174 	if (!mlxsw_sp)
9175 		return NOTIFY_DONE;
9176 
9177 	mutex_lock(&mlxsw_sp->router->lock);
9178 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9179 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9180 		goto out;
9181 
9182 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
9183 out:
9184 	mutex_unlock(&mlxsw_sp->router->lock);
9185 	return notifier_from_errno(err);
9186 }
9187 
9188 struct mlxsw_sp_inet6addr_event_work {
9189 	struct work_struct work;
9190 	struct mlxsw_sp *mlxsw_sp;
9191 	struct net_device *dev;
9192 	unsigned long event;
9193 };
9194 
9195 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
9196 {
9197 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
9198 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
9199 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
9200 	struct net_device *dev = inet6addr_work->dev;
9201 	unsigned long event = inet6addr_work->event;
9202 	struct mlxsw_sp_rif *rif;
9203 
9204 	rtnl_lock();
9205 	mutex_lock(&mlxsw_sp->router->lock);
9206 
9207 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9208 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9209 		goto out;
9210 
9211 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
9212 out:
9213 	mutex_unlock(&mlxsw_sp->router->lock);
9214 	rtnl_unlock();
9215 	dev_put(dev);
9216 	kfree(inet6addr_work);
9217 }
9218 
9219 /* Called with rcu_read_lock() */
9220 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
9221 				    unsigned long event, void *ptr)
9222 {
9223 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
9224 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
9225 	struct net_device *dev = if6->idev->dev;
9226 	struct mlxsw_sp_router *router;
9227 
9228 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
9229 	if (event == NETDEV_UP)
9230 		return NOTIFY_DONE;
9231 
9232 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
9233 	if (!inet6addr_work)
9234 		return NOTIFY_BAD;
9235 
9236 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
9237 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
9238 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
9239 	inet6addr_work->dev = dev;
9240 	inet6addr_work->event = event;
9241 	dev_hold(dev);
9242 	mlxsw_core_schedule_work(&inet6addr_work->work);
9243 
9244 	return NOTIFY_DONE;
9245 }
9246 
9247 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
9248 				   unsigned long event, void *ptr)
9249 {
9250 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
9251 	struct net_device *dev = i6vi->i6vi_dev->dev;
9252 	struct mlxsw_sp *mlxsw_sp;
9253 	struct mlxsw_sp_rif *rif;
9254 	int err = 0;
9255 
9256 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9257 	if (!mlxsw_sp)
9258 		return NOTIFY_DONE;
9259 
9260 	mutex_lock(&mlxsw_sp->router->lock);
9261 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9262 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9263 		goto out;
9264 
9265 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
9266 out:
9267 	mutex_unlock(&mlxsw_sp->router->lock);
9268 	return notifier_from_errno(err);
9269 }
9270 
9271 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9272 			     const char *mac, int mtu, u8 mac_profile)
9273 {
9274 	char ritr_pl[MLXSW_REG_RITR_LEN];
9275 	int err;
9276 
9277 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9278 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9279 	if (err)
9280 		return err;
9281 
9282 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
9283 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
9284 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
9285 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
9286 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9287 }
9288 
9289 static int
9290 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9291 				  struct mlxsw_sp_rif *rif,
9292 				  struct netlink_ext_ack *extack)
9293 {
9294 	struct net_device *dev = rif->dev;
9295 	u8 old_mac_profile;
9296 	u16 fid_index;
9297 	int err;
9298 
9299 	fid_index = mlxsw_sp_fid_index(rif->fid);
9300 
9301 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9302 	if (err)
9303 		return err;
9304 
9305 	old_mac_profile = rif->mac_profile_id;
9306 	err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
9307 					       extack);
9308 	if (err)
9309 		goto err_rif_mac_profile_replace;
9310 
9311 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9312 				dev->mtu, rif->mac_profile_id);
9313 	if (err)
9314 		goto err_rif_edit;
9315 
9316 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9317 	if (err)
9318 		goto err_rif_fdb_op;
9319 
9320 	if (rif->mtu != dev->mtu) {
9321 		struct mlxsw_sp_vr *vr;
9322 		int i;
9323 
9324 		/* The RIF is relevant only to its mr_table instance, as unlike
9325 		 * unicast routing, in multicast routing a RIF cannot be shared
9326 		 * between several multicast routing tables.
9327 		 */
9328 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
9329 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9330 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
9331 						   rif, dev->mtu);
9332 	}
9333 
9334 	ether_addr_copy(rif->addr, dev->dev_addr);
9335 	rif->mtu = dev->mtu;
9336 
9337 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9338 
9339 	return 0;
9340 
9341 err_rif_fdb_op:
9342 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
9343 			  old_mac_profile);
9344 err_rif_edit:
9345 	mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9346 err_rif_mac_profile_replace:
9347 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9348 	return err;
9349 }
9350 
9351 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9352 			    struct netdev_notifier_pre_changeaddr_info *info)
9353 {
9354 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9355 	struct mlxsw_sp_rif_mac_profile *profile;
9356 	struct netlink_ext_ack *extack;
9357 	u8 max_rif_mac_profiles;
9358 	u64 occ;
9359 
9360 	extack = netdev_notifier_info_to_extack(&info->info);
9361 
9362 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9363 	if (profile)
9364 		return 0;
9365 
9366 	max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9367 	occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9368 	if (occ < max_rif_mac_profiles)
9369 		return 0;
9370 
9371 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9372 		return 0;
9373 
9374 	NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9375 	return -ENOBUFS;
9376 }
9377 
9378 static int
9379 mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
9380 					unsigned long event,
9381 					struct netdev_notifier_offload_xstats_info *info)
9382 {
9383 	switch (info->type) {
9384 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
9385 		break;
9386 	default:
9387 		return 0;
9388 	}
9389 
9390 	switch (event) {
9391 	case NETDEV_OFFLOAD_XSTATS_ENABLE:
9392 		return mlxsw_sp_router_port_l3_stats_enable(rif);
9393 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
9394 		mlxsw_sp_router_port_l3_stats_disable(rif);
9395 		return 0;
9396 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9397 		mlxsw_sp_router_port_l3_stats_report_used(rif, info);
9398 		return 0;
9399 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9400 		return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
9401 	}
9402 
9403 	WARN_ON_ONCE(1);
9404 	return 0;
9405 }
9406 
9407 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
9408 					 unsigned long event, void *ptr)
9409 {
9410 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
9411 	struct mlxsw_sp *mlxsw_sp;
9412 	struct mlxsw_sp_rif *rif;
9413 	int err = 0;
9414 
9415 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9416 	if (!mlxsw_sp)
9417 		return 0;
9418 
9419 	mutex_lock(&mlxsw_sp->router->lock);
9420 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9421 	if (!rif)
9422 		goto out;
9423 
9424 	switch (event) {
9425 	case NETDEV_CHANGEMTU:
9426 	case NETDEV_CHANGEADDR:
9427 		err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
9428 		break;
9429 	case NETDEV_PRE_CHANGEADDR:
9430 		err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
9431 		break;
9432 	case NETDEV_OFFLOAD_XSTATS_ENABLE:
9433 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
9434 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9435 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9436 		err = mlxsw_sp_router_port_offload_xstats_cmd(rif, event, ptr);
9437 		break;
9438 	default:
9439 		WARN_ON_ONCE(1);
9440 		break;
9441 	}
9442 
9443 out:
9444 	mutex_unlock(&mlxsw_sp->router->lock);
9445 	return err;
9446 }
9447 
9448 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
9449 				  struct net_device *l3_dev,
9450 				  struct netlink_ext_ack *extack)
9451 {
9452 	struct mlxsw_sp_rif *rif;
9453 
9454 	/* If netdev is already associated with a RIF, then we need to
9455 	 * destroy it and create a new one with the new virtual router ID.
9456 	 */
9457 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9458 	if (rif)
9459 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
9460 					  extack);
9461 
9462 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
9463 }
9464 
9465 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9466 				    struct net_device *l3_dev)
9467 {
9468 	struct mlxsw_sp_rif *rif;
9469 
9470 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9471 	if (!rif)
9472 		return;
9473 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
9474 }
9475 
9476 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
9477 				 struct netdev_notifier_changeupper_info *info)
9478 {
9479 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
9480 	int err = 0;
9481 
9482 	/* We do not create a RIF for a macvlan, but only use it to
9483 	 * direct more MAC addresses to the router.
9484 	 */
9485 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
9486 		return 0;
9487 
9488 	mutex_lock(&mlxsw_sp->router->lock);
9489 	switch (event) {
9490 	case NETDEV_PRECHANGEUPPER:
9491 		break;
9492 	case NETDEV_CHANGEUPPER:
9493 		if (info->linking) {
9494 			struct netlink_ext_ack *extack;
9495 
9496 			extack = netdev_notifier_info_to_extack(&info->info);
9497 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
9498 		} else {
9499 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
9500 		}
9501 		break;
9502 	}
9503 	mutex_unlock(&mlxsw_sp->router->lock);
9504 
9505 	return err;
9506 }
9507 
9508 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
9509 					struct netdev_nested_priv *priv)
9510 {
9511 	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
9512 
9513 	if (!netif_is_macvlan(dev))
9514 		return 0;
9515 
9516 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9517 				   mlxsw_sp_fid_index(rif->fid), false);
9518 }
9519 
9520 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
9521 {
9522 	struct netdev_nested_priv priv = {
9523 		.data = (void *)rif,
9524 	};
9525 
9526 	if (!netif_is_macvlan_port(rif->dev))
9527 		return 0;
9528 
9529 	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
9530 	return netdev_walk_all_upper_dev_rcu(rif->dev,
9531 					     __mlxsw_sp_rif_macvlan_flush, &priv);
9532 }
9533 
9534 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
9535 				       const struct mlxsw_sp_rif_params *params)
9536 {
9537 	struct mlxsw_sp_rif_subport *rif_subport;
9538 
9539 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
9540 	refcount_set(&rif_subport->ref_count, 1);
9541 	rif_subport->vid = params->vid;
9542 	rif_subport->lag = params->lag;
9543 	if (params->lag)
9544 		rif_subport->lag_id = params->lag_id;
9545 	else
9546 		rif_subport->system_port = params->system_port;
9547 }
9548 
9549 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
9550 {
9551 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9552 	struct mlxsw_sp_rif_subport *rif_subport;
9553 	char ritr_pl[MLXSW_REG_RITR_LEN];
9554 
9555 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
9556 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
9557 			    rif->rif_index, rif->vr_id, rif->dev->mtu);
9558 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9559 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9560 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
9561 				  rif_subport->lag ? rif_subport->lag_id :
9562 						     rif_subport->system_port,
9563 				  rif_subport->vid);
9564 
9565 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9566 }
9567 
9568 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
9569 					  struct netlink_ext_ack *extack)
9570 {
9571 	u8 mac_profile;
9572 	int err;
9573 
9574 	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
9575 					   &mac_profile, extack);
9576 	if (err)
9577 		return err;
9578 	rif->mac_profile_id = mac_profile;
9579 
9580 	err = mlxsw_sp_rif_subport_op(rif, true);
9581 	if (err)
9582 		goto err_rif_subport_op;
9583 
9584 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9585 				  mlxsw_sp_fid_index(rif->fid), true);
9586 	if (err)
9587 		goto err_rif_fdb_op;
9588 
9589 	mlxsw_sp_fid_rif_set(rif->fid, rif);
9590 	return 0;
9591 
9592 err_rif_fdb_op:
9593 	mlxsw_sp_rif_subport_op(rif, false);
9594 err_rif_subport_op:
9595 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
9596 	return err;
9597 }
9598 
9599 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
9600 {
9601 	struct mlxsw_sp_fid *fid = rif->fid;
9602 
9603 	mlxsw_sp_fid_rif_set(fid, NULL);
9604 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9605 			    mlxsw_sp_fid_index(fid), false);
9606 	mlxsw_sp_rif_macvlan_flush(rif);
9607 	mlxsw_sp_rif_subport_op(rif, false);
9608 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9609 }
9610 
9611 static struct mlxsw_sp_fid *
9612 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
9613 			     struct netlink_ext_ack *extack)
9614 {
9615 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
9616 }
9617 
9618 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
9619 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
9620 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
9621 	.setup			= mlxsw_sp_rif_subport_setup,
9622 	.configure		= mlxsw_sp_rif_subport_configure,
9623 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
9624 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
9625 };
9626 
9627 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
9628 				    enum mlxsw_reg_ritr_if_type type,
9629 				    u16 vid_fid, bool enable)
9630 {
9631 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9632 	char ritr_pl[MLXSW_REG_RITR_LEN];
9633 
9634 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
9635 			    rif->dev->mtu);
9636 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9637 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9638 	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
9639 
9640 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9641 }
9642 
9643 u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
9644 {
9645 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
9646 }
9647 
9648 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
9649 				      struct netlink_ext_ack *extack)
9650 {
9651 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9652 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9653 	u8 mac_profile;
9654 	int err;
9655 
9656 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
9657 					   &mac_profile, extack);
9658 	if (err)
9659 		return err;
9660 	rif->mac_profile_id = mac_profile;
9661 
9662 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
9663 				       true);
9664 	if (err)
9665 		goto err_rif_vlan_fid_op;
9666 
9667 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9668 				     mlxsw_sp_router_port(mlxsw_sp), true);
9669 	if (err)
9670 		goto err_fid_mc_flood_set;
9671 
9672 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9673 				     mlxsw_sp_router_port(mlxsw_sp), true);
9674 	if (err)
9675 		goto err_fid_bc_flood_set;
9676 
9677 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9678 				  mlxsw_sp_fid_index(rif->fid), true);
9679 	if (err)
9680 		goto err_rif_fdb_op;
9681 
9682 	mlxsw_sp_fid_rif_set(rif->fid, rif);
9683 	return 0;
9684 
9685 err_rif_fdb_op:
9686 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9687 			       mlxsw_sp_router_port(mlxsw_sp), false);
9688 err_fid_bc_flood_set:
9689 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9690 			       mlxsw_sp_router_port(mlxsw_sp), false);
9691 err_fid_mc_flood_set:
9692 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
9693 err_rif_vlan_fid_op:
9694 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
9695 	return err;
9696 }
9697 
9698 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
9699 {
9700 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9701 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9702 	struct mlxsw_sp_fid *fid = rif->fid;
9703 
9704 	mlxsw_sp_fid_rif_set(fid, NULL);
9705 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9706 			    mlxsw_sp_fid_index(fid), false);
9707 	mlxsw_sp_rif_macvlan_flush(rif);
9708 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9709 			       mlxsw_sp_router_port(mlxsw_sp), false);
9710 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9711 			       mlxsw_sp_router_port(mlxsw_sp), false);
9712 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
9713 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9714 }
9715 
9716 static struct mlxsw_sp_fid *
9717 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
9718 			 struct netlink_ext_ack *extack)
9719 {
9720 	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
9721 }
9722 
9723 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9724 {
9725 	struct switchdev_notifier_fdb_info info = {};
9726 	struct net_device *dev;
9727 
9728 	dev = br_fdb_find_port(rif->dev, mac, 0);
9729 	if (!dev)
9730 		return;
9731 
9732 	info.addr = mac;
9733 	info.vid = 0;
9734 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9735 				 NULL);
9736 }
9737 
9738 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
9739 	.type			= MLXSW_SP_RIF_TYPE_FID,
9740 	.rif_size		= sizeof(struct mlxsw_sp_rif),
9741 	.configure		= mlxsw_sp_rif_fid_configure,
9742 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
9743 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
9744 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
9745 };
9746 
9747 static struct mlxsw_sp_fid *
9748 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
9749 			  struct netlink_ext_ack *extack)
9750 {
9751 	struct net_device *br_dev;
9752 	u16 vid;
9753 	int err;
9754 
9755 	if (is_vlan_dev(rif->dev)) {
9756 		vid = vlan_dev_vlan_id(rif->dev);
9757 		br_dev = vlan_dev_real_dev(rif->dev);
9758 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
9759 			return ERR_PTR(-EINVAL);
9760 	} else {
9761 		err = br_vlan_get_pvid(rif->dev, &vid);
9762 		if (err < 0 || !vid) {
9763 			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
9764 			return ERR_PTR(-EINVAL);
9765 		}
9766 	}
9767 
9768 	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
9769 }
9770 
9771 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9772 {
9773 	struct switchdev_notifier_fdb_info info = {};
9774 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9775 	struct net_device *br_dev;
9776 	struct net_device *dev;
9777 
9778 	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
9779 	dev = br_fdb_find_port(br_dev, mac, vid);
9780 	if (!dev)
9781 		return;
9782 
9783 	info.addr = mac;
9784 	info.vid = vid;
9785 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9786 				 NULL);
9787 }
9788 
9789 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
9790 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
9791 	.rif_size		= sizeof(struct mlxsw_sp_rif),
9792 	.configure		= mlxsw_sp_rif_fid_configure,
9793 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
9794 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
9795 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
9796 };
9797 
9798 static struct mlxsw_sp_rif_ipip_lb *
9799 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
9800 {
9801 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
9802 }
9803 
9804 static void
9805 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
9806 			   const struct mlxsw_sp_rif_params *params)
9807 {
9808 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
9809 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
9810 
9811 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
9812 				 common);
9813 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
9814 	rif_lb->lb_config = params_lb->lb_config;
9815 }
9816 
9817 static int
9818 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
9819 				struct netlink_ext_ack *extack)
9820 {
9821 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9822 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9823 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9824 	struct mlxsw_sp_vr *ul_vr;
9825 	int err;
9826 
9827 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
9828 	if (IS_ERR(ul_vr))
9829 		return PTR_ERR(ul_vr);
9830 
9831 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
9832 	if (err)
9833 		goto err_loopback_op;
9834 
9835 	lb_rif->ul_vr_id = ul_vr->id;
9836 	lb_rif->ul_rif_id = 0;
9837 	++ul_vr->rif_count;
9838 	return 0;
9839 
9840 err_loopback_op:
9841 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9842 	return err;
9843 }
9844 
9845 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9846 {
9847 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9848 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9849 	struct mlxsw_sp_vr *ul_vr;
9850 
9851 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
9852 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
9853 
9854 	--ul_vr->rif_count;
9855 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9856 }
9857 
9858 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
9859 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
9860 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
9861 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
9862 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
9863 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
9864 };
9865 
9866 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
9867 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
9868 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
9869 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
9870 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
9871 };
9872 
9873 static int
9874 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
9875 {
9876 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9877 	char ritr_pl[MLXSW_REG_RITR_LEN];
9878 
9879 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
9880 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
9881 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
9882 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
9883 
9884 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9885 }
9886 
9887 static struct mlxsw_sp_rif *
9888 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
9889 		       struct netlink_ext_ack *extack)
9890 {
9891 	struct mlxsw_sp_rif *ul_rif;
9892 	u16 rif_index;
9893 	int err;
9894 
9895 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
9896 	if (err) {
9897 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
9898 		return ERR_PTR(err);
9899 	}
9900 
9901 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
9902 	if (!ul_rif)
9903 		return ERR_PTR(-ENOMEM);
9904 
9905 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
9906 	ul_rif->mlxsw_sp = mlxsw_sp;
9907 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
9908 	if (err)
9909 		goto ul_rif_op_err;
9910 
9911 	return ul_rif;
9912 
9913 ul_rif_op_err:
9914 	mlxsw_sp->router->rifs[rif_index] = NULL;
9915 	kfree(ul_rif);
9916 	return ERR_PTR(err);
9917 }
9918 
9919 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
9920 {
9921 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9922 
9923 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
9924 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
9925 	kfree(ul_rif);
9926 }
9927 
9928 static struct mlxsw_sp_rif *
9929 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
9930 		    struct netlink_ext_ack *extack)
9931 {
9932 	struct mlxsw_sp_vr *vr;
9933 	int err;
9934 
9935 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
9936 	if (IS_ERR(vr))
9937 		return ERR_CAST(vr);
9938 
9939 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
9940 		return vr->ul_rif;
9941 
9942 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
9943 	if (IS_ERR(vr->ul_rif)) {
9944 		err = PTR_ERR(vr->ul_rif);
9945 		goto err_ul_rif_create;
9946 	}
9947 
9948 	vr->rif_count++;
9949 	refcount_set(&vr->ul_rif_refcnt, 1);
9950 
9951 	return vr->ul_rif;
9952 
9953 err_ul_rif_create:
9954 	mlxsw_sp_vr_put(mlxsw_sp, vr);
9955 	return ERR_PTR(err);
9956 }
9957 
9958 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
9959 {
9960 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9961 	struct mlxsw_sp_vr *vr;
9962 
9963 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
9964 
9965 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
9966 		return;
9967 
9968 	vr->rif_count--;
9969 	mlxsw_sp_ul_rif_destroy(ul_rif);
9970 	mlxsw_sp_vr_put(mlxsw_sp, vr);
9971 }
9972 
9973 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
9974 			       u16 *ul_rif_index)
9975 {
9976 	struct mlxsw_sp_rif *ul_rif;
9977 	int err = 0;
9978 
9979 	mutex_lock(&mlxsw_sp->router->lock);
9980 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
9981 	if (IS_ERR(ul_rif)) {
9982 		err = PTR_ERR(ul_rif);
9983 		goto out;
9984 	}
9985 	*ul_rif_index = ul_rif->rif_index;
9986 out:
9987 	mutex_unlock(&mlxsw_sp->router->lock);
9988 	return err;
9989 }
9990 
9991 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
9992 {
9993 	struct mlxsw_sp_rif *ul_rif;
9994 
9995 	mutex_lock(&mlxsw_sp->router->lock);
9996 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
9997 	if (WARN_ON(!ul_rif))
9998 		goto out;
9999 
10000 	mlxsw_sp_ul_rif_put(ul_rif);
10001 out:
10002 	mutex_unlock(&mlxsw_sp->router->lock);
10003 }
10004 
10005 static int
10006 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10007 				struct netlink_ext_ack *extack)
10008 {
10009 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10010 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
10011 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10012 	struct mlxsw_sp_rif *ul_rif;
10013 	int err;
10014 
10015 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
10016 	if (IS_ERR(ul_rif))
10017 		return PTR_ERR(ul_rif);
10018 
10019 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
10020 	if (err)
10021 		goto err_loopback_op;
10022 
10023 	lb_rif->ul_vr_id = 0;
10024 	lb_rif->ul_rif_id = ul_rif->rif_index;
10025 
10026 	return 0;
10027 
10028 err_loopback_op:
10029 	mlxsw_sp_ul_rif_put(ul_rif);
10030 	return err;
10031 }
10032 
10033 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10034 {
10035 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10036 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10037 	struct mlxsw_sp_rif *ul_rif;
10038 
10039 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
10040 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
10041 	mlxsw_sp_ul_rif_put(ul_rif);
10042 }
10043 
10044 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
10045 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
10046 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
10047 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
10048 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
10049 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
10050 };
10051 
10052 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
10053 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
10054 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
10055 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
10056 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
10057 };
10058 
10059 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
10060 {
10061 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10062 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10063 	struct mlxsw_core *core = mlxsw_sp->core;
10064 
10065 	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
10066 		return -EIO;
10067 	mlxsw_sp->router->max_rif_mac_profile =
10068 		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
10069 
10070 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
10071 					 sizeof(struct mlxsw_sp_rif *),
10072 					 GFP_KERNEL);
10073 	if (!mlxsw_sp->router->rifs)
10074 		return -ENOMEM;
10075 
10076 	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
10077 	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
10078 	devlink_resource_occ_get_register(devlink,
10079 					  MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
10080 					  mlxsw_sp_rif_mac_profiles_occ_get,
10081 					  mlxsw_sp);
10082 
10083 	return 0;
10084 }
10085 
10086 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
10087 {
10088 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10089 	int i;
10090 
10091 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
10092 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
10093 
10094 	devlink_resource_occ_get_unregister(devlink,
10095 					    MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
10096 	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
10097 	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
10098 	kfree(mlxsw_sp->router->rifs);
10099 }
10100 
10101 static int
10102 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
10103 {
10104 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
10105 
10106 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
10107 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
10108 }
10109 
10110 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
10111 {
10112 	int err;
10113 
10114 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
10115 
10116 	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
10117 	if (err)
10118 		return err;
10119 	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
10120 	if (err)
10121 		return err;
10122 
10123 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
10124 }
10125 
10126 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
10127 {
10128 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
10129 	return mlxsw_sp_ipips_init(mlxsw_sp);
10130 }
10131 
10132 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
10133 {
10134 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
10135 	return mlxsw_sp_ipips_init(mlxsw_sp);
10136 }
10137 
10138 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
10139 {
10140 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
10141 }
10142 
10143 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
10144 {
10145 	struct mlxsw_sp_router *router;
10146 
10147 	/* Flush pending FIB notifications and then flush the device's
10148 	 * table before requesting another dump. The FIB notification
10149 	 * block is unregistered, so no need to take RTNL.
10150 	 */
10151 	mlxsw_core_flush_owq();
10152 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
10153 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
10154 }
10155 
10156 #ifdef CONFIG_IP_ROUTE_MULTIPATH
10157 struct mlxsw_sp_mp_hash_config {
10158 	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
10159 	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
10160 	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
10161 	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
10162 	bool inc_parsing_depth;
10163 };
10164 
10165 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
10166 	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
10167 
10168 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
10169 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
10170 
10171 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
10172 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
10173 
10174 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
10175 {
10176 	unsigned long *inner_headers = config->inner_headers;
10177 	unsigned long *inner_fields = config->inner_fields;
10178 
10179 	/* IPv4 inner */
10180 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10181 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10182 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10183 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10184 	/* IPv6 inner */
10185 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10186 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10187 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10188 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10189 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10190 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10191 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10192 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10193 }
10194 
10195 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10196 {
10197 	unsigned long *headers = config->headers;
10198 	unsigned long *fields = config->fields;
10199 
10200 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
10201 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
10202 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
10203 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
10204 }
10205 
10206 static void
10207 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
10208 			      u32 hash_fields)
10209 {
10210 	unsigned long *inner_headers = config->inner_headers;
10211 	unsigned long *inner_fields = config->inner_fields;
10212 
10213 	/* IPv4 Inner */
10214 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10215 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10216 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
10217 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10218 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
10219 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10220 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
10221 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
10222 	/* IPv6 inner */
10223 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10224 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10225 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
10226 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10227 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10228 	}
10229 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
10230 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10231 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10232 	}
10233 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
10234 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10235 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
10236 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10237 	/* L4 inner */
10238 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
10239 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
10240 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
10241 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
10242 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
10243 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
10244 }
10245 
10246 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
10247 				   struct mlxsw_sp_mp_hash_config *config)
10248 {
10249 	struct net *net = mlxsw_sp_net(mlxsw_sp);
10250 	unsigned long *headers = config->headers;
10251 	unsigned long *fields = config->fields;
10252 	u32 hash_fields;
10253 
10254 	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
10255 	case 0:
10256 		mlxsw_sp_mp4_hash_outer_addr(config);
10257 		break;
10258 	case 1:
10259 		mlxsw_sp_mp4_hash_outer_addr(config);
10260 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
10261 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
10262 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10263 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10264 		break;
10265 	case 2:
10266 		/* Outer */
10267 		mlxsw_sp_mp4_hash_outer_addr(config);
10268 		/* Inner */
10269 		mlxsw_sp_mp_hash_inner_l3(config);
10270 		break;
10271 	case 3:
10272 		hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
10273 		/* Outer */
10274 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
10275 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
10276 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
10277 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
10278 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
10279 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
10280 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
10281 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10282 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
10283 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10284 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10285 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10286 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10287 		/* Inner */
10288 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10289 		break;
10290 	}
10291 }
10292 
10293 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10294 {
10295 	unsigned long *headers = config->headers;
10296 	unsigned long *fields = config->fields;
10297 
10298 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10299 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10300 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10301 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10302 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10303 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10304 }
10305 
10306 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
10307 				   struct mlxsw_sp_mp_hash_config *config)
10308 {
10309 	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
10310 	unsigned long *headers = config->headers;
10311 	unsigned long *fields = config->fields;
10312 
10313 	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
10314 	case 0:
10315 		mlxsw_sp_mp6_hash_outer_addr(config);
10316 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10317 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10318 		break;
10319 	case 1:
10320 		mlxsw_sp_mp6_hash_outer_addr(config);
10321 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10322 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10323 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10324 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10325 		break;
10326 	case 2:
10327 		/* Outer */
10328 		mlxsw_sp_mp6_hash_outer_addr(config);
10329 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10330 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10331 		/* Inner */
10332 		mlxsw_sp_mp_hash_inner_l3(config);
10333 		config->inc_parsing_depth = true;
10334 		break;
10335 	case 3:
10336 		/* Outer */
10337 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10338 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10339 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10340 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
10341 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10342 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10343 		}
10344 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
10345 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10346 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10347 		}
10348 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10349 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10350 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
10351 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10352 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10353 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10354 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10355 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10356 		/* Inner */
10357 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10358 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
10359 			config->inc_parsing_depth = true;
10360 		break;
10361 	}
10362 }
10363 
10364 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
10365 						 bool old_inc_parsing_depth,
10366 						 bool new_inc_parsing_depth)
10367 {
10368 	int err;
10369 
10370 	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
10371 		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
10372 		if (err)
10373 			return err;
10374 		mlxsw_sp->router->inc_parsing_depth = true;
10375 	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
10376 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
10377 		mlxsw_sp->router->inc_parsing_depth = false;
10378 	}
10379 
10380 	return 0;
10381 }
10382 
10383 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10384 {
10385 	bool old_inc_parsing_depth, new_inc_parsing_depth;
10386 	struct mlxsw_sp_mp_hash_config config = {};
10387 	char recr2_pl[MLXSW_REG_RECR2_LEN];
10388 	unsigned long bit;
10389 	u32 seed;
10390 	int err;
10391 
10392 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
10393 	mlxsw_reg_recr2_pack(recr2_pl, seed);
10394 	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
10395 	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
10396 
10397 	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
10398 	new_inc_parsing_depth = config.inc_parsing_depth;
10399 	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
10400 						    old_inc_parsing_depth,
10401 						    new_inc_parsing_depth);
10402 	if (err)
10403 		return err;
10404 
10405 	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
10406 		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
10407 	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
10408 		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
10409 	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
10410 		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
10411 	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
10412 		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
10413 
10414 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
10415 	if (err)
10416 		goto err_reg_write;
10417 
10418 	return 0;
10419 
10420 err_reg_write:
10421 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
10422 					      old_inc_parsing_depth);
10423 	return err;
10424 }
10425 #else
10426 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10427 {
10428 	return 0;
10429 }
10430 #endif
10431 
10432 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
10433 {
10434 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
10435 	unsigned int i;
10436 
10437 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
10438 
10439 	/* HW is determining switch priority based on DSCP-bits, but the
10440 	 * kernel is still doing that based on the ToS. Since there's a
10441 	 * mismatch in bits we need to make sure to translate the right
10442 	 * value ToS would observe, skipping the 2 least-significant ECN bits.
10443 	 */
10444 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
10445 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
10446 
10447 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
10448 }
10449 
10450 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
10451 {
10452 	struct net *net = mlxsw_sp_net(mlxsw_sp);
10453 	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
10454 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
10455 	u64 max_rifs;
10456 
10457 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
10458 		return -EIO;
10459 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10460 
10461 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
10462 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
10463 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
10464 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10465 }
10466 
10467 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10468 {
10469 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
10470 
10471 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
10472 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10473 }
10474 
10475 static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
10476 	.init = mlxsw_sp_router_ll_basic_init,
10477 	.ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
10478 	.ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
10479 	.raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
10480 	.fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
10481 	.fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
10482 	.fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
10483 	.fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
10484 	.fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
10485 	.fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
10486 	.fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
10487 	.fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
10488 };
10489 
10490 static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
10491 {
10492 	size_t max_size = 0;
10493 	int i;
10494 
10495 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
10496 		size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
10497 
10498 		if (size > max_size)
10499 			max_size = size;
10500 	}
10501 	router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
10502 				    GFP_KERNEL);
10503 	if (!router->ll_op_ctx)
10504 		return -ENOMEM;
10505 	INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
10506 	return 0;
10507 }
10508 
10509 static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
10510 {
10511 	WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
10512 	kfree(router->ll_op_ctx);
10513 }
10514 
10515 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
10516 {
10517 	u16 lb_rif_index;
10518 	int err;
10519 
10520 	/* Create a generic loopback RIF associated with the main table
10521 	 * (default VRF). Any table can be used, but the main table exists
10522 	 * anyway, so we do not waste resources.
10523 	 */
10524 	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
10525 					 &lb_rif_index);
10526 	if (err)
10527 		return err;
10528 
10529 	mlxsw_sp->router->lb_rif_index = lb_rif_index;
10530 
10531 	return 0;
10532 }
10533 
10534 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
10535 {
10536 	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
10537 }
10538 
10539 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
10540 {
10541 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
10542 
10543 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
10544 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
10545 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10546 
10547 	return 0;
10548 }
10549 
10550 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
10551 	.init = mlxsw_sp1_router_init,
10552 	.ipips_init = mlxsw_sp1_ipips_init,
10553 };
10554 
10555 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
10556 {
10557 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
10558 
10559 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
10560 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
10561 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10562 
10563 	return 0;
10564 }
10565 
10566 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
10567 	.init = mlxsw_sp2_router_init,
10568 	.ipips_init = mlxsw_sp2_ipips_init,
10569 };
10570 
10571 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
10572 			 struct netlink_ext_ack *extack)
10573 {
10574 	struct mlxsw_sp_router *router;
10575 	int err;
10576 
10577 	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
10578 	if (!router)
10579 		return -ENOMEM;
10580 	mutex_init(&router->lock);
10581 	mlxsw_sp->router = router;
10582 	router->mlxsw_sp = mlxsw_sp;
10583 
10584 	err = mlxsw_sp->router_ops->init(mlxsw_sp);
10585 	if (err)
10586 		goto err_router_ops_init;
10587 
10588 	err = mlxsw_sp_router_xm_init(mlxsw_sp);
10589 	if (err)
10590 		goto err_xm_init;
10591 
10592 	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
10593 						       &mlxsw_sp_router_ll_xm_ops :
10594 						       &mlxsw_sp_router_ll_basic_ops;
10595 	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
10596 
10597 	err = mlxsw_sp_router_ll_op_ctx_init(router);
10598 	if (err)
10599 		goto err_ll_op_ctx_init;
10600 
10601 	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
10602 	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
10603 			  mlxsw_sp_nh_grp_activity_work);
10604 
10605 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
10606 	err = __mlxsw_sp_router_init(mlxsw_sp);
10607 	if (err)
10608 		goto err_router_init;
10609 
10610 	err = mlxsw_sp_rifs_init(mlxsw_sp);
10611 	if (err)
10612 		goto err_rifs_init;
10613 
10614 	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
10615 	if (err)
10616 		goto err_ipips_init;
10617 
10618 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
10619 			      &mlxsw_sp_nexthop_ht_params);
10620 	if (err)
10621 		goto err_nexthop_ht_init;
10622 
10623 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
10624 			      &mlxsw_sp_nexthop_group_ht_params);
10625 	if (err)
10626 		goto err_nexthop_group_ht_init;
10627 
10628 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
10629 	err = mlxsw_sp_lpm_init(mlxsw_sp);
10630 	if (err)
10631 		goto err_lpm_init;
10632 
10633 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
10634 	if (err)
10635 		goto err_mr_init;
10636 
10637 	err = mlxsw_sp_vrs_init(mlxsw_sp);
10638 	if (err)
10639 		goto err_vrs_init;
10640 
10641 	err = mlxsw_sp_lb_rif_init(mlxsw_sp);
10642 	if (err)
10643 		goto err_lb_rif_init;
10644 
10645 	err = mlxsw_sp_neigh_init(mlxsw_sp);
10646 	if (err)
10647 		goto err_neigh_init;
10648 
10649 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
10650 	if (err)
10651 		goto err_mp_hash_init;
10652 
10653 	err = mlxsw_sp_dscp_init(mlxsw_sp);
10654 	if (err)
10655 		goto err_dscp_init;
10656 
10657 	INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
10658 	INIT_LIST_HEAD(&router->fib_event_queue);
10659 	spin_lock_init(&router->fib_event_queue_lock);
10660 
10661 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
10662 	err = register_inetaddr_notifier(&router->inetaddr_nb);
10663 	if (err)
10664 		goto err_register_inetaddr_notifier;
10665 
10666 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
10667 	err = register_inet6addr_notifier(&router->inet6addr_nb);
10668 	if (err)
10669 		goto err_register_inet6addr_notifier;
10670 
10671 	mlxsw_sp->router->netevent_nb.notifier_call =
10672 		mlxsw_sp_router_netevent_event;
10673 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10674 	if (err)
10675 		goto err_register_netevent_notifier;
10676 
10677 	mlxsw_sp->router->nexthop_nb.notifier_call =
10678 		mlxsw_sp_nexthop_obj_event;
10679 	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10680 					&mlxsw_sp->router->nexthop_nb,
10681 					extack);
10682 	if (err)
10683 		goto err_register_nexthop_notifier;
10684 
10685 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
10686 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10687 				    &mlxsw_sp->router->fib_nb,
10688 				    mlxsw_sp_router_fib_dump_flush, extack);
10689 	if (err)
10690 		goto err_register_fib_notifier;
10691 
10692 	return 0;
10693 
10694 err_register_fib_notifier:
10695 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10696 				    &mlxsw_sp->router->nexthop_nb);
10697 err_register_nexthop_notifier:
10698 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10699 err_register_netevent_notifier:
10700 	unregister_inet6addr_notifier(&router->inet6addr_nb);
10701 err_register_inet6addr_notifier:
10702 	unregister_inetaddr_notifier(&router->inetaddr_nb);
10703 err_register_inetaddr_notifier:
10704 	mlxsw_core_flush_owq();
10705 	WARN_ON(!list_empty(&router->fib_event_queue));
10706 err_dscp_init:
10707 err_mp_hash_init:
10708 	mlxsw_sp_neigh_fini(mlxsw_sp);
10709 err_neigh_init:
10710 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
10711 err_lb_rif_init:
10712 	mlxsw_sp_vrs_fini(mlxsw_sp);
10713 err_vrs_init:
10714 	mlxsw_sp_mr_fini(mlxsw_sp);
10715 err_mr_init:
10716 	mlxsw_sp_lpm_fini(mlxsw_sp);
10717 err_lpm_init:
10718 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
10719 err_nexthop_group_ht_init:
10720 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
10721 err_nexthop_ht_init:
10722 	mlxsw_sp_ipips_fini(mlxsw_sp);
10723 err_ipips_init:
10724 	mlxsw_sp_rifs_fini(mlxsw_sp);
10725 err_rifs_init:
10726 	__mlxsw_sp_router_fini(mlxsw_sp);
10727 err_router_init:
10728 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
10729 	mlxsw_sp_router_ll_op_ctx_fini(router);
10730 err_ll_op_ctx_init:
10731 	mlxsw_sp_router_xm_fini(mlxsw_sp);
10732 err_xm_init:
10733 err_router_ops_init:
10734 	mutex_destroy(&mlxsw_sp->router->lock);
10735 	kfree(mlxsw_sp->router);
10736 	return err;
10737 }
10738 
10739 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10740 {
10741 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10742 				&mlxsw_sp->router->fib_nb);
10743 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10744 				    &mlxsw_sp->router->nexthop_nb);
10745 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10746 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
10747 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
10748 	mlxsw_core_flush_owq();
10749 	WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
10750 	mlxsw_sp_neigh_fini(mlxsw_sp);
10751 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
10752 	mlxsw_sp_vrs_fini(mlxsw_sp);
10753 	mlxsw_sp_mr_fini(mlxsw_sp);
10754 	mlxsw_sp_lpm_fini(mlxsw_sp);
10755 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
10756 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
10757 	mlxsw_sp_ipips_fini(mlxsw_sp);
10758 	mlxsw_sp_rifs_fini(mlxsw_sp);
10759 	__mlxsw_sp_router_fini(mlxsw_sp);
10760 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
10761 	mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
10762 	mlxsw_sp_router_xm_fini(mlxsw_sp);
10763 	mutex_destroy(&mlxsw_sp->router->lock);
10764 	kfree(mlxsw_sp->router);
10765 }
10766