1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
8 #include <linux/in6.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <net/netevent.h>
22 #include <net/neighbour.h>
23 #include <net/arp.h>
24 #include <net/ip_fib.h>
25 #include <net/ip6_fib.h>
26 #include <net/nexthop.h>
27 #include <net/fib_rules.h>
28 #include <net/ip_tunnels.h>
29 #include <net/l3mdev.h>
30 #include <net/addrconf.h>
31 #include <net/ndisc.h>
32 #include <net/ipv6.h>
33 #include <net/fib_notifier.h>
34 #include <net/switchdev.h>
35 
36 #include "spectrum.h"
37 #include "core.h"
38 #include "reg.h"
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_ipip.h"
42 #include "spectrum_mr.h"
43 #include "spectrum_mr_tcam.h"
44 #include "spectrum_router.h"
45 #include "spectrum_span.h"
46 
47 struct mlxsw_sp_fib;
48 struct mlxsw_sp_vr;
49 struct mlxsw_sp_lpm_tree;
50 struct mlxsw_sp_rif_ops;
51 
52 struct mlxsw_sp_rif {
53 	struct list_head nexthop_list;
54 	struct list_head neigh_list;
55 	struct net_device *dev; /* NULL for underlay RIF */
56 	struct mlxsw_sp_fid *fid;
57 	unsigned char addr[ETH_ALEN];
58 	int mtu;
59 	u16 rif_index;
60 	u16 vr_id;
61 	const struct mlxsw_sp_rif_ops *ops;
62 	struct mlxsw_sp *mlxsw_sp;
63 
64 	unsigned int counter_ingress;
65 	bool counter_ingress_valid;
66 	unsigned int counter_egress;
67 	bool counter_egress_valid;
68 };
69 
70 struct mlxsw_sp_rif_params {
71 	struct net_device *dev;
72 	union {
73 		u16 system_port;
74 		u16 lag_id;
75 	};
76 	u16 vid;
77 	bool lag;
78 };
79 
80 struct mlxsw_sp_rif_subport {
81 	struct mlxsw_sp_rif common;
82 	refcount_t ref_count;
83 	union {
84 		u16 system_port;
85 		u16 lag_id;
86 	};
87 	u16 vid;
88 	bool lag;
89 };
90 
91 struct mlxsw_sp_rif_ipip_lb {
92 	struct mlxsw_sp_rif common;
93 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
94 	u16 ul_vr_id; /* Reserved for Spectrum-2. */
95 	u16 ul_rif_id; /* Reserved for Spectrum. */
96 };
97 
98 struct mlxsw_sp_rif_params_ipip_lb {
99 	struct mlxsw_sp_rif_params common;
100 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
101 };
102 
103 struct mlxsw_sp_rif_ops {
104 	enum mlxsw_sp_rif_type type;
105 	size_t rif_size;
106 
107 	void (*setup)(struct mlxsw_sp_rif *rif,
108 		      const struct mlxsw_sp_rif_params *params);
109 	int (*configure)(struct mlxsw_sp_rif *rif);
110 	void (*deconfigure)(struct mlxsw_sp_rif *rif);
111 	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
112 					 struct netlink_ext_ack *extack);
113 	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
114 };
115 
116 struct mlxsw_sp_router_ops {
117 	int (*init)(struct mlxsw_sp *mlxsw_sp);
118 };
119 
120 static struct mlxsw_sp_rif *
121 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
122 			 const struct net_device *dev);
123 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
124 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
125 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
126 				  struct mlxsw_sp_lpm_tree *lpm_tree);
127 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
128 				     const struct mlxsw_sp_fib *fib,
129 				     u8 tree_id);
130 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
131 				       const struct mlxsw_sp_fib *fib);
132 
133 static unsigned int *
134 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
135 			   enum mlxsw_sp_rif_counter_dir dir)
136 {
137 	switch (dir) {
138 	case MLXSW_SP_RIF_COUNTER_EGRESS:
139 		return &rif->counter_egress;
140 	case MLXSW_SP_RIF_COUNTER_INGRESS:
141 		return &rif->counter_ingress;
142 	}
143 	return NULL;
144 }
145 
146 static bool
147 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
148 			       enum mlxsw_sp_rif_counter_dir dir)
149 {
150 	switch (dir) {
151 	case MLXSW_SP_RIF_COUNTER_EGRESS:
152 		return rif->counter_egress_valid;
153 	case MLXSW_SP_RIF_COUNTER_INGRESS:
154 		return rif->counter_ingress_valid;
155 	}
156 	return false;
157 }
158 
159 static void
160 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
161 			       enum mlxsw_sp_rif_counter_dir dir,
162 			       bool valid)
163 {
164 	switch (dir) {
165 	case MLXSW_SP_RIF_COUNTER_EGRESS:
166 		rif->counter_egress_valid = valid;
167 		break;
168 	case MLXSW_SP_RIF_COUNTER_INGRESS:
169 		rif->counter_ingress_valid = valid;
170 		break;
171 	}
172 }
173 
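/* Bind or unbind a counter to a RIF in the given direction. RITR is first
 * queried so that the existing RIF configuration is preserved and only the
 * counter fields are rewritten.
 */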
174 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
175 				     unsigned int counter_index, bool enable,
176 				     enum mlxsw_sp_rif_counter_dir dir)
177 {
178 	char ritr_pl[MLXSW_REG_RITR_LEN];
179 	bool is_egress = false;
180 	int err;
181 
182 	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
183 		is_egress = true;
184 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
185 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
186 	if (err)
187 		return err;
188 
189 	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
190 				    is_egress);
191 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
192 }
193 
194 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
195 				   struct mlxsw_sp_rif *rif,
196 				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
197 {
198 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
199 	unsigned int *p_counter_index;
200 	bool valid;
201 	int err;
202 
203 	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
204 	if (!valid)
205 		return -EINVAL;
206 
207 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
208 	if (!p_counter_index)
209 		return -EINVAL;
210 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
211 			     MLXSW_REG_RICNT_OPCODE_NOP);
212 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
213 	if (err)
214 		return err;
215 	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
216 	return 0;
217 }
218 
219 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
220 				      unsigned int counter_index)
221 {
222 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
223 
224 	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
225 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
226 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
227 }
228 
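/* Allocate a flow counter from the RIF sub-pool, clear it and bind it to the
 * RIF in the given direction.
 */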
229 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
230 			       struct mlxsw_sp_rif *rif,
231 			       enum mlxsw_sp_rif_counter_dir dir)
232 {
233 	unsigned int *p_counter_index;
234 	int err;
235 
236 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
237 	if (!p_counter_index)
238 		return -EINVAL;
239 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
240 				     p_counter_index);
241 	if (err)
242 		return err;
243 
244 	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
245 	if (err)
246 		goto err_counter_clear;
247 
248 	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
249 					*p_counter_index, true, dir);
250 	if (err)
251 		goto err_counter_edit;
252 	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
253 	return 0;
254 
255 err_counter_edit:
256 err_counter_clear:
257 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
258 			      *p_counter_index);
259 	return err;
260 }
261 
262 void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
263 			       struct mlxsw_sp_rif *rif,
264 			       enum mlxsw_sp_rif_counter_dir dir)
265 {
266 	unsigned int *p_counter_index;
267 
268 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
269 		return;
270 
271 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
272 	if (WARN_ON(!p_counter_index))
273 		return;
274 	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
275 				  *p_counter_index, false, dir);
276 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
277 			      *p_counter_index);
278 	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
279 }
280 
281 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
282 {
283 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
284 	struct devlink *devlink;
285 
286 	devlink = priv_to_devlink(mlxsw_sp->core);
287 	if (!devlink_dpipe_table_counter_enabled(devlink,
288 						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
289 		return;
290 	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
291 }
292 
293 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
294 {
295 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
296 
297 	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
298 }
299 
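/* One bit for each possible prefix length: /0 up to /128 for IPv6. */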
300 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
301 
302 struct mlxsw_sp_prefix_usage {
303 	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
304 };
305 
306 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
307 	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
308 
309 static bool
310 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
311 			 struct mlxsw_sp_prefix_usage *prefix_usage2)
312 {
313 	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
314 }
315 
316 static void
317 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
318 			  struct mlxsw_sp_prefix_usage *prefix_usage2)
319 {
320 	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
321 }
322 
323 static void
324 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
325 			  unsigned char prefix_len)
326 {
327 	set_bit(prefix_len, prefix_usage->b);
328 }
329 
330 static void
331 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
332 			    unsigned char prefix_len)
333 {
334 	clear_bit(prefix_len, prefix_usage->b);
335 }
336 
337 struct mlxsw_sp_fib_key {
338 	unsigned char addr[sizeof(struct in6_addr)];
339 	unsigned char prefix_len;
340 };
341 
342 enum mlxsw_sp_fib_entry_type {
343 	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
344 	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
345 	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
346 	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
347 	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
348 
349 	/* This is a special case of local delivery, where a packet should be
350 	 * decapsulated on reception. Note that there is no corresponding ENCAP,
351 	 * because that's a type of next hop, not of FIB entry. (There can be
352 	 * several next hops in a REMOTE entry, and some of them may be
353 	 * encapsulating entries.)
354 	 */
355 	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
356 	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
357 };
358 
359 struct mlxsw_sp_nexthop_group_info;
360 struct mlxsw_sp_nexthop_group;
361 struct mlxsw_sp_fib_entry;
362 
363 struct mlxsw_sp_fib_node {
364 	struct mlxsw_sp_fib_entry *fib_entry;
365 	struct list_head list;
366 	struct rhash_head ht_node;
367 	struct mlxsw_sp_fib *fib;
368 	struct mlxsw_sp_fib_key key;
369 };
370 
371 struct mlxsw_sp_fib_entry_decap {
372 	struct mlxsw_sp_ipip_entry *ipip_entry;
373 	u32 tunnel_index;
374 };
375 
376 static struct mlxsw_sp_fib_entry_priv *
377 mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
378 {
379 	struct mlxsw_sp_fib_entry_priv *priv;
380 
381 	if (!ll_ops->fib_entry_priv_size)
382 		/* No need to have priv */
383 		return NULL;
384 
385 	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
386 	if (!priv)
387 		return ERR_PTR(-ENOMEM);
388 	refcount_set(&priv->refcnt, 1);
389 	return priv;
390 }
391 
392 static void
393 mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
394 {
395 	kfree(priv);
396 }
397 
398 static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
399 {
400 	refcount_inc(&priv->refcnt);
401 }
402 
403 static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
404 {
405 	if (!priv || !refcount_dec_and_test(&priv->refcnt))
406 		return;
407 	mlxsw_sp_fib_entry_priv_destroy(priv);
408 }
409 
410 static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
411 						struct mlxsw_sp_fib_entry_priv *priv)
412 {
413 	if (!priv)
414 		return;
415 	mlxsw_sp_fib_entry_priv_hold(priv);
416 	list_add(&priv->list, &op_ctx->fib_entry_priv_list);
417 }
418 
419 static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
420 {
421 	struct mlxsw_sp_fib_entry_priv *priv, *tmp;
422 
423 	list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
424 		mlxsw_sp_fib_entry_priv_put(priv);
425 	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
426 }
427 
428 struct mlxsw_sp_fib_entry {
429 	struct mlxsw_sp_fib_node *fib_node;
430 	enum mlxsw_sp_fib_entry_type type;
431 	struct list_head nexthop_group_node;
432 	struct mlxsw_sp_nexthop_group *nh_group;
433 	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
434 	struct mlxsw_sp_fib_entry_priv *priv;
435 };
436 
437 struct mlxsw_sp_fib4_entry {
438 	struct mlxsw_sp_fib_entry common;
439 	struct fib_info *fi;
440 	u32 tb_id;
441 	u8 tos;
442 	u8 type;
443 };
444 
445 struct mlxsw_sp_fib6_entry {
446 	struct mlxsw_sp_fib_entry common;
447 	struct list_head rt6_list;
448 	unsigned int nrt6;
449 };
450 
451 struct mlxsw_sp_rt6 {
452 	struct list_head list;
453 	struct fib6_info *rt;
454 };
455 
456 struct mlxsw_sp_lpm_tree {
457 	u8 id; /* tree ID */
458 	unsigned int ref_count;
459 	enum mlxsw_sp_l3proto proto;
460 	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
461 	struct mlxsw_sp_prefix_usage prefix_usage;
462 };
463 
464 struct mlxsw_sp_fib {
465 	struct rhashtable ht;
466 	struct list_head node_list;
467 	struct mlxsw_sp_vr *vr;
468 	struct mlxsw_sp_lpm_tree *lpm_tree;
469 	enum mlxsw_sp_l3proto proto;
470 	const struct mlxsw_sp_router_ll_ops *ll_ops;
471 };
472 
473 struct mlxsw_sp_vr {
474 	u16 id; /* virtual router ID */
475 	u32 tb_id; /* kernel fib table id */
476 	unsigned int rif_count;
477 	struct mlxsw_sp_fib *fib4;
478 	struct mlxsw_sp_fib *fib6;
479 	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
480 	struct mlxsw_sp_rif *ul_rif;
481 	refcount_t ul_rif_refcnt;
482 };
483 
484 static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
485 					 enum mlxsw_sp_l3proto proto)
486 {
487 	return 0;
488 }
489 
490 static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
491 {
492 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
493 			       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
494 }
495 
496 static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
497 {
498 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
499 			       xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
500 }
501 
502 static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
503 {
504 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
505 			       xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
506 }
507 
508 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
509 
510 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
511 						struct mlxsw_sp_vr *vr,
512 						enum mlxsw_sp_l3proto proto)
513 {
514 	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
515 	struct mlxsw_sp_lpm_tree *lpm_tree;
516 	struct mlxsw_sp_fib *fib;
517 	int err;
518 
519 	err = ll_ops->init(mlxsw_sp, vr->id, proto);
520 	if (err)
521 		return ERR_PTR(err);
522 
523 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
524 	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
525 	if (!fib)
526 		return ERR_PTR(-ENOMEM);
527 	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
528 	if (err)
529 		goto err_rhashtable_init;
530 	INIT_LIST_HEAD(&fib->node_list);
531 	fib->proto = proto;
532 	fib->vr = vr;
533 	fib->lpm_tree = lpm_tree;
534 	fib->ll_ops = ll_ops;
535 	mlxsw_sp_lpm_tree_hold(lpm_tree);
536 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
537 	if (err)
538 		goto err_lpm_tree_bind;
539 	return fib;
540 
541 err_lpm_tree_bind:
542 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
543 err_rhashtable_init:
544 	kfree(fib);
545 	return ERR_PTR(err);
546 }
547 
548 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
549 				 struct mlxsw_sp_fib *fib)
550 {
551 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
552 	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
553 	WARN_ON(!list_empty(&fib->node_list));
554 	rhashtable_destroy(&fib->ht);
555 	kfree(fib);
556 }
557 
558 static struct mlxsw_sp_lpm_tree *
559 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
560 {
	struct mlxsw_sp_lpm_tree *lpm_tree;
562 	int i;
563 
564 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
565 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
566 		if (lpm_tree->ref_count == 0)
567 			return lpm_tree;
568 	}
569 	return NULL;
570 }
571 
572 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
573 				   const struct mlxsw_sp_router_ll_ops *ll_ops,
574 				   struct mlxsw_sp_lpm_tree *lpm_tree)
575 {
576 	char xralta_pl[MLXSW_REG_XRALTA_LEN];
577 
578 	mlxsw_reg_xralta_pack(xralta_pl, true,
579 			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
580 			      lpm_tree->id);
581 	return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
582 }
583 
584 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
585 				   const struct mlxsw_sp_router_ll_ops *ll_ops,
586 				   struct mlxsw_sp_lpm_tree *lpm_tree)
587 {
588 	char xralta_pl[MLXSW_REG_XRALTA_LEN];
589 
590 	mlxsw_reg_xralta_pack(xralta_pl, false,
591 			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
592 			      lpm_tree->id);
593 	ll_ops->ralta_write(mlxsw_sp, xralta_pl);
594 }
595 
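/* Program the tree's structure: the root bin is the longest prefix length in
 * use, and each used prefix length (except zero) is linked to the next
 * shorter one through its left child, forming a chain down the tree.
 */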
596 static int
597 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
598 				  const struct mlxsw_sp_router_ll_ops *ll_ops,
599 				  struct mlxsw_sp_prefix_usage *prefix_usage,
600 				  struct mlxsw_sp_lpm_tree *lpm_tree)
601 {
602 	char xralst_pl[MLXSW_REG_XRALST_LEN];
603 	u8 root_bin = 0;
604 	u8 prefix;
605 	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
606 
607 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
608 		root_bin = prefix;
609 
610 	mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
611 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
612 		if (prefix == 0)
613 			continue;
614 		mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
615 					  MLXSW_REG_RALST_BIN_NO_CHILD);
616 		last_prefix = prefix;
617 	}
618 	return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
619 }
620 
621 static struct mlxsw_sp_lpm_tree *
622 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
623 			 const struct mlxsw_sp_router_ll_ops *ll_ops,
624 			 struct mlxsw_sp_prefix_usage *prefix_usage,
625 			 enum mlxsw_sp_l3proto proto)
626 {
627 	struct mlxsw_sp_lpm_tree *lpm_tree;
628 	int err;
629 
630 	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
631 	if (!lpm_tree)
632 		return ERR_PTR(-EBUSY);
633 	lpm_tree->proto = proto;
634 	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
635 	if (err)
636 		return ERR_PTR(err);
637 
638 	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
639 	if (err)
640 		goto err_left_struct_set;
641 	memcpy(&lpm_tree->prefix_usage, prefix_usage,
642 	       sizeof(lpm_tree->prefix_usage));
643 	memset(&lpm_tree->prefix_ref_count, 0,
644 	       sizeof(lpm_tree->prefix_ref_count));
645 	lpm_tree->ref_count = 1;
646 	return lpm_tree;
647 
648 err_left_struct_set:
649 	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
650 	return ERR_PTR(err);
651 }
652 
653 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
654 				      const struct mlxsw_sp_router_ll_ops *ll_ops,
655 				      struct mlxsw_sp_lpm_tree *lpm_tree)
656 {
657 	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
658 }
659 
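/* Find an existing tree that matches the requested protocol and prefix usage
 * and take a reference to it, or create a new tree if none matches.
 */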
660 static struct mlxsw_sp_lpm_tree *
661 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
662 		      struct mlxsw_sp_prefix_usage *prefix_usage,
663 		      enum mlxsw_sp_l3proto proto)
664 {
665 	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
666 	struct mlxsw_sp_lpm_tree *lpm_tree;
667 	int i;
668 
669 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
670 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
671 		if (lpm_tree->ref_count != 0 &&
672 		    lpm_tree->proto == proto &&
673 		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
674 					     prefix_usage)) {
675 			mlxsw_sp_lpm_tree_hold(lpm_tree);
676 			return lpm_tree;
677 		}
678 	}
679 	return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
680 }
681 
682 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
683 {
684 	lpm_tree->ref_count++;
685 }
686 
687 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
688 				  struct mlxsw_sp_lpm_tree *lpm_tree)
689 {
690 	const struct mlxsw_sp_router_ll_ops *ll_ops =
691 				mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
692 
693 	if (--lpm_tree->ref_count == 0)
694 		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
695 }
696 
697 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
698 
699 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
700 {
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 }};
702 	struct mlxsw_sp_lpm_tree *lpm_tree;
703 	u64 max_trees;
704 	int err, i;
705 
706 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
707 		return -EIO;
708 
709 	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
710 	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
711 	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
712 					     sizeof(struct mlxsw_sp_lpm_tree),
713 					     GFP_KERNEL);
714 	if (!mlxsw_sp->router->lpm.trees)
715 		return -ENOMEM;
716 
717 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
718 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
719 		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
720 	}
721 
722 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
723 					 MLXSW_SP_L3_PROTO_IPV4);
724 	if (IS_ERR(lpm_tree)) {
725 		err = PTR_ERR(lpm_tree);
726 		goto err_ipv4_tree_get;
727 	}
728 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
729 
730 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
731 					 MLXSW_SP_L3_PROTO_IPV6);
732 	if (IS_ERR(lpm_tree)) {
733 		err = PTR_ERR(lpm_tree);
734 		goto err_ipv6_tree_get;
735 	}
736 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
737 
738 	return 0;
739 
740 err_ipv6_tree_get:
741 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
742 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
743 err_ipv4_tree_get:
744 	kfree(mlxsw_sp->router->lpm.trees);
745 	return err;
746 }
747 
748 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
749 {
750 	struct mlxsw_sp_lpm_tree *lpm_tree;
751 
752 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
753 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
754 
755 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
756 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
757 
758 	kfree(mlxsw_sp->router->lpm.trees);
759 }
760 
761 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
762 {
763 	return !!vr->fib4 || !!vr->fib6 ||
764 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
765 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
766 }
767 
768 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
769 {
770 	struct mlxsw_sp_vr *vr;
771 	int i;
772 
773 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
774 		vr = &mlxsw_sp->router->vrs[i];
775 		if (!mlxsw_sp_vr_is_used(vr))
776 			return vr;
777 	}
778 	return NULL;
779 }
780 
781 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
782 				     const struct mlxsw_sp_fib *fib, u8 tree_id)
783 {
784 	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
785 
786 	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
787 			      (enum mlxsw_reg_ralxx_protocol) fib->proto,
788 			      tree_id);
789 	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
790 }
791 
792 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
793 				       const struct mlxsw_sp_fib *fib)
794 {
795 	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
796 
797 	/* Bind to tree 0 which is default */
798 	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
799 			      (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
800 	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
801 }
802 
803 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
804 {
805 	/* For our purpose, squash main, default and local tables into one */
806 	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
807 		tb_id = RT_TABLE_MAIN;
808 	return tb_id;
809 }
810 
811 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
812 					    u32 tb_id)
813 {
814 	struct mlxsw_sp_vr *vr;
815 	int i;
816 
817 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
818 
819 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
820 		vr = &mlxsw_sp->router->vrs[i];
821 		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
822 			return vr;
823 	}
824 	return NULL;
825 }
826 
827 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
828 				u16 *vr_id)
829 {
830 	struct mlxsw_sp_vr *vr;
831 	int err = 0;
832 
833 	mutex_lock(&mlxsw_sp->router->lock);
834 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
835 	if (!vr) {
836 		err = -ESRCH;
837 		goto out;
838 	}
839 	*vr_id = vr->id;
840 out:
841 	mutex_unlock(&mlxsw_sp->router->lock);
842 	return err;
843 }
844 
845 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
846 					    enum mlxsw_sp_l3proto proto)
847 {
848 	switch (proto) {
849 	case MLXSW_SP_L3_PROTO_IPV4:
850 		return vr->fib4;
851 	case MLXSW_SP_L3_PROTO_IPV6:
852 		return vr->fib6;
853 	}
854 	return NULL;
855 }
856 
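/* Allocate an unused virtual router and create its IPv4 and IPv6 unicast
 * FIBs and multicast routing tables.
 */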
857 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
858 					      u32 tb_id,
859 					      struct netlink_ext_ack *extack)
860 {
861 	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
862 	struct mlxsw_sp_fib *fib4;
863 	struct mlxsw_sp_fib *fib6;
864 	struct mlxsw_sp_vr *vr;
865 	int err;
866 
867 	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
868 	if (!vr) {
869 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
870 		return ERR_PTR(-EBUSY);
871 	}
872 	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
873 	if (IS_ERR(fib4))
874 		return ERR_CAST(fib4);
875 	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
876 	if (IS_ERR(fib6)) {
877 		err = PTR_ERR(fib6);
878 		goto err_fib6_create;
879 	}
880 	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
881 					     MLXSW_SP_L3_PROTO_IPV4);
882 	if (IS_ERR(mr4_table)) {
883 		err = PTR_ERR(mr4_table);
884 		goto err_mr4_table_create;
885 	}
886 	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
887 					     MLXSW_SP_L3_PROTO_IPV6);
888 	if (IS_ERR(mr6_table)) {
889 		err = PTR_ERR(mr6_table);
890 		goto err_mr6_table_create;
891 	}
892 
893 	vr->fib4 = fib4;
894 	vr->fib6 = fib6;
895 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
896 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
897 	vr->tb_id = tb_id;
898 	return vr;
899 
900 err_mr6_table_create:
901 	mlxsw_sp_mr_table_destroy(mr4_table);
902 err_mr4_table_create:
903 	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
904 err_fib6_create:
905 	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
906 	return ERR_PTR(err);
907 }
908 
909 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
910 				struct mlxsw_sp_vr *vr)
911 {
912 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
913 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
914 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
915 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
916 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
917 	vr->fib6 = NULL;
918 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
919 	vr->fib4 = NULL;
920 }
921 
922 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
923 					   struct netlink_ext_ack *extack)
924 {
925 	struct mlxsw_sp_vr *vr;
926 
927 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
928 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
929 	if (!vr)
930 		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
931 	return vr;
932 }
933 
934 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
935 {
936 	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
937 	    list_empty(&vr->fib6->node_list) &&
938 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
939 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
940 		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
941 }
942 
943 static bool
944 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
945 				    enum mlxsw_sp_l3proto proto, u8 tree_id)
946 {
947 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
948 
949 	if (!mlxsw_sp_vr_is_used(vr))
950 		return false;
951 	if (fib->lpm_tree->id == tree_id)
952 		return true;
953 	return false;
954 }
955 
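/* Rebind a FIB to a new LPM tree. On failure, the binding to the old tree is
 * restored.
 */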
956 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
957 					struct mlxsw_sp_fib *fib,
958 					struct mlxsw_sp_lpm_tree *new_tree)
959 {
960 	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
961 	int err;
962 
963 	fib->lpm_tree = new_tree;
964 	mlxsw_sp_lpm_tree_hold(new_tree);
965 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
966 	if (err)
967 		goto err_tree_bind;
968 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
969 	return 0;
970 
971 err_tree_bind:
972 	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
973 	fib->lpm_tree = old_tree;
974 	return err;
975 }
976 
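/* Rebind all FIBs currently bound to the default tree of the given protocol
 * to a new tree, rolling the already-replaced bindings back on failure.
 */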
977 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
978 					 struct mlxsw_sp_fib *fib,
979 					 struct mlxsw_sp_lpm_tree *new_tree)
980 {
981 	enum mlxsw_sp_l3proto proto = fib->proto;
982 	struct mlxsw_sp_lpm_tree *old_tree;
983 	u8 old_id, new_id = new_tree->id;
984 	struct mlxsw_sp_vr *vr;
985 	int i, err;
986 
987 	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
988 	old_id = old_tree->id;
989 
990 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
991 		vr = &mlxsw_sp->router->vrs[i];
992 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
993 			continue;
994 		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
995 						   mlxsw_sp_vr_fib(vr, proto),
996 						   new_tree);
997 		if (err)
998 			goto err_tree_replace;
999 	}
1000 
1001 	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
1002 	       sizeof(new_tree->prefix_ref_count));
1003 	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
1004 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
1005 
1006 	return 0;
1007 
1008 err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
1012 		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1013 					     mlxsw_sp_vr_fib(vr, proto),
1014 					     old_tree);
1015 	}
1016 	return err;
1017 }
1018 
1019 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
1020 {
1021 	struct mlxsw_sp_vr *vr;
1022 	u64 max_vrs;
1023 	int i;
1024 
1025 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
1026 		return -EIO;
1027 
1028 	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1029 	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
1030 					GFP_KERNEL);
1031 	if (!mlxsw_sp->router->vrs)
1032 		return -ENOMEM;
1033 
1034 	for (i = 0; i < max_vrs; i++) {
1035 		vr = &mlxsw_sp->router->vrs[i];
1036 		vr->id = i;
1037 	}
1038 
1039 	return 0;
1040 }
1041 
1042 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
1043 
1044 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
1045 {
1046 	/* At this stage we're guaranteed not to have new incoming
1047 	 * FIB notifications and the work queue is free from FIBs
1048 	 * sitting on top of mlxsw netdevs. However, we can still
1049 	 * have other FIBs queued. Flush the queue before flushing
1050 	 * the device's tables. No need for locks, as we're the only
1051 	 * writer.
1052 	 */
1053 	mlxsw_core_flush_owq();
1054 	mlxsw_sp_router_fib_flush(mlxsw_sp);
1055 	kfree(mlxsw_sp->router->vrs);
1056 }
1057 
1058 static struct net_device *
1059 __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
1060 {
1061 	struct ip_tunnel *tun = netdev_priv(ol_dev);
1062 	struct net *net = dev_net(ol_dev);
1063 
1064 	return dev_get_by_index_rcu(net, tun->parms.link);
1065 }
1066 
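/* Return the underlay table ID of a tunnel: the FIB table of the bound
 * device's L3 master device, or the main table when there is none.
 */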
1067 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
1068 {
1069 	struct net_device *d;
1070 	u32 tb_id;
1071 
1072 	rcu_read_lock();
1073 	d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1074 	if (d)
1075 		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1076 	else
1077 		tb_id = RT_TABLE_MAIN;
1078 	rcu_read_unlock();
1079 
1080 	return tb_id;
1081 }
1082 
1083 static struct mlxsw_sp_rif *
1084 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1085 		    const struct mlxsw_sp_rif_params *params,
1086 		    struct netlink_ext_ack *extack);
1087 
1088 static struct mlxsw_sp_rif_ipip_lb *
1089 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1090 				enum mlxsw_sp_ipip_type ipipt,
1091 				struct net_device *ol_dev,
1092 				struct netlink_ext_ack *extack)
1093 {
1094 	struct mlxsw_sp_rif_params_ipip_lb lb_params;
1095 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1096 	struct mlxsw_sp_rif *rif;
1097 
1098 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1099 	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1100 		.common.dev = ol_dev,
1101 		.common.lag = false,
1102 		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1103 	};
1104 
1105 	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1106 	if (IS_ERR(rif))
1107 		return ERR_CAST(rif);
1108 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1109 }
1110 
1111 static struct mlxsw_sp_ipip_entry *
1112 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1113 			  enum mlxsw_sp_ipip_type ipipt,
1114 			  struct net_device *ol_dev)
1115 {
1116 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1117 	struct mlxsw_sp_ipip_entry *ipip_entry;
1118 	struct mlxsw_sp_ipip_entry *ret = NULL;
1119 
1120 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1121 	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1122 	if (!ipip_entry)
1123 		return ERR_PTR(-ENOMEM);
1124 
1125 	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1126 							    ol_dev, NULL);
1127 	if (IS_ERR(ipip_entry->ol_lb)) {
1128 		ret = ERR_CAST(ipip_entry->ol_lb);
1129 		goto err_ol_ipip_lb_create;
1130 	}
1131 
1132 	ipip_entry->ipipt = ipipt;
1133 	ipip_entry->ol_dev = ol_dev;
1134 
1135 	switch (ipip_ops->ul_proto) {
1136 	case MLXSW_SP_L3_PROTO_IPV4:
1137 		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
1138 		break;
1139 	case MLXSW_SP_L3_PROTO_IPV6:
1140 		WARN_ON(1);
1141 		break;
1142 	}
1143 
1144 	return ipip_entry;
1145 
1146 err_ol_ipip_lb_create:
1147 	kfree(ipip_entry);
1148 	return ret;
1149 }
1150 
1151 static void
1152 mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
1153 {
1154 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1155 	kfree(ipip_entry);
1156 }
1157 
1158 static bool
1159 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1160 				  const enum mlxsw_sp_l3proto ul_proto,
1161 				  union mlxsw_sp_l3addr saddr,
1162 				  u32 ul_tb_id,
1163 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1164 {
1165 	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1166 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1167 	union mlxsw_sp_l3addr tun_saddr;
1168 
1169 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1170 		return false;
1171 
1172 	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1173 	return tun_ul_tb_id == ul_tb_id &&
1174 	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1175 }
1176 
1177 static int
1178 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1179 			      struct mlxsw_sp_fib_entry *fib_entry,
1180 			      struct mlxsw_sp_ipip_entry *ipip_entry)
1181 {
1182 	u32 tunnel_index;
1183 	int err;
1184 
1185 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1186 				  1, &tunnel_index);
1187 	if (err)
1188 		return err;
1189 
1190 	ipip_entry->decap_fib_entry = fib_entry;
1191 	fib_entry->decap.ipip_entry = ipip_entry;
1192 	fib_entry->decap.tunnel_index = tunnel_index;
1193 	return 0;
1194 }
1195 
1196 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1197 					  struct mlxsw_sp_fib_entry *fib_entry)
1198 {
	/* Unlink this FIB entry from the IPIP entry it is the decap route of. */
1200 	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1201 	fib_entry->decap.ipip_entry = NULL;
1202 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1203 			   1, fib_entry->decap.tunnel_index);
1204 }
1205 
1206 static struct mlxsw_sp_fib_node *
1207 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1208 			 size_t addr_len, unsigned char prefix_len);
1209 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1210 				     struct mlxsw_sp_fib_entry *fib_entry);
1211 
1212 static void
1213 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1214 				 struct mlxsw_sp_ipip_entry *ipip_entry)
1215 {
1216 	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1217 
1218 	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1219 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1220 
1221 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1222 }
1223 
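/* Promote a trapping FIB entry to an IPIP decap entry. If updating the
 * hardware fails, the entry is demoted back to a trap.
 */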
1224 static void
1225 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1226 				  struct mlxsw_sp_ipip_entry *ipip_entry,
1227 				  struct mlxsw_sp_fib_entry *decap_fib_entry)
1228 {
1229 	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1230 					  ipip_entry))
1231 		return;
1232 	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1233 
1234 	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1235 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1236 }
1237 
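/* Look up a host (/32) FIB entry of the given type. Only IPv4 addresses are
 * currently supported.
 */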
1238 static struct mlxsw_sp_fib_entry *
1239 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1240 				     enum mlxsw_sp_l3proto proto,
1241 				     const union mlxsw_sp_l3addr *addr,
1242 				     enum mlxsw_sp_fib_entry_type type)
1243 {
1244 	struct mlxsw_sp_fib_node *fib_node;
1245 	unsigned char addr_prefix_len;
1246 	struct mlxsw_sp_fib *fib;
1247 	struct mlxsw_sp_vr *vr;
1248 	const void *addrp;
1249 	size_t addr_len;
1250 	u32 addr4;
1251 
1252 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1253 	if (!vr)
1254 		return NULL;
1255 	fib = mlxsw_sp_vr_fib(vr, proto);
1256 
1257 	switch (proto) {
1258 	case MLXSW_SP_L3_PROTO_IPV4:
1259 		addr4 = be32_to_cpu(addr->addr4);
1260 		addrp = &addr4;
1261 		addr_len = 4;
1262 		addr_prefix_len = 32;
1263 		break;
1264 	case MLXSW_SP_L3_PROTO_IPV6:
1265 	default:
1266 		WARN_ON(1);
1267 		return NULL;
1268 	}
1269 
1270 	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1271 					    addr_prefix_len);
1272 	if (!fib_node || fib_node->fib_entry->type != type)
1273 		return NULL;
1274 
1275 	return fib_node->fib_entry;
1276 }
1277 
1278 /* Given an IPIP entry, find the corresponding decap route. */
1279 static struct mlxsw_sp_fib_entry *
1280 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1281 			       struct mlxsw_sp_ipip_entry *ipip_entry)
1282 {
	struct mlxsw_sp_fib_node *fib_node;
1284 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1285 	unsigned char saddr_prefix_len;
1286 	union mlxsw_sp_l3addr saddr;
1287 	struct mlxsw_sp_fib *ul_fib;
1288 	struct mlxsw_sp_vr *ul_vr;
1289 	const void *saddrp;
1290 	size_t saddr_len;
1291 	u32 ul_tb_id;
1292 	u32 saddr4;
1293 
1294 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1295 
1296 	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1297 	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1298 	if (!ul_vr)
1299 		return NULL;
1300 
1301 	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1302 	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1303 					   ipip_entry->ol_dev);
1304 
1305 	switch (ipip_ops->ul_proto) {
1306 	case MLXSW_SP_L3_PROTO_IPV4:
1307 		saddr4 = be32_to_cpu(saddr.addr4);
1308 		saddrp = &saddr4;
1309 		saddr_len = 4;
1310 		saddr_prefix_len = 32;
1311 		break;
1312 	default:
1313 		WARN_ON(1);
1314 		return NULL;
1315 	}
1316 
1317 	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1318 					    saddr_prefix_len);
1319 	if (!fib_node ||
1320 	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1321 		return NULL;
1322 
1323 	return fib_node->fib_entry;
1324 }
1325 
1326 static struct mlxsw_sp_ipip_entry *
1327 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1328 			   enum mlxsw_sp_ipip_type ipipt,
1329 			   struct net_device *ol_dev)
1330 {
1331 	struct mlxsw_sp_ipip_entry *ipip_entry;
1332 
1333 	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1334 	if (IS_ERR(ipip_entry))
1335 		return ipip_entry;
1336 
1337 	list_add_tail(&ipip_entry->ipip_list_node,
1338 		      &mlxsw_sp->router->ipip_list);
1339 
1340 	return ipip_entry;
1341 }
1342 
1343 static void
1344 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1345 			    struct mlxsw_sp_ipip_entry *ipip_entry)
1346 {
1347 	list_del(&ipip_entry->ipip_list_node);
1348 	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
1349 }
1350 
1351 static bool
1352 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1353 				  const struct net_device *ul_dev,
1354 				  enum mlxsw_sp_l3proto ul_proto,
1355 				  union mlxsw_sp_l3addr ul_dip,
1356 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1357 {
1358 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1359 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1360 
1361 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1362 		return false;
1363 
1364 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1365 						 ul_tb_id, ipip_entry);
1366 }
1367 
1368 /* Given decap parameters, find the corresponding IPIP entry. */
1369 static struct mlxsw_sp_ipip_entry *
1370 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
1371 				  enum mlxsw_sp_l3proto ul_proto,
1372 				  union mlxsw_sp_l3addr ul_dip)
1373 {
1374 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1375 	struct net_device *ul_dev;
1376 
1377 	rcu_read_lock();
1378 
1379 	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
1380 	if (!ul_dev)
1381 		goto out_unlock;
1382 
1383 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1384 			    ipip_list_node)
1385 		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1386 						      ul_proto, ul_dip,
1387 						      ipip_entry))
1388 			goto out_unlock;
1389 
1390 	rcu_read_unlock();
1391 
1392 	return NULL;
1393 
1394 out_unlock:
1395 	rcu_read_unlock();
1396 	return ipip_entry;
1397 }
1398 
1399 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1400 				      const struct net_device *dev,
1401 				      enum mlxsw_sp_ipip_type *p_type)
1402 {
1403 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1404 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1405 	enum mlxsw_sp_ipip_type ipipt;
1406 
1407 	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1408 		ipip_ops = router->ipip_ops_arr[ipipt];
1409 		if (dev->type == ipip_ops->dev_type) {
1410 			if (p_type)
1411 				*p_type = ipipt;
1412 			return true;
1413 		}
1414 	}
1415 	return false;
1416 }
1417 
1418 bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1419 				const struct net_device *dev)
1420 {
1421 	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1422 }
1423 
1424 static struct mlxsw_sp_ipip_entry *
1425 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1426 				   const struct net_device *ol_dev)
1427 {
1428 	struct mlxsw_sp_ipip_entry *ipip_entry;
1429 
1430 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1431 			    ipip_list_node)
1432 		if (ipip_entry->ol_dev == ol_dev)
1433 			return ipip_entry;
1434 
1435 	return NULL;
1436 }
1437 
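/* Iterate over IPIP entries whose underlay device is ul_dev, continuing from
 * the entry following `start', or from the beginning when `start' is NULL.
 */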
1438 static struct mlxsw_sp_ipip_entry *
1439 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1440 				   const struct net_device *ul_dev,
1441 				   struct mlxsw_sp_ipip_entry *start)
1442 {
1443 	struct mlxsw_sp_ipip_entry *ipip_entry;
1444 
1445 	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1446 					ipip_list_node);
1447 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1448 				     ipip_list_node) {
1449 		struct net_device *ol_dev = ipip_entry->ol_dev;
1450 		struct net_device *ipip_ul_dev;
1451 
1452 		rcu_read_lock();
1453 		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1454 		rcu_read_unlock();
1455 
1456 		if (ipip_ul_dev == ul_dev)
1457 			return ipip_entry;
1458 	}
1459 
1460 	return NULL;
1461 }
1462 
1463 bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1464 				const struct net_device *dev)
1465 {
1466 	bool is_ipip_ul;
1467 
1468 	mutex_lock(&mlxsw_sp->router->lock);
1469 	is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1470 	mutex_unlock(&mlxsw_sp->router->lock);
1471 
1472 	return is_ipip_ul;
1473 }
1474 
1475 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1476 						const struct net_device *ol_dev,
1477 						enum mlxsw_sp_ipip_type ipipt)
1478 {
1479 	const struct mlxsw_sp_ipip_ops *ops
1480 		= mlxsw_sp->router->ipip_ops_arr[ipipt];
1481 
1482 	return ops->can_offload(mlxsw_sp, ol_dev);
1483 }
1484 
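/* A newly registered tunnel is offloaded only if it can be offloaded and its
 * local address does not conflict with an already offloaded tunnel. On a
 * conflict, the existing tunnel is demoted and neither is offloaded.
 */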
1485 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1486 						struct net_device *ol_dev)
1487 {
1488 	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1489 	struct mlxsw_sp_ipip_entry *ipip_entry;
1490 	enum mlxsw_sp_l3proto ul_proto;
1491 	union mlxsw_sp_l3addr saddr;
1492 	u32 ul_tb_id;
1493 
1494 	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1495 	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1496 		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1497 		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1498 		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1499 		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1500 							  saddr, ul_tb_id,
1501 							  NULL)) {
1502 			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1503 								ol_dev);
1504 			if (IS_ERR(ipip_entry))
1505 				return PTR_ERR(ipip_entry);
1506 		}
1507 	}
1508 
1509 	return 0;
1510 }
1511 
1512 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1513 						   struct net_device *ol_dev)
1514 {
1515 	struct mlxsw_sp_ipip_entry *ipip_entry;
1516 
1517 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1518 	if (ipip_entry)
1519 		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1520 }
1521 
1522 static void
1523 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1524 				struct mlxsw_sp_ipip_entry *ipip_entry)
1525 {
1526 	struct mlxsw_sp_fib_entry *decap_fib_entry;
1527 
1528 	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1529 	if (decap_fib_entry)
1530 		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1531 						  decap_fib_entry);
1532 }
1533 
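/* Write the loopback RIF that backs an IPIP tunnel to the device. Only IPv4
 * underlay is currently supported.
 */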
1534 static int
1535 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1536 			u16 ul_rif_id, bool enable)
1537 {
1538 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1539 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1540 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1541 	char ritr_pl[MLXSW_REG_RITR_LEN];
1542 	u32 saddr4;
1543 
1544 	switch (lb_cf.ul_protocol) {
1545 	case MLXSW_SP_L3_PROTO_IPV4:
1546 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1547 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1548 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1549 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1550 			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1551 			    ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
1552 		break;
1553 
1554 	case MLXSW_SP_L3_PROTO_IPV6:
1555 		return -EAFNOSUPPORT;
1556 	}
1557 
1558 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1559 }
1560 
1561 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1562 						 struct net_device *ol_dev)
1563 {
1564 	struct mlxsw_sp_ipip_entry *ipip_entry;
1565 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1566 	int err = 0;
1567 
1568 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1569 	if (ipip_entry) {
1570 		lb_rif = ipip_entry->ol_lb;
1571 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1572 					      lb_rif->ul_rif_id, true);
1573 		if (err)
1574 			goto out;
1575 		lb_rif->common.mtu = ol_dev->mtu;
1576 	}
1577 
1578 out:
1579 	return err;
1580 }
1581 
1582 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1583 						struct net_device *ol_dev)
1584 {
1585 	struct mlxsw_sp_ipip_entry *ipip_entry;
1586 
1587 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1588 	if (ipip_entry)
1589 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1590 }
1591 
1592 static void
1593 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1594 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1595 {
1596 	if (ipip_entry->decap_fib_entry)
1597 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1598 }
1599 
1600 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1601 						  struct net_device *ol_dev)
1602 {
1603 	struct mlxsw_sp_ipip_entry *ipip_entry;
1604 
1605 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1606 	if (ipip_entry)
1607 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1608 }
1609 
1610 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1611 					 struct mlxsw_sp_rif *old_rif,
1612 					 struct mlxsw_sp_rif *new_rif);
1613 static int
1614 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1615 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1616 				 bool keep_encap,
1617 				 struct netlink_ext_ack *extack)
1618 {
1619 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1620 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1621 
1622 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1623 						     ipip_entry->ipipt,
1624 						     ipip_entry->ol_dev,
1625 						     extack);
1626 	if (IS_ERR(new_lb_rif))
1627 		return PTR_ERR(new_lb_rif);
1628 	ipip_entry->ol_lb = new_lb_rif;
1629 
1630 	if (keep_encap)
1631 		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1632 					     &new_lb_rif->common);
1633 
1634 	mlxsw_sp_rif_destroy(&old_lb_rif->common);
1635 
1636 	return 0;
1637 }
1638 
1639 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1640 					struct mlxsw_sp_rif *rif);
1641 
1642 /**
1643  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1644  * @mlxsw_sp: mlxsw_sp.
1645  * @ipip_entry: IPIP entry.
1646  * @recreate_loopback: Recreates the associated loopback RIF.
1647  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1648  *              relevant when recreate_loopback is true.
1649  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1650  *                   is only relevant when recreate_loopback is false.
1651  * @extack: extack.
1652  *
1653  * Return: Non-zero value on failure.
1654  */
1655 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1656 					struct mlxsw_sp_ipip_entry *ipip_entry,
1657 					bool recreate_loopback,
1658 					bool keep_encap,
1659 					bool update_nexthops,
1660 					struct netlink_ext_ack *extack)
1661 {
1662 	int err;
1663 
1664 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1665 	 * recreate it. That creates a window of opportunity where RALUE and
1666 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1667 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1668 	 * of RALUE, demote the decap route back.
1669 	 */
1670 	if (ipip_entry->decap_fib_entry)
1671 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1672 
1673 	if (recreate_loopback) {
1674 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1675 						       keep_encap, extack);
1676 		if (err)
1677 			return err;
1678 	} else if (update_nexthops) {
1679 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1680 					    &ipip_entry->ol_lb->common);
1681 	}
1682 
1683 	if (ipip_entry->ol_dev->flags & IFF_UP)
1684 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1685 
1686 	return 0;
1687 }
1688 
1689 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1690 						struct net_device *ol_dev,
1691 						struct netlink_ext_ack *extack)
1692 {
1693 	struct mlxsw_sp_ipip_entry *ipip_entry =
1694 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1695 
1696 	if (!ipip_entry)
1697 		return 0;
1698 
1699 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1700 						   true, false, false, extack);
1701 }
1702 
1703 static int
1704 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1705 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1706 				     struct net_device *ul_dev,
1707 				     bool *demote_this,
1708 				     struct netlink_ext_ack *extack)
1709 {
1710 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1711 	enum mlxsw_sp_l3proto ul_proto;
1712 	union mlxsw_sp_l3addr saddr;
1713 
1714 	/* Moving underlay to a different VRF might cause local address
1715 	 * conflict, and the conflicting tunnels need to be demoted.
1716 	 */
1717 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1718 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1719 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1720 						 saddr, ul_tb_id,
1721 						 ipip_entry)) {
1722 		*demote_this = true;
1723 		return 0;
1724 	}
1725 
1726 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1727 						   true, true, false, extack);
1728 }
1729 
1730 static int
1731 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1732 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1733 				    struct net_device *ul_dev)
1734 {
1735 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1736 						   false, false, true, NULL);
1737 }
1738 
1739 static int
1740 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1741 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1742 				      struct net_device *ul_dev)
1743 {
1744 	/* A down underlay device causes encapsulated packets to not be
1745 	 * forwarded, but decap still works. So refresh next hops without
1746 	 * touching anything else.
1747 	 */
1748 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1749 						   false, false, true, NULL);
1750 }
1751 
1752 static int
1753 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1754 					struct net_device *ol_dev,
1755 					struct netlink_ext_ack *extack)
1756 {
1757 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1758 	struct mlxsw_sp_ipip_entry *ipip_entry;
1759 	int err;
1760 
1761 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1762 	if (!ipip_entry)
1763 		/* A change might make a tunnel eligible for offloading, but
1764 		 * that is currently not implemented. What falls to slow path
1765 		 * stays there.
1766 		 */
1767 		return 0;
1768 
1769 	/* A change might make a tunnel not eligible for offloading. */
1770 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1771 						 ipip_entry->ipipt)) {
1772 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1773 		return 0;
1774 	}
1775 
1776 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1777 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1778 	return err;
1779 }
1780 
1781 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1782 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1783 {
1784 	struct net_device *ol_dev = ipip_entry->ol_dev;
1785 
1786 	if (ol_dev->flags & IFF_UP)
1787 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1788 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1789 }
1790 
1791 /* The configuration where several tunnels have the same local address in the
1792  * same underlay table needs special treatment in the HW. That is currently not
1793  * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
1795  * `except'.
1796  */
1797 bool
1798 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1799 				     enum mlxsw_sp_l3proto ul_proto,
1800 				     union mlxsw_sp_l3addr saddr,
1801 				     u32 ul_tb_id,
1802 				     const struct mlxsw_sp_ipip_entry *except)
1803 {
1804 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1805 
1806 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1807 				 ipip_list_node) {
1808 		if (ipip_entry != except &&
1809 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1810 						      ul_tb_id, ipip_entry)) {
1811 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1812 			return true;
1813 		}
1814 	}
1815 
1816 	return false;
1817 }
1818 
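/* Demote every tunnel whose underlay device is ul_dev. */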
1819 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1820 						     struct net_device *ul_dev)
1821 {
1822 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1823 
1824 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1825 				 ipip_list_node) {
1826 		struct net_device *ol_dev = ipip_entry->ol_dev;
1827 		struct net_device *ipip_ul_dev;
1828 
1829 		rcu_read_lock();
1830 		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1831 		rcu_read_unlock();
1832 		if (ipip_ul_dev == ul_dev)
1833 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1834 	}
1835 }
1836 
1837 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1838 				     struct net_device *ol_dev,
1839 				     unsigned long event,
1840 				     struct netdev_notifier_info *info)
1841 {
1842 	struct netdev_notifier_changeupper_info *chup;
1843 	struct netlink_ext_ack *extack;
1844 	int err = 0;
1845 
1846 	mutex_lock(&mlxsw_sp->router->lock);
1847 	switch (event) {
1848 	case NETDEV_REGISTER:
1849 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1850 		break;
1851 	case NETDEV_UNREGISTER:
1852 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1853 		break;
1854 	case NETDEV_UP:
1855 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1856 		break;
1857 	case NETDEV_DOWN:
1858 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1859 		break;
1860 	case NETDEV_CHANGEUPPER:
1861 		chup = container_of(info, typeof(*chup), info);
1862 		extack = info->extack;
1863 		if (netif_is_l3_master(chup->upper_dev))
1864 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1865 								   ol_dev,
1866 								   extack);
1867 		break;
1868 	case NETDEV_CHANGE:
1869 		extack = info->extack;
1870 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1871 							      ol_dev, extack);
1872 		break;
1873 	case NETDEV_CHANGEMTU:
1874 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1875 		break;
1876 	}
1877 	mutex_unlock(&mlxsw_sp->router->lock);
1878 	return err;
1879 }
1880 
1881 static int
1882 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1883 				   struct mlxsw_sp_ipip_entry *ipip_entry,
1884 				   struct net_device *ul_dev,
1885 				   bool *demote_this,
1886 				   unsigned long event,
1887 				   struct netdev_notifier_info *info)
1888 {
1889 	struct netdev_notifier_changeupper_info *chup;
1890 	struct netlink_ext_ack *extack;
1891 
1892 	switch (event) {
1893 	case NETDEV_CHANGEUPPER:
1894 		chup = container_of(info, typeof(*chup), info);
1895 		extack = info->extack;
1896 		if (netif_is_l3_master(chup->upper_dev))
1897 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1898 								    ipip_entry,
1899 								    ul_dev,
1900 								    demote_this,
1901 								    extack);
1902 		break;
1903 
1904 	case NETDEV_UP:
1905 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1906 							   ul_dev);
1907 	case NETDEV_DOWN:
1908 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1909 							     ipip_entry,
1910 							     ul_dev);
1911 	}
1912 	return 0;
1913 }
1914 
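/* Deliver an event on an underlay device to every tunnel whose underlay it
 * is. If handling the event requires demoting a tunnel, resume the walk from
 * the predecessor of the demoted entry, as the list was modified.
 */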
1915 int
1916 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1917 				 struct net_device *ul_dev,
1918 				 unsigned long event,
1919 				 struct netdev_notifier_info *info)
1920 {
1921 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1922 	int err = 0;
1923 
1924 	mutex_lock(&mlxsw_sp->router->lock);
1925 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1926 								ul_dev,
1927 								ipip_entry))) {
1928 		struct mlxsw_sp_ipip_entry *prev;
1929 		bool demote_this = false;
1930 
1931 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1932 							 ul_dev, &demote_this,
1933 							 event, info);
1934 		if (err) {
1935 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1936 								 ul_dev);
1937 			break;
1938 		}
1939 
1940 		if (demote_this) {
1941 			if (list_is_first(&ipip_entry->ipip_list_node,
1942 					  &mlxsw_sp->router->ipip_list))
1943 				prev = NULL;
1944 			else
1945 				/* This can't be cached from previous iteration,
1946 				 * because that entry could be gone now.
1947 				 */
1948 				prev = list_prev_entry(ipip_entry,
1949 						       ipip_list_node);
1950 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1951 			ipip_entry = prev;
1952 		}
1953 	}
1954 	mutex_unlock(&mlxsw_sp->router->lock);
1955 
1956 	return err;
1957 }
1958 
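/* Promote the local route matching the tunnel's underlay source address from
 * a trap entry to an NVE decapsulation entry. The route might not exist yet,
 * in which case only the decap configuration is recorded.
 */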
1959 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1960 				      enum mlxsw_sp_l3proto ul_proto,
1961 				      const union mlxsw_sp_l3addr *ul_sip,
1962 				      u32 tunnel_index)
1963 {
1964 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1965 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1966 	struct mlxsw_sp_fib_entry *fib_entry;
1967 	int err = 0;
1968 
1969 	mutex_lock(&mlxsw_sp->router->lock);
1970 
1971 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
1972 		err = -EINVAL;
1973 		goto out;
1974 	}
1975 
1976 	router->nve_decap_config.ul_tb_id = ul_tb_id;
1977 	router->nve_decap_config.tunnel_index = tunnel_index;
1978 	router->nve_decap_config.ul_proto = ul_proto;
1979 	router->nve_decap_config.ul_sip = *ul_sip;
1980 	router->nve_decap_config.valid = true;
1981 
1982 	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface.
1984 	 */
1985 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1986 							 ul_proto, ul_sip,
1987 							 type);
1988 	if (!fib_entry)
1989 		goto out;
1990 
1991 	fib_entry->decap.tunnel_index = tunnel_index;
1992 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1993 
1994 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1995 	if (err)
1996 		goto err_fib_entry_update;
1997 
1998 	goto out;
1999 
2000 err_fib_entry_update:
2001 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2002 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2003 out:
2004 	mutex_unlock(&mlxsw_sp->router->lock);
2005 	return err;
2006 }
2007 
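/* Invalidate the recorded decap configuration and revert the matching local
 * route, if any, back to a trap entry.
 */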
2008 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2009 				      enum mlxsw_sp_l3proto ul_proto,
2010 				      const union mlxsw_sp_l3addr *ul_sip)
2011 {
2012 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2013 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2014 	struct mlxsw_sp_fib_entry *fib_entry;
2015 
2016 	mutex_lock(&mlxsw_sp->router->lock);
2017 
2018 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2019 		goto out;
2020 
2021 	router->nve_decap_config.valid = false;
2022 
2023 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2024 							 ul_proto, ul_sip,
2025 							 type);
2026 	if (!fib_entry)
2027 		goto out;
2028 
2029 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2030 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2031 out:
2032 	mutex_unlock(&mlxsw_sp->router->lock);
2033 }
2034 
2035 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2036 					 u32 ul_tb_id,
2037 					 enum mlxsw_sp_l3proto ul_proto,
2038 					 const union mlxsw_sp_l3addr *ul_sip)
2039 {
2040 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2041 
2042 	return router->nve_decap_config.valid &&
2043 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
2044 	       router->nve_decap_config.ul_proto == ul_proto &&
2045 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2046 		       sizeof(*ul_sip));
2047 }
2048 
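/* Neighbour entries are hashed by the kernel neighbour they mirror. */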
2049 struct mlxsw_sp_neigh_key {
2050 	struct neighbour *n;
2051 };
2052 
2053 struct mlxsw_sp_neigh_entry {
2054 	struct list_head rif_list_node;
2055 	struct rhash_head ht_node;
2056 	struct mlxsw_sp_neigh_key key;
2057 	u16 rif;
2058 	bool connected;
2059 	unsigned char ha[ETH_ALEN];
2060 	struct list_head nexthop_list; /* list of nexthops using
2061 					* this neigh entry
2062 					*/
2063 	struct list_head nexthop_neighs_list_node;
2064 	unsigned int counter_index;
2065 	bool counter_valid;
2066 };
2067 
2068 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2069 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2070 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2071 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
2072 };
2073 
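/* Iterate over the neighbour entries of a RIF: pass NULL to get the first
 * entry; NULL is returned past the last one.
 */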
2074 struct mlxsw_sp_neigh_entry *
2075 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2076 			struct mlxsw_sp_neigh_entry *neigh_entry)
2077 {
2078 	if (!neigh_entry) {
2079 		if (list_empty(&rif->neigh_list))
2080 			return NULL;
2081 		else
2082 			return list_first_entry(&rif->neigh_list,
2083 						typeof(*neigh_entry),
2084 						rif_list_node);
2085 	}
2086 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2087 		return NULL;
2088 	return list_next_entry(neigh_entry, rif_list_node);
2089 }
2090 
2091 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2092 {
2093 	return neigh_entry->key.n->tbl->family;
2094 }
2095 
2096 unsigned char *
2097 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2098 {
2099 	return neigh_entry->ha;
2100 }
2101 
2102 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2103 {
2104 	struct neighbour *n;
2105 
2106 	n = neigh_entry->key.n;
2107 	return ntohl(*((__be32 *) n->primary_key));
2108 }
2109 
2110 struct in6_addr *
2111 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2112 {
2113 	struct neighbour *n;
2114 
2115 	n = neigh_entry->key.n;
2116 	return (struct in6_addr *) &n->primary_key;
2117 }
2118 
2119 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2120 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2121 			       u64 *p_counter)
2122 {
2123 	if (!neigh_entry->counter_valid)
2124 		return -EINVAL;
2125 
2126 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2127 					 p_counter, NULL);
2128 }
2129 
2130 static struct mlxsw_sp_neigh_entry *
2131 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2132 			   u16 rif)
2133 {
2134 	struct mlxsw_sp_neigh_entry *neigh_entry;
2135 
2136 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2137 	if (!neigh_entry)
2138 		return NULL;
2139 
2140 	neigh_entry->key.n = n;
2141 	neigh_entry->rif = rif;
2142 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2143 
2144 	return neigh_entry;
2145 }
2146 
2147 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2148 {
2149 	kfree(neigh_entry);
2150 }
2151 
2152 static int
2153 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2154 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2155 {
2156 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2157 				      &neigh_entry->ht_node,
2158 				      mlxsw_sp_neigh_ht_params);
2159 }
2160 
2161 static void
2162 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2163 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2164 {
2165 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2166 			       &neigh_entry->ht_node,
2167 			       mlxsw_sp_neigh_ht_params);
2168 }
2169 
2170 static bool
2171 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2172 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2173 {
2174 	struct devlink *devlink;
2175 	const char *table_name;
2176 
2177 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2178 	case AF_INET:
2179 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2180 		break;
2181 	case AF_INET6:
2182 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2183 		break;
2184 	default:
2185 		WARN_ON(1);
2186 		return false;
2187 	}
2188 
2189 	devlink = priv_to_devlink(mlxsw_sp->core);
2190 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2191 }
2192 
2193 static void
2194 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2195 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2196 {
2197 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2198 		return;
2199 
2200 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2201 		return;
2202 
2203 	neigh_entry->counter_valid = true;
2204 }
2205 
2206 static void
2207 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2208 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2209 {
2210 	if (!neigh_entry->counter_valid)
2211 		return;
2212 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2213 				   neigh_entry->counter_index);
2214 	neigh_entry->counter_valid = false;
2215 }
2216 
2217 static struct mlxsw_sp_neigh_entry *
2218 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2219 {
2220 	struct mlxsw_sp_neigh_entry *neigh_entry;
2221 	struct mlxsw_sp_rif *rif;
2222 	int err;
2223 
2224 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2225 	if (!rif)
2226 		return ERR_PTR(-EINVAL);
2227 
2228 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2229 	if (!neigh_entry)
2230 		return ERR_PTR(-ENOMEM);
2231 
2232 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2233 	if (err)
2234 		goto err_neigh_entry_insert;
2235 
2236 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2237 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2238 
2239 	return neigh_entry;
2240 
2241 err_neigh_entry_insert:
2242 	mlxsw_sp_neigh_entry_free(neigh_entry);
2243 	return ERR_PTR(err);
2244 }
2245 
2246 static void
2247 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2248 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2249 {
2250 	list_del(&neigh_entry->rif_list_node);
2251 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2252 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2253 	mlxsw_sp_neigh_entry_free(neigh_entry);
2254 }
2255 
2256 static struct mlxsw_sp_neigh_entry *
2257 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2258 {
2259 	struct mlxsw_sp_neigh_key key;
2260 
2261 	key.n = n;
2262 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2263 				      &key, mlxsw_sp_neigh_ht_params);
2264 }
2265 
2266 static void
2267 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2268 {
2269 	unsigned long interval;
2270 
2271 #if IS_ENABLED(CONFIG_IPV6)
2272 	interval = min_t(unsigned long,
2273 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2274 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2275 #else
2276 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2277 #endif
2278 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2279 }
2280 
2281 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2282 						   char *rauhtd_pl,
2283 						   int ent_index)
2284 {
2285 	struct net_device *dev;
2286 	struct neighbour *n;
2287 	__be32 dipn;
2288 	u32 dip;
2289 	u16 rif;
2290 
2291 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2292 
2293 	if (!mlxsw_sp->router->rifs[rif]) {
2294 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2295 		return;
2296 	}
2297 
2298 	dipn = htonl(dip);
2299 	dev = mlxsw_sp->router->rifs[rif]->dev;
2300 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2301 	if (!n)
2302 		return;
2303 
2304 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2305 	neigh_event_send(n, NULL);
2306 	neigh_release(n);
2307 }
2308 
2309 #if IS_ENABLED(CONFIG_IPV6)
2310 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2311 						   char *rauhtd_pl,
2312 						   int rec_index)
2313 {
2314 	struct net_device *dev;
2315 	struct neighbour *n;
2316 	struct in6_addr dip;
2317 	u16 rif;
2318 
2319 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2320 					 (char *) &dip);
2321 
2322 	if (!mlxsw_sp->router->rifs[rif]) {
2323 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2324 		return;
2325 	}
2326 
2327 	dev = mlxsw_sp->router->rifs[rif]->dev;
2328 	n = neigh_lookup(&nd_tbl, &dip, dev);
2329 	if (!n)
2330 		return;
2331 
2332 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2333 	neigh_event_send(n, NULL);
2334 	neigh_release(n);
2335 }
2336 #else
2337 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2338 						   char *rauhtd_pl,
2339 						   int rec_index)
2340 {
2341 }
2342 #endif
2343 
2344 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2345 						   char *rauhtd_pl,
2346 						   int rec_index)
2347 {
2348 	u8 num_entries;
2349 	int i;
2350 
2351 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2352 								rec_index);
2353 	/* Hardware starts counting at 0, so add 1. */
2354 	num_entries++;
2355 
2356 	/* Each record consists of several neighbour entries. */
2357 	for (i = 0; i < num_entries; i++) {
2358 		int ent_index;
2359 
2360 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2361 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2362 						       ent_index);
	}
}
2366 
2367 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2368 						   char *rauhtd_pl,
2369 						   int rec_index)
2370 {
2371 	/* One record contains one entry. */
2372 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2373 					       rec_index);
2374 }
2375 
2376 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2377 					      char *rauhtd_pl, int rec_index)
2378 {
2379 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2380 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2381 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2382 						       rec_index);
2383 		break;
2384 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2385 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2386 						       rec_index);
2387 		break;
2388 	}
2389 }
2390 
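/* The dump is full - and another iteration is needed - when the maximum
 * number of records was returned and the last record is completely full.
 */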
2391 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2392 {
2393 	u8 num_rec, last_rec_index, num_entries;
2394 
2395 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2396 	last_rec_index = num_rec - 1;
2397 
2398 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2399 		return false;
2400 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2401 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2402 		return true;
2403 
2404 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2405 								last_rec_index);
2406 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2407 		return true;
2408 	return false;
2409 }
2410 
2411 static int
2412 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2413 				       char *rauhtd_pl,
2414 				       enum mlxsw_reg_rauhtd_type type)
2415 {
2416 	int i, num_rec;
2417 	int err;
2418 
2419 	/* Ensure the RIF we read from the device does not change mid-dump. */
2420 	mutex_lock(&mlxsw_sp->router->lock);
2421 	do {
2422 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2423 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2424 				      rauhtd_pl);
2425 		if (err) {
2426 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2427 			break;
2428 		}
2429 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2430 		for (i = 0; i < num_rec; i++)
2431 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2432 							  i);
2433 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2434 	mutex_unlock(&mlxsw_sp->router->lock);
2435 
2436 	return err;
2437 }
2438 
2439 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2440 {
2441 	enum mlxsw_reg_rauhtd_type type;
2442 	char *rauhtd_pl;
2443 	int err;
2444 
2445 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2446 	if (!rauhtd_pl)
2447 		return -ENOMEM;
2448 
2449 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2450 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2451 	if (err)
2452 		goto out;
2453 
2454 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2455 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2456 out:
2457 	kfree(rauhtd_pl);
2458 	return err;
2459 }
2460 
2461 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2462 {
2463 	struct mlxsw_sp_neigh_entry *neigh_entry;
2464 
2465 	mutex_lock(&mlxsw_sp->router->lock);
2466 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2467 			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think this neigh
2469 		 * is active regardless of the traffic.
2470 		 */
2471 		neigh_event_send(neigh_entry->key.n, NULL);
2472 	mutex_unlock(&mlxsw_sp->router->lock);
2473 }
2474 
2475 static void
2476 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2477 {
2478 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2479 
2480 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2481 			       msecs_to_jiffies(interval));
2482 }
2483 
2484 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2485 {
2486 	struct mlxsw_sp_router *router;
2487 	int err;
2488 
2489 	router = container_of(work, struct mlxsw_sp_router,
2490 			      neighs_update.dw.work);
2491 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2492 	if (err)
2493 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2494 
2495 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2496 
2497 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2498 }
2499 
2500 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2501 {
2502 	struct mlxsw_sp_neigh_entry *neigh_entry;
2503 	struct mlxsw_sp_router *router;
2504 
2505 	router = container_of(work, struct mlxsw_sp_router,
2506 			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those that are unresolved and
	 * send ARP on them. This solves the chicken-and-egg problem in which
	 * the nexthop would not get offloaded until the neighbour is resolved,
	 * but the neighbour would never get resolved as long as traffic flows
	 * in the HW through a different nexthop.
	 */
2513 	mutex_lock(&router->lock);
2514 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2515 			    nexthop_neighs_list_node)
2516 		if (!neigh_entry->connected)
2517 			neigh_event_send(neigh_entry->key.n, NULL);
2518 	mutex_unlock(&router->lock);
2519 
2520 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2521 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2522 }
2523 
2524 static void
2525 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2526 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2527 			      bool removing, bool dead);
2528 
2529 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2530 {
2531 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2532 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2533 }
2534 
2535 static int
2536 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2537 				struct mlxsw_sp_neigh_entry *neigh_entry,
2538 				enum mlxsw_reg_rauht_op op)
2539 {
2540 	struct neighbour *n = neigh_entry->key.n;
2541 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2542 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2543 
2544 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2545 			      dip);
2546 	if (neigh_entry->counter_valid)
2547 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2548 					     neigh_entry->counter_index);
2549 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2550 }
2551 
2552 static int
2553 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2554 				struct mlxsw_sp_neigh_entry *neigh_entry,
2555 				enum mlxsw_reg_rauht_op op)
2556 {
2557 	struct neighbour *n = neigh_entry->key.n;
2558 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2559 	const char *dip = n->primary_key;
2560 
2561 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2562 			      dip);
2563 	if (neigh_entry->counter_valid)
2564 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2565 					     neigh_entry->counter_index);
2566 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2567 }
2568 
2569 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2570 {
2571 	struct neighbour *n = neigh_entry->key.n;
2572 
2573 	/* Packets with a link-local destination address are trapped
2574 	 * after LPM lookup and never reach the neighbour table, so
2575 	 * there is no need to program such neighbours to the device.
2576 	 */
2577 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2578 	    IPV6_ADDR_LINKLOCAL)
2579 		return true;
2580 	return false;
2581 }
2582 
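/* Program the neighbour entry to the device or remove it, and reflect the
 * result in the kernel neighbour's NTF_OFFLOADED flag.
 */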
2583 static void
2584 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2585 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2586 			    bool adding)
2587 {
2588 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2589 	int err;
2590 
2591 	if (!adding && !neigh_entry->connected)
2592 		return;
2593 	neigh_entry->connected = adding;
2594 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2595 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2596 						      op);
2597 		if (err)
2598 			return;
2599 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2600 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2601 			return;
2602 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2603 						      op);
2604 		if (err)
2605 			return;
2606 	} else {
2607 		WARN_ON_ONCE(1);
2608 		return;
2609 	}
2610 
2611 	if (adding)
2612 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2613 	else
2614 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2615 }
2616 
2617 void
2618 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2619 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2620 				    bool adding)
2621 {
2622 	if (adding)
2623 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2624 	else
2625 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2626 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2627 }
2628 
2629 struct mlxsw_sp_netevent_work {
2630 	struct work_struct work;
2631 	struct mlxsw_sp *mlxsw_sp;
2632 	struct neighbour *n;
2633 };
2634 
2635 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2636 {
2637 	struct mlxsw_sp_netevent_work *net_work =
2638 		container_of(work, struct mlxsw_sp_netevent_work, work);
2639 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2640 	struct mlxsw_sp_neigh_entry *neigh_entry;
2641 	struct neighbour *n = net_work->n;
2642 	unsigned char ha[ETH_ALEN];
2643 	bool entry_connected;
2644 	u8 nud_state, dead;
2645 
2646 	/* If these parameters are changed after we release the lock,
2647 	 * then we are guaranteed to receive another event letting us
2648 	 * know about it.
2649 	 */
2650 	read_lock_bh(&n->lock);
2651 	memcpy(ha, n->ha, ETH_ALEN);
2652 	nud_state = n->nud_state;
2653 	dead = n->dead;
2654 	read_unlock_bh(&n->lock);
2655 
2656 	mutex_lock(&mlxsw_sp->router->lock);
2657 	mlxsw_sp_span_respin(mlxsw_sp);
2658 
2659 	entry_connected = nud_state & NUD_VALID && !dead;
2660 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2661 	if (!entry_connected && !neigh_entry)
2662 		goto out;
2663 	if (!neigh_entry) {
2664 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2665 		if (IS_ERR(neigh_entry))
2666 			goto out;
2667 	}
2668 
2669 	if (neigh_entry->connected && entry_connected &&
2670 	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2671 		goto out;
2672 
2673 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2674 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2675 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2676 				      dead);
2677 
2678 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2679 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2680 
2681 out:
2682 	mutex_unlock(&mlxsw_sp->router->lock);
2683 	neigh_release(n);
2684 	kfree(net_work);
2685 }
2686 
2687 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2688 
2689 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2690 {
2691 	struct mlxsw_sp_netevent_work *net_work =
2692 		container_of(work, struct mlxsw_sp_netevent_work, work);
2693 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2694 
2695 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2696 	kfree(net_work);
2697 }
2698 
2699 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2700 
2701 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2702 {
2703 	struct mlxsw_sp_netevent_work *net_work =
2704 		container_of(work, struct mlxsw_sp_netevent_work, work);
2705 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2706 
2707 	__mlxsw_sp_router_init(mlxsw_sp);
2708 	kfree(net_work);
2709 }
2710 
2711 static int mlxsw_sp_router_schedule_work(struct net *net,
2712 					 struct notifier_block *nb,
2713 					 void (*cb)(struct work_struct *))
2714 {
2715 	struct mlxsw_sp_netevent_work *net_work;
2716 	struct mlxsw_sp_router *router;
2717 
2718 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2719 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2720 		return NOTIFY_DONE;
2721 
2722 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2723 	if (!net_work)
2724 		return NOTIFY_BAD;
2725 
2726 	INIT_WORK(&net_work->work, cb);
2727 	net_work->mlxsw_sp = router->mlxsw_sp;
2728 	mlxsw_core_schedule_work(&net_work->work);
2729 	return NOTIFY_DONE;
2730 }
2731 
2732 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2733 					  unsigned long event, void *ptr)
2734 {
2735 	struct mlxsw_sp_netevent_work *net_work;
2736 	struct mlxsw_sp_port *mlxsw_sp_port;
2737 	struct mlxsw_sp *mlxsw_sp;
2738 	unsigned long interval;
2739 	struct neigh_parms *p;
2740 	struct neighbour *n;
2741 
2742 	switch (event) {
2743 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2744 		p = ptr;
2745 
2746 		/* We don't care about changes in the default table. */
2747 		if (!p->dev || (p->tbl->family != AF_INET &&
2748 				p->tbl->family != AF_INET6))
2749 			return NOTIFY_DONE;
2750 
2751 		/* We are in atomic context and can't take RTNL mutex,
2752 		 * so use RCU variant to walk the device chain.
2753 		 */
2754 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2755 		if (!mlxsw_sp_port)
2756 			return NOTIFY_DONE;
2757 
2758 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2759 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2760 		mlxsw_sp->router->neighs_update.interval = interval;
2761 
2762 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2763 		break;
2764 	case NETEVENT_NEIGH_UPDATE:
2765 		n = ptr;
2766 
2767 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2768 			return NOTIFY_DONE;
2769 
2770 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2771 		if (!mlxsw_sp_port)
2772 			return NOTIFY_DONE;
2773 
2774 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2775 		if (!net_work) {
2776 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
2777 			return NOTIFY_BAD;
2778 		}
2779 
2780 		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2781 		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2782 		net_work->n = n;
2783 
		/* Take a reference to ensure the neighbour won't be
		 * destroyed until we drop the reference in the delayed
		 * work.
		 */
2788 		neigh_clone(n);
2789 		mlxsw_core_schedule_work(&net_work->work);
2790 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2791 		break;
2792 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2793 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2794 		return mlxsw_sp_router_schedule_work(ptr, nb,
2795 				mlxsw_sp_router_mp_hash_event_work);
2796 
2797 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2798 		return mlxsw_sp_router_schedule_work(ptr, nb,
2799 				mlxsw_sp_router_update_priority_work);
2800 	}
2801 
2802 	return NOTIFY_DONE;
2803 }
2804 
2805 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2806 {
2807 	int err;
2808 
2809 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2810 			      &mlxsw_sp_neigh_ht_params);
2811 	if (err)
2812 		return err;
2813 
2814 	/* Initialize the polling interval according to the default
2815 	 * table.
2816 	 */
2817 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2818 
	/* Create the delayed works for neighbour activity update and
	 * unresolved nexthop probing.
	 */
2820 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2821 			  mlxsw_sp_router_neighs_update_work);
2822 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2823 			  mlxsw_sp_router_probe_unresolved_nexthops);
2824 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2825 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2826 	return 0;
2827 }
2828 
2829 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2830 {
2831 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2832 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2833 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2834 }
2835 
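/* A RIF is being destroyed: un-offload all neighbour entries using it and
 * destroy them.
 */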
2836 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2837 					 struct mlxsw_sp_rif *rif)
2838 {
2839 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2840 
2841 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2842 				 rif_list_node) {
2843 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2844 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2845 	}
2846 }
2847 
2848 enum mlxsw_sp_nexthop_type {
2849 	MLXSW_SP_NEXTHOP_TYPE_ETH,
2850 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
2851 };
2852 
2853 enum mlxsw_sp_nexthop_action {
2854 	/* Nexthop forwards packets to an egress RIF */
2855 	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
2856 	/* Nexthop discards packets */
2857 	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
2858 	/* Nexthop traps packets */
2859 	MLXSW_SP_NEXTHOP_ACTION_TRAP,
2860 };
2861 
2862 struct mlxsw_sp_nexthop_key {
2863 	struct fib_nh *fib_nh;
2864 };
2865 
2866 struct mlxsw_sp_nexthop {
2867 	struct list_head neigh_list_node; /* member of neigh entry list */
2868 	struct list_head rif_list_node;
2869 	struct list_head router_list_node;
2870 	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
2871 						   * this nexthop belongs to
2872 						   */
2873 	struct rhash_head ht_node;
2874 	struct neigh_table *neigh_tbl;
2875 	struct mlxsw_sp_nexthop_key key;
2876 	unsigned char gw_addr[sizeof(struct in6_addr)];
2877 	int ifindex;
2878 	int nh_weight;
2879 	int norm_nh_weight;
2880 	int num_adj_entries;
2881 	struct mlxsw_sp_rif *rif;
2882 	u8 should_offload:1, /* set indicates this nexthop should be written
2883 			      * to the adjacency table.
2884 			      */
2885 	   offloaded:1, /* set indicates this nexthop was written to the
2886 			 * adjacency table.
2887 			 */
2888 	   update:1; /* set indicates this nexthop should be updated in the
		      * adjacency table (e.g., its MAC changed).
2890 		      */
2891 	enum mlxsw_sp_nexthop_action action;
2892 	enum mlxsw_sp_nexthop_type type;
2893 	union {
2894 		struct mlxsw_sp_neigh_entry *neigh_entry;
2895 		struct mlxsw_sp_ipip_entry *ipip_entry;
2896 	};
2897 	unsigned int counter_index;
2898 	bool counter_valid;
2899 };
2900 
2901 enum mlxsw_sp_nexthop_group_type {
2902 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
2903 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
2904 	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
2905 };
2906 
2907 struct mlxsw_sp_nexthop_group_info {
2908 	struct mlxsw_sp_nexthop_group *nh_grp;
2909 	u32 adj_index;
2910 	u16 ecmp_size;
2911 	u16 count;
2912 	int sum_norm_weight;
2913 	u8 adj_index_valid:1,
2914 	   gateway:1, /* routes using the group use a gateway */
2915 	   is_resilient:1;
2916 	struct list_head list; /* member in nh_res_grp_list */
	struct mlxsw_sp_nexthop nexthops[];
2918 #define nh_rif	nexthops[0].rif
2919 };
2920 
2921 struct mlxsw_sp_nexthop_group_vr_key {
2922 	u16 vr_id;
2923 	enum mlxsw_sp_l3proto proto;
2924 };
2925 
2926 struct mlxsw_sp_nexthop_group_vr_entry {
2927 	struct list_head list; /* member in vr_list */
2928 	struct rhash_head ht_node; /* member in vr_ht */
2929 	refcount_t ref_count;
2930 	struct mlxsw_sp_nexthop_group_vr_key key;
2931 };
2932 
2933 struct mlxsw_sp_nexthop_group {
2934 	struct rhash_head ht_node;
2935 	struct list_head fib_list; /* list of fib entries that use this group */
2936 	union {
2937 		struct {
2938 			struct fib_info *fi;
2939 		} ipv4;
2940 		struct {
2941 			u32 id;
2942 		} obj;
2943 	};
2944 	struct mlxsw_sp_nexthop_group_info *nhgi;
2945 	struct list_head vr_list;
2946 	struct rhashtable vr_ht;
2947 	enum mlxsw_sp_nexthop_group_type type;
2948 	bool can_destroy;
2949 };
2950 
2951 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2952 				    struct mlxsw_sp_nexthop *nh)
2953 {
2954 	struct devlink *devlink;
2955 
2956 	devlink = priv_to_devlink(mlxsw_sp->core);
2957 	if (!devlink_dpipe_table_counter_enabled(devlink,
2958 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2959 		return;
2960 
2961 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2962 		return;
2963 
2964 	nh->counter_valid = true;
2965 }
2966 
2967 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2968 				   struct mlxsw_sp_nexthop *nh)
2969 {
2970 	if (!nh->counter_valid)
2971 		return;
2972 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2973 	nh->counter_valid = false;
2974 }
2975 
2976 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2977 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2978 {
2979 	if (!nh->counter_valid)
2980 		return -EINVAL;
2981 
2982 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2983 					 p_counter, NULL);
2984 }
2985 
2986 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2987 					       struct mlxsw_sp_nexthop *nh)
2988 {
2989 	if (!nh) {
2990 		if (list_empty(&router->nexthop_list))
2991 			return NULL;
2992 		else
2993 			return list_first_entry(&router->nexthop_list,
2994 						typeof(*nh), router_list_node);
2995 	}
2996 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2997 		return NULL;
2998 	return list_next_entry(nh, router_list_node);
2999 }
3000 
3001 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3002 {
3003 	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3004 }
3005 
3006 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3007 {
3008 	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3009 	    !mlxsw_sp_nexthop_is_forward(nh))
3010 		return NULL;
3011 	return nh->neigh_entry->ha;
3012 }
3013 
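/* Return the group's adjacency range and the nexthop's offset within it. The
 * offset is the sum of the adjacency entries of the offloaded nexthops that
 * precede this one.
 */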
3014 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3015 			     u32 *p_adj_size, u32 *p_adj_hash_index)
3016 {
3017 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3018 	u32 adj_hash_index = 0;
3019 	int i;
3020 
3021 	if (!nh->offloaded || !nhgi->adj_index_valid)
3022 		return -EINVAL;
3023 
3024 	*p_adj_index = nhgi->adj_index;
3025 	*p_adj_size = nhgi->ecmp_size;
3026 
3027 	for (i = 0; i < nhgi->count; i++) {
3028 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3029 
3030 		if (nh_iter == nh)
3031 			break;
3032 		if (nh_iter->offloaded)
3033 			adj_hash_index += nh_iter->num_adj_entries;
3034 	}
3035 
3036 	*p_adj_hash_index = adj_hash_index;
3037 	return 0;
3038 }
3039 
3040 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3041 {
3042 	return nh->rif;
3043 }
3044 
3045 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3046 {
3047 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3048 	int i;
3049 
3050 	for (i = 0; i < nhgi->count; i++) {
3051 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3052 
3053 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3054 			return true;
3055 	}
3056 	return false;
3057 }
3058 
3059 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3060 	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3061 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3062 	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3063 	.automatic_shrinking = true,
3064 };
3065 
3066 static struct mlxsw_sp_nexthop_group_vr_entry *
3067 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3068 				       const struct mlxsw_sp_fib *fib)
3069 {
3070 	struct mlxsw_sp_nexthop_group_vr_key key;
3071 
3072 	memset(&key, 0, sizeof(key));
3073 	key.vr_id = fib->vr->id;
3074 	key.proto = fib->proto;
3075 	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3076 				      mlxsw_sp_nexthop_group_vr_ht_params);
3077 }
3078 
3079 static int
3080 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3081 				       const struct mlxsw_sp_fib *fib)
3082 {
3083 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3084 	int err;
3085 
3086 	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3087 	if (!vr_entry)
3088 		return -ENOMEM;
3089 
3090 	vr_entry->key.vr_id = fib->vr->id;
3091 	vr_entry->key.proto = fib->proto;
3092 	refcount_set(&vr_entry->ref_count, 1);
3093 
3094 	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3095 				     mlxsw_sp_nexthop_group_vr_ht_params);
3096 	if (err)
3097 		goto err_hashtable_insert;
3098 
3099 	list_add(&vr_entry->list, &nh_grp->vr_list);
3100 
3101 	return 0;
3102 
3103 err_hashtable_insert:
3104 	kfree(vr_entry);
3105 	return err;
3106 }
3107 
3108 static void
3109 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3110 					struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3111 {
3112 	list_del(&vr_entry->list);
3113 	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3114 			       mlxsw_sp_nexthop_group_vr_ht_params);
3115 	kfree(vr_entry);
3116 }
3117 
3118 static int
3119 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3120 			       const struct mlxsw_sp_fib *fib)
3121 {
3122 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3123 
3124 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3125 	if (vr_entry) {
3126 		refcount_inc(&vr_entry->ref_count);
3127 		return 0;
3128 	}
3129 
3130 	return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3131 }
3132 
3133 static void
3134 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3135 				 const struct mlxsw_sp_fib *fib)
3136 {
3137 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3138 
3139 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3140 	if (WARN_ON_ONCE(!vr_entry))
3141 		return;
3142 
3143 	if (!refcount_dec_and_test(&vr_entry->ref_count))
3144 		return;
3145 
3146 	mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3147 }
3148 
3149 struct mlxsw_sp_nexthop_group_cmp_arg {
3150 	enum mlxsw_sp_nexthop_group_type type;
3151 	union {
3152 		struct fib_info *fi;
3153 		struct mlxsw_sp_fib6_entry *fib6_entry;
3154 		u32 id;
3155 	};
3156 };
3157 
3158 static bool
3159 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3160 				    const struct in6_addr *gw, int ifindex,
3161 				    int weight)
3162 {
3163 	int i;
3164 
3165 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3166 		const struct mlxsw_sp_nexthop *nh;
3167 
3168 		nh = &nh_grp->nhgi->nexthops[i];
3169 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3170 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3171 			return true;
3172 	}
3173 
3174 	return false;
3175 }
3176 
3177 static bool
3178 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3179 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
3180 {
3181 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3182 
3183 	if (nh_grp->nhgi->count != fib6_entry->nrt6)
3184 		return false;
3185 
3186 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3187 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3188 		struct in6_addr *gw;
3189 		int ifindex, weight;
3190 
3191 		ifindex = fib6_nh->fib_nh_dev->ifindex;
3192 		weight = fib6_nh->fib_nh_weight;
3193 		gw = &fib6_nh->fib_nh_gw6;
3194 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3195 							 weight))
3196 			return false;
3197 	}
3198 
3199 	return true;
3200 }
3201 
3202 static int
3203 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3204 {
3205 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3206 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3207 
3208 	if (nh_grp->type != cmp_arg->type)
3209 		return 1;
3210 
3211 	switch (cmp_arg->type) {
3212 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3213 		return cmp_arg->fi != nh_grp->ipv4.fi;
3214 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3215 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3216 						    cmp_arg->fib6_entry);
3217 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3218 		return cmp_arg->id != nh_grp->obj.id;
3219 	default:
3220 		WARN_ON(1);
3221 		return 1;
3222 	}
3223 }
3224 
3225 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3226 {
3227 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
3228 	const struct mlxsw_sp_nexthop *nh;
3229 	struct fib_info *fi;
3230 	unsigned int val;
3231 	int i;
3232 
3233 	switch (nh_grp->type) {
3234 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3235 		fi = nh_grp->ipv4.fi;
3236 		return jhash(&fi, sizeof(fi), seed);
3237 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3238 		val = nh_grp->nhgi->count;
3239 		for (i = 0; i < nh_grp->nhgi->count; i++) {
3240 			nh = &nh_grp->nhgi->nexthops[i];
3241 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3242 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3243 		}
3244 		return jhash(&val, sizeof(val), seed);
3245 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3246 		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3247 	default:
3248 		WARN_ON(1);
3249 		return 0;
3250 	}
3251 }
3252 
3253 static u32
3254 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3255 {
3256 	unsigned int val = fib6_entry->nrt6;
3257 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3258 
3259 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3260 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3261 		struct net_device *dev = fib6_nh->fib_nh_dev;
3262 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3263 
3264 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3265 		val ^= jhash(gw, sizeof(*gw), seed);
3266 	}
3267 
3268 	return jhash(&val, sizeof(val), seed);
3269 }
3270 
3271 static u32
3272 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3273 {
3274 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3275 
3276 	switch (cmp_arg->type) {
3277 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3278 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3279 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3280 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3281 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3282 		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3283 	default:
3284 		WARN_ON(1);
3285 		return 0;
3286 	}
3287 }
3288 
3289 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3290 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3291 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3292 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3293 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3294 };
3295 
3296 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3297 					 struct mlxsw_sp_nexthop_group *nh_grp)
3298 {
3299 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3300 	    !nh_grp->nhgi->gateway)
3301 		return 0;
3302 
3303 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3304 				      &nh_grp->ht_node,
3305 				      mlxsw_sp_nexthop_group_ht_params);
3306 }
3307 
3308 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3309 					  struct mlxsw_sp_nexthop_group *nh_grp)
3310 {
3311 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3312 	    !nh_grp->nhgi->gateway)
3313 		return;
3314 
3315 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3316 			       &nh_grp->ht_node,
3317 			       mlxsw_sp_nexthop_group_ht_params);
3318 }
3319 
3320 static struct mlxsw_sp_nexthop_group *
3321 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3322 			       struct fib_info *fi)
3323 {
3324 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3325 
3326 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3327 	cmp_arg.fi = fi;
3328 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3329 				      &cmp_arg,
3330 				      mlxsw_sp_nexthop_group_ht_params);
3331 }
3332 
3333 static struct mlxsw_sp_nexthop_group *
3334 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3335 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3336 {
3337 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3338 
3339 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3340 	cmp_arg.fib6_entry = fib6_entry;
3341 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3342 				      &cmp_arg,
3343 				      mlxsw_sp_nexthop_group_ht_params);
3344 }
3345 
3346 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3347 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3348 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3349 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3350 };
3351 
3352 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3353 				   struct mlxsw_sp_nexthop *nh)
3354 {
3355 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3356 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3357 }
3358 
3359 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3360 				    struct mlxsw_sp_nexthop *nh)
3361 {
3362 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3363 			       mlxsw_sp_nexthop_ht_params);
3364 }
3365 
3366 static struct mlxsw_sp_nexthop *
3367 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3368 			struct mlxsw_sp_nexthop_key key)
3369 {
3370 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3371 				      mlxsw_sp_nexthop_ht_params);
3372 }
3373 
3374 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3375 					     enum mlxsw_sp_l3proto proto,
3376 					     u16 vr_id,
3377 					     u32 adj_index, u16 ecmp_size,
3378 					     u32 new_adj_index,
3379 					     u16 new_ecmp_size)
3380 {
3381 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3382 
3383 	mlxsw_reg_raleu_pack(raleu_pl,
3384 			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3385 			     adj_index, ecmp_size, new_adj_index,
3386 			     new_ecmp_size);
3387 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3388 }
3389 
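/* Switch all virtual routers using the group over to its new adjacency
 * range. On failure, roll the already-updated ones back to the old range.
 */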
3390 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3391 					  struct mlxsw_sp_nexthop_group *nh_grp,
3392 					  u32 old_adj_index, u16 old_ecmp_size)
3393 {
3394 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3395 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3396 	int err;
3397 
3398 	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3399 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3400 							vr_entry->key.proto,
3401 							vr_entry->key.vr_id,
3402 							old_adj_index,
3403 							old_ecmp_size,
3404 							nhgi->adj_index,
3405 							nhgi->ecmp_size);
3406 		if (err)
3407 			goto err_mass_update_vr;
3408 	}
3409 	return 0;
3410 
3411 err_mass_update_vr:
3412 	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3413 		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3414 						  vr_entry->key.vr_id,
3415 						  nhgi->adj_index,
3416 						  nhgi->ecmp_size,
3417 						  old_adj_index, old_ecmp_size);
3418 	return err;
3419 }
3420 
3421 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3422 					 u32 adj_index,
3423 					 struct mlxsw_sp_nexthop *nh,
3424 					 bool force, char *ratr_pl)
3425 {
3426 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3427 	enum mlxsw_reg_ratr_op op;
3428 	u16 rif_index;
3429 
3430 	rif_index = nh->rif ? nh->rif->rif_index :
3431 			      mlxsw_sp->router->lb_rif_index;
3432 	op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3433 		     MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3434 	mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3435 			    adj_index, rif_index);
3436 	switch (nh->action) {
3437 	case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3438 		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3439 		break;
3440 	case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3441 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3442 					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3443 		break;
3444 	case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3445 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3446 					       MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3447 		mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3448 		break;
3449 	default:
3450 		WARN_ON_ONCE(1);
3451 		return -EINVAL;
3452 	}
3453 	if (nh->counter_valid)
3454 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3455 	else
3456 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3457 
3458 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3459 }
3460 
3461 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3462 				struct mlxsw_sp_nexthop *nh, bool force,
3463 				char *ratr_pl)
3464 {
3465 	int i;
3466 
3467 	for (i = 0; i < nh->num_adj_entries; i++) {
3468 		int err;
3469 
3470 		err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3471 						    nh, force, ratr_pl);
3472 		if (err)
3473 			return err;
3474 	}
3475 
3476 	return 0;
3477 }
3478 
3479 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3480 					  u32 adj_index,
3481 					  struct mlxsw_sp_nexthop *nh,
3482 					  bool force, char *ratr_pl)
3483 {
3484 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3485 
3486 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3487 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3488 					force, ratr_pl);
3489 }
3490 
3491 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3492 					u32 adj_index,
3493 					struct mlxsw_sp_nexthop *nh, bool force,
3494 					char *ratr_pl)
3495 {
3496 	int i;
3497 
3498 	for (i = 0; i < nh->num_adj_entries; i++) {
3499 		int err;
3500 
3501 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3502 						     nh, force, ratr_pl);
3503 		if (err)
3504 			return err;
3505 	}
3506 
3507 	return 0;
3508 }
3509 
3510 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3511 				   struct mlxsw_sp_nexthop *nh, bool force,
3512 				   char *ratr_pl)
3513 {
3514 	/* When action is discard or trap, the nexthop must be
3515 	 * programmed as an Ethernet nexthop.
3516 	 */
3517 	if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3518 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3519 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3520 		return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3521 						   force, ratr_pl);
3522 	else
3523 		return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3524 						    force, ratr_pl);
3525 }
3526 
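/* Write each nexthop marked for update to its adjacency entries. When the
 * group was reallocated to a new adjacency range, every offloadable nexthop
 * must be rewritten.
 */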
3527 static int
3528 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3529 			      struct mlxsw_sp_nexthop_group_info *nhgi,
3530 			      bool reallocate)
3531 {
3532 	char ratr_pl[MLXSW_REG_RATR_LEN];
3533 	u32 adj_index = nhgi->adj_index; /* base */
3534 	struct mlxsw_sp_nexthop *nh;
3535 	int i;
3536 
3537 	for (i = 0; i < nhgi->count; i++) {
3538 		nh = &nhgi->nexthops[i];
3539 
3540 		if (!nh->should_offload) {
3541 			nh->offloaded = 0;
3542 			continue;
3543 		}
3544 
3545 		if (nh->update || reallocate) {
			int err;
3547 
3548 			err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3549 						      true, ratr_pl);
3550 			if (err)
3551 				return err;
3552 			nh->update = 0;
3553 			nh->offloaded = 1;
3554 		}
3555 		adj_index += nh->num_adj_entries;
3556 	}
3557 	return 0;
3558 }
3559 
3560 static int
3561 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3562 				    struct mlxsw_sp_nexthop_group *nh_grp)
3563 {
3564 	struct mlxsw_sp_fib_entry *fib_entry;
3565 	int err;
3566 
3567 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3568 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3569 		if (err)
3570 			return err;
3571 	}
3572 	return 0;
3573 }
3574 
3575 struct mlxsw_sp_adj_grp_size_range {
3576 	u16 start; /* Inclusive */
3577 	u16 end; /* Inclusive */
3578 };
3579 
3580 /* Ordered by range start value */
3581 static const struct mlxsw_sp_adj_grp_size_range
3582 mlxsw_sp1_adj_grp_size_ranges[] = {
3583 	{ .start = 1, .end = 64 },
3584 	{ .start = 512, .end = 512 },
3585 	{ .start = 1024, .end = 1024 },
3586 	{ .start = 2048, .end = 2048 },
3587 	{ .start = 4096, .end = 4096 },
3588 };
3589 
3590 /* Ordered by range start value */
3591 static const struct mlxsw_sp_adj_grp_size_range
3592 mlxsw_sp2_adj_grp_size_ranges[] = {
3593 	{ .start = 1, .end = 128 },
3594 	{ .start = 256, .end = 256 },
3595 	{ .start = 512, .end = 512 },
3596 	{ .start = 1024, .end = 1024 },
3597 	{ .start = 2048, .end = 2048 },
3598 	{ .start = 4096, .end = 4096 },
3599 };
3600 
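/* Round the requested group size up to the nearest supported range. For
 * example, on Spectrum-1 a request for 70 entries falls in no range and is
 * rounded up to 512; sizes beyond the largest range are left unchanged.
 */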
3601 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3602 					   u16 *p_adj_grp_size)
3603 {
3604 	int i;
3605 
3606 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3607 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3608 
3609 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3610 
3611 		if (*p_adj_grp_size >= size_range->start &&
3612 		    *p_adj_grp_size <= size_range->end)
3613 			return;
3614 
3615 		if (*p_adj_grp_size <= size_range->end) {
3616 			*p_adj_grp_size = size_range->end;
3617 			return;
3618 		}
3619 	}
3620 }
3621 
3622 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3623 					     u16 *p_adj_grp_size,
3624 					     unsigned int alloc_size)
3625 {
3626 	int i;
3627 
3628 	for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3629 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3630 
3631 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3632 
3633 		if (alloc_size >= size_range->end) {
3634 			*p_adj_grp_size = size_range->end;
3635 			return;
3636 		}
3637 	}
3638 }
3639 
3640 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3641 				     u16 *p_adj_grp_size)
3642 {
3643 	unsigned int alloc_size;
3644 	int err;
3645 
3646 	/* Round up the requested group size to the next size supported
3647 	 * by the device and make sure the request can be satisfied.
3648 	 */
3649 	mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3650 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3651 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3652 					      *p_adj_grp_size, &alloc_size);
3653 	if (err)
3654 		return err;
3655 	/* It is possible the allocation results in more allocated
	 * entries than requested. Try to use as many of them as
3657 	 * possible.
3658 	 */
3659 	mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3660 
3661 	return 0;
3662 }
3663 
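/* Normalize the nexthop weights by their greatest common divisor, ignoring
 * nexthops that should not be offloaded. For example, weights of 33 and 66
 * have a GCD of 33 and are normalized to 1 and 2, giving a sum of 3.
 */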
3664 static void
3665 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3666 {
3667 	int i, g = 0, sum_norm_weight = 0;
3668 	struct mlxsw_sp_nexthop *nh;
3669 
3670 	for (i = 0; i < nhgi->count; i++) {
3671 		nh = &nhgi->nexthops[i];
3672 
3673 		if (!nh->should_offload)
3674 			continue;
3675 		if (g > 0)
3676 			g = gcd(nh->nh_weight, g);
3677 		else
3678 			g = nh->nh_weight;
3679 	}
3680 
3681 	for (i = 0; i < nhgi->count; i++) {
3682 		nh = &nhgi->nexthops[i];
3683 
3684 		if (!nh->should_offload)
3685 			continue;
3686 		nh->norm_nh_weight = nh->nh_weight / g;
3687 		sum_norm_weight += nh->norm_nh_weight;
3688 	}
3689 
3690 	nhgi->sum_norm_weight = sum_norm_weight;
3691 }
3692 
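/* Distribute the group's adjacency entries among the nexthops in proportion
 * to their normalized weights, using cumulative rounding so that exactly
 * ecmp_size entries are handed out. For example, with an ecmp_size of 512
 * and normalized weights of 1 and 2 (sum of 3), the first nexthop gets
 * DIV_ROUND_CLOSEST(512 * 1, 3) = 171 entries and the second the remaining
 * 341.
 */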
3693 static void
3694 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3695 {
3696 	int i, weight = 0, lower_bound = 0;
3697 	int total = nhgi->sum_norm_weight;
3698 	u16 ecmp_size = nhgi->ecmp_size;
3699 
3700 	for (i = 0; i < nhgi->count; i++) {
3701 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3702 		int upper_bound;
3703 
3704 		if (!nh->should_offload)
3705 			continue;
3706 		weight += nh->norm_nh_weight;
3707 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3708 		nh->num_adj_entries = upper_bound - lower_bound;
3709 		lower_bound = upper_bound;
3710 	}
3711 }
3712 
3713 static struct mlxsw_sp_nexthop *
3714 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3715 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3716 
3717 static void
3718 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3719 					struct mlxsw_sp_nexthop_group *nh_grp)
3720 {
3721 	int i;
3722 
3723 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3724 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3725 
3726 		if (nh->offloaded)
3727 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3728 		else
3729 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3730 	}
3731 }
3732 
3733 static void
3734 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3735 					  struct mlxsw_sp_fib6_entry *fib6_entry)
3736 {
3737 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3738 
3739 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3740 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3741 		struct mlxsw_sp_nexthop *nh;
3742 
3743 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3744 		if (nh && nh->offloaded)
3745 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3746 		else
3747 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3748 	}
3749 }
3750 
3751 static void
3752 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3753 					struct mlxsw_sp_nexthop_group *nh_grp)
3754 {
3755 	struct mlxsw_sp_fib6_entry *fib6_entry;
3756 
3757 	/* Unfortunately, in IPv6 the route and the nexthop are described by
3758 	 * the same struct, so we need to iterate over all the routes using the
3759 	 * nexthop group and set / clear the offload indication for them.
3760 	 */
3761 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3762 			    common.nexthop_group_node)
3763 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3764 }
3765 
3766 static void
3767 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3768 					const struct mlxsw_sp_nexthop *nh,
3769 					u16 bucket_index)
3770 {
3771 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3772 	bool offload = false, trap = false;
3773 
3774 	if (nh->offloaded) {
3775 		if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3776 			trap = true;
3777 		else
3778 			offload = true;
3779 	}
3780 	nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3781 				    bucket_index, offload, trap);
3782 }
3783 
3784 static void
3785 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3786 					   struct mlxsw_sp_nexthop_group *nh_grp)
3787 {
3788 	int i;
3789 
3790 	/* Do not update the flags if the nexthop group is being destroyed
3791 	 * since:
	 * 1. The nexthop object is being deleted, in which case the flags are
3793 	 * irrelevant.
3794 	 * 2. The nexthop group was replaced by a newer group, in which case
3795 	 * the flags of the nexthop object were already updated based on the
3796 	 * new group.
3797 	 */
3798 	if (nh_grp->can_destroy)
3799 		return;
3800 
3801 	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3802 			     nh_grp->nhgi->adj_index_valid, false);
3803 
3804 	/* Update flags of individual nexthop buckets in case of a resilient
3805 	 * nexthop group.
3806 	 */
3807 	if (!nh_grp->nhgi->is_resilient)
3808 		return;
3809 
3810 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3811 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3812 
3813 		mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3814 	}
3815 }
3816 
3817 static void
3818 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3819 				       struct mlxsw_sp_nexthop_group *nh_grp)
3820 {
3821 	switch (nh_grp->type) {
3822 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3823 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3824 		break;
3825 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3826 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3827 		break;
3828 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3829 		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3830 		break;
3831 	}
3832 }
3833 
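/* Refresh the group in the device. When the set of offloaded nexthops is
 * unchanged, only the existing adjacency entries are rewritten. Otherwise,
 * normalize the weights, round the group size to one supported by the
 * device, allocate a new KVD linear block and write the entries to it,
 * repoint the routes at it (via a fib entry update or a mass-update of the
 * old index) and free the old block. On failure, fall back to trapping the
 * group's traffic to the CPU.
 */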
3834 static int
3835 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3836 			       struct mlxsw_sp_nexthop_group *nh_grp)
3837 {
3838 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3839 	u16 ecmp_size, old_ecmp_size;
3840 	struct mlxsw_sp_nexthop *nh;
3841 	bool offload_change = false;
3842 	u32 adj_index;
3843 	bool old_adj_index_valid;
3844 	int i, err2, err = 0;
3845 	u32 old_adj_index;
3846 
3847 	if (!nhgi->gateway)
3848 		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3849 
3850 	for (i = 0; i < nhgi->count; i++) {
3851 		nh = &nhgi->nexthops[i];
3852 
3853 		if (nh->should_offload != nh->offloaded) {
3854 			offload_change = true;
3855 			if (nh->should_offload)
3856 				nh->update = 1;
3857 		}
3858 	}
3859 	if (!offload_change) {
3860 		/* Nothing was added or removed, so no need to reallocate. Just
		 * update the MAC addresses on the existing adjacency entries.
3862 		 */
3863 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3864 		if (err) {
3865 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3866 			goto set_trap;
3867 		}
3868 		/* Flags of individual nexthop buckets might need to be
3869 		 * updated.
3870 		 */
3871 		mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3872 		return 0;
3873 	}
3874 	mlxsw_sp_nexthop_group_normalize(nhgi);
3875 	if (!nhgi->sum_norm_weight)
		/* No neighbour of this group is connected, so we just set
		 * the trap and let everything flow through the kernel.
3878 		 */
3879 		goto set_trap;
3880 
3881 	ecmp_size = nhgi->sum_norm_weight;
3882 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3883 	if (err)
3884 		/* No valid allocation size available. */
3885 		goto set_trap;
3886 
3887 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3888 				  ecmp_size, &adj_index);
3889 	if (err) {
3890 		/* We ran out of KVD linear space, just set the
3891 		 * trap and let everything flow through kernel.
3892 		 */
3893 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3894 		goto set_trap;
3895 	}
3896 	old_adj_index_valid = nhgi->adj_index_valid;
3897 	old_adj_index = nhgi->adj_index;
3898 	old_ecmp_size = nhgi->ecmp_size;
3899 	nhgi->adj_index_valid = 1;
3900 	nhgi->adj_index = adj_index;
3901 	nhgi->ecmp_size = ecmp_size;
3902 	mlxsw_sp_nexthop_group_rebalance(nhgi);
3903 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
3904 	if (err) {
3905 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3906 		goto set_trap;
3907 	}
3908 
3909 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3910 
3911 	if (!old_adj_index_valid) {
		/* The trap was set for the fib entries, so we have to call
		 * fib entry update to unset it and use the adjacency index.
3914 		 */
3915 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3916 		if (err) {
3917 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3918 			goto set_trap;
3919 		}
3920 		return 0;
3921 	}
3922 
3923 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3924 					     old_adj_index, old_ecmp_size);
3925 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3926 			   old_ecmp_size, old_adj_index);
3927 	if (err) {
3928 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3929 		goto set_trap;
3930 	}
3931 
3932 	return 0;
3933 
3934 set_trap:
3935 	old_adj_index_valid = nhgi->adj_index_valid;
3936 	nhgi->adj_index_valid = 0;
3937 	for (i = 0; i < nhgi->count; i++) {
3938 		nh = &nhgi->nexthops[i];
3939 		nh->offloaded = 0;
3940 	}
3941 	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3942 	if (err2)
3943 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3944 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3945 	if (old_adj_index_valid)
3946 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3947 				   nhgi->ecmp_size, nhgi->adj_index);
3948 	return err;
3949 }
3950 
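/* A nexthop with a connected neighbour is offloaded with a forward action.
 * When the neighbour becomes invalid, a nexthop in a resilient group must
 * keep occupying its buckets and is therefore programmed with a trap
 * action, while a nexthop in a regular group is simply not offloaded.
 */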
3951 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3952 					    bool removing)
3953 {
3954 	if (!removing) {
3955 		nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3956 		nh->should_offload = 1;
3957 	} else if (nh->nhgi->is_resilient) {
3958 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
3959 		nh->should_offload = 1;
3960 	} else {
3961 		nh->should_offload = 0;
3962 	}
3963 	nh->update = 1;
3964 }
3965 
3966 static int
3967 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3968 				    struct mlxsw_sp_neigh_entry *neigh_entry)
3969 {
3970 	struct neighbour *n, *old_n = neigh_entry->key.n;
3971 	struct mlxsw_sp_nexthop *nh;
3972 	bool entry_connected;
3973 	u8 nud_state, dead;
3974 	int err;
3975 
3976 	nh = list_first_entry(&neigh_entry->nexthop_list,
3977 			      struct mlxsw_sp_nexthop, neigh_list_node);
3978 
3979 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3980 	if (!n) {
3981 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3982 		if (IS_ERR(n))
3983 			return PTR_ERR(n);
3984 		neigh_event_send(n, NULL);
3985 	}
3986 
3987 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3988 	neigh_entry->key.n = n;
3989 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3990 	if (err)
3991 		goto err_neigh_entry_insert;
3992 
3993 	read_lock_bh(&n->lock);
3994 	nud_state = n->nud_state;
3995 	dead = n->dead;
3996 	read_unlock_bh(&n->lock);
3997 	entry_connected = nud_state & NUD_VALID && !dead;
3998 
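	/* Each nexthop on the list holds a reference on the old neighbour;
	 * transfer these references to the newly looked-up / created one.
	 */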
3999 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4000 			    neigh_list_node) {
4001 		neigh_release(old_n);
4002 		neigh_clone(n);
4003 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4004 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4005 	}
4006 
4007 	neigh_release(n);
4008 
4009 	return 0;
4010 
4011 err_neigh_entry_insert:
4012 	neigh_entry->key.n = old_n;
4013 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4014 	neigh_release(n);
4015 	return err;
4016 }
4017 
4018 static void
4019 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4020 			      struct mlxsw_sp_neigh_entry *neigh_entry,
4021 			      bool removing, bool dead)
4022 {
4023 	struct mlxsw_sp_nexthop *nh;
4024 
4025 	if (list_empty(&neigh_entry->nexthop_list))
4026 		return;
4027 
4028 	if (dead) {
4029 		int err;
4030 
4031 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4032 							  neigh_entry);
4033 		if (err)
4034 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4035 		return;
4036 	}
4037 
4038 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4039 			    neigh_list_node) {
4040 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4041 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4042 	}
4043 }
4044 
4045 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
4046 				      struct mlxsw_sp_rif *rif)
4047 {
4048 	if (nh->rif)
4049 		return;
4050 
4051 	nh->rif = rif;
4052 	list_add(&nh->rif_list_node, &rif->nexthop_list);
4053 }
4054 
4055 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
4056 {
4057 	if (!nh->rif)
4058 		return;
4059 
4060 	list_del(&nh->rif_list_node);
4061 	nh->rif = NULL;
4062 }
4063 
4064 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4065 				       struct mlxsw_sp_nexthop *nh)
4066 {
4067 	struct mlxsw_sp_neigh_entry *neigh_entry;
4068 	struct neighbour *n;
4069 	u8 nud_state, dead;
4070 	int err;
4071 
4072 	if (!nh->nhgi->gateway || nh->neigh_entry)
4073 		return 0;
4074 
	/* Take a reference on the neighbour here, ensuring that it will
	 * not be destroyed before the nexthop entry is finished with it.
	 * The reference is taken either by neigh_lookup() or by
	 * neigh_create() in case n is not found.
4079 	 */
4080 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4081 	if (!n) {
4082 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4083 		if (IS_ERR(n))
4084 			return PTR_ERR(n);
4085 		neigh_event_send(n, NULL);
4086 	}
4087 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4088 	if (!neigh_entry) {
4089 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4090 		if (IS_ERR(neigh_entry)) {
4091 			err = -EINVAL;
4092 			goto err_neigh_entry_create;
4093 		}
4094 	}
4095 
	/* If this is the first nexthop connected to this neighbour, add it
	 * to the nexthop_neighs_list.
4098 	 */
4099 	if (list_empty(&neigh_entry->nexthop_list))
4100 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4101 			      &mlxsw_sp->router->nexthop_neighs_list);
4102 
4103 	nh->neigh_entry = neigh_entry;
4104 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4105 	read_lock_bh(&n->lock);
4106 	nud_state = n->nud_state;
4107 	dead = n->dead;
4108 	read_unlock_bh(&n->lock);
4109 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4110 
4111 	return 0;
4112 
4113 err_neigh_entry_create:
4114 	neigh_release(n);
4115 	return err;
4116 }
4117 
4118 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4119 					struct mlxsw_sp_nexthop *nh)
4120 {
4121 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4122 	struct neighbour *n;
4123 
4124 	if (!neigh_entry)
4125 		return;
4126 	n = neigh_entry->key.n;
4127 
4128 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4129 	list_del(&nh->neigh_list_node);
4130 	nh->neigh_entry = NULL;
4131 
	/* If this is the last nexthop connected to this neighbour, remove it
	 * from the nexthop_neighs_list.
4134 	 */
4135 	if (list_empty(&neigh_entry->nexthop_list))
4136 		list_del(&neigh_entry->nexthop_neighs_list_node);
4137 
4138 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4139 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4140 
4141 	neigh_release(n);
4142 }
4143 
4144 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4145 {
4146 	struct net_device *ul_dev;
4147 	bool is_up;
4148 
4149 	rcu_read_lock();
4150 	ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4151 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4152 	rcu_read_unlock();
4153 
4154 	return is_up;
4155 }
4156 
4157 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4158 				       struct mlxsw_sp_nexthop *nh,
4159 				       struct mlxsw_sp_ipip_entry *ipip_entry)
4160 {
4161 	bool removing;
4162 
4163 	if (!nh->nhgi->gateway || nh->ipip_entry)
4164 		return;
4165 
4166 	nh->ipip_entry = ipip_entry;
4167 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4168 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
4169 	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4170 }
4171 
4172 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4173 				       struct mlxsw_sp_nexthop *nh)
4174 {
4175 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4176 
4177 	if (!ipip_entry)
4178 		return;
4179 
4180 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4181 	nh->ipip_entry = NULL;
4182 }
4183 
4184 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4185 					const struct fib_nh *fib_nh,
4186 					enum mlxsw_sp_ipip_type *p_ipipt)
4187 {
4188 	struct net_device *dev = fib_nh->fib_nh_dev;
4189 
4190 	return dev &&
4191 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4192 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4193 }
4194 
4195 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4196 				      struct mlxsw_sp_nexthop *nh,
4197 				      const struct net_device *dev)
4198 {
4199 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4200 	struct mlxsw_sp_ipip_entry *ipip_entry;
4201 	struct mlxsw_sp_rif *rif;
4202 	int err;
4203 
4204 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4205 	if (ipip_entry) {
4206 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4207 		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4208 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4209 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4210 			return 0;
4211 		}
4212 	}
4213 
4214 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4215 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4216 	if (!rif)
4217 		return 0;
4218 
4219 	mlxsw_sp_nexthop_rif_init(nh, rif);
4220 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4221 	if (err)
4222 		goto err_neigh_init;
4223 
4224 	return 0;
4225 
4226 err_neigh_init:
4227 	mlxsw_sp_nexthop_rif_fini(nh);
4228 	return err;
4229 }
4230 
4231 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4232 				       struct mlxsw_sp_nexthop *nh)
4233 {
4234 	switch (nh->type) {
4235 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4236 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4237 		mlxsw_sp_nexthop_rif_fini(nh);
4238 		break;
4239 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4240 		mlxsw_sp_nexthop_rif_fini(nh);
4241 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4242 		break;
4243 	}
4244 }
4245 
4246 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4247 				  struct mlxsw_sp_nexthop_group *nh_grp,
4248 				  struct mlxsw_sp_nexthop *nh,
4249 				  struct fib_nh *fib_nh)
4250 {
4251 	struct net_device *dev = fib_nh->fib_nh_dev;
4252 	struct in_device *in_dev;
4253 	int err;
4254 
4255 	nh->nhgi = nh_grp->nhgi;
4256 	nh->key.fib_nh = fib_nh;
4257 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4258 	nh->nh_weight = fib_nh->fib_nh_weight;
4259 #else
4260 	nh->nh_weight = 1;
4261 #endif
4262 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4263 	nh->neigh_tbl = &arp_tbl;
4264 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4265 	if (err)
4266 		return err;
4267 
4268 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4269 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4270 
4271 	if (!dev)
4272 		return 0;
4273 	nh->ifindex = dev->ifindex;
4274 
4275 	rcu_read_lock();
4276 	in_dev = __in_dev_get_rcu(dev);
4277 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4278 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4279 		rcu_read_unlock();
4280 		return 0;
4281 	}
4282 	rcu_read_unlock();
4283 
4284 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4285 	if (err)
4286 		goto err_nexthop_neigh_init;
4287 
4288 	return 0;
4289 
4290 err_nexthop_neigh_init:
4291 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4292 	return err;
4293 }
4294 
4295 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4296 				   struct mlxsw_sp_nexthop *nh)
4297 {
4298 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4299 	list_del(&nh->router_list_node);
4300 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4301 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4302 }
4303 
4304 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4305 				    unsigned long event, struct fib_nh *fib_nh)
4306 {
4307 	struct mlxsw_sp_nexthop_key key;
4308 	struct mlxsw_sp_nexthop *nh;
4309 
4310 	if (mlxsw_sp->router->aborted)
4311 		return;
4312 
4313 	key.fib_nh = fib_nh;
4314 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4315 	if (!nh)
4316 		return;
4317 
4318 	switch (event) {
4319 	case FIB_EVENT_NH_ADD:
4320 		mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4321 		break;
4322 	case FIB_EVENT_NH_DEL:
4323 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4324 		break;
4325 	}
4326 
4327 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4328 }
4329 
4330 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4331 					struct mlxsw_sp_rif *rif)
4332 {
4333 	struct mlxsw_sp_nexthop *nh;
4334 	bool removing;
4335 
4336 	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4337 		switch (nh->type) {
4338 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
4339 			removing = false;
4340 			break;
4341 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4342 			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4343 			break;
4344 		default:
4345 			WARN_ON(1);
4346 			continue;
4347 		}
4348 
4349 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4350 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4351 	}
4352 }
4353 
4354 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4355 					 struct mlxsw_sp_rif *old_rif,
4356 					 struct mlxsw_sp_rif *new_rif)
4357 {
4358 	struct mlxsw_sp_nexthop *nh;
4359 
4360 	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4361 	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4362 		nh->rif = new_rif;
4363 	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4364 }
4365 
4366 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4367 					   struct mlxsw_sp_rif *rif)
4368 {
4369 	struct mlxsw_sp_nexthop *nh, *tmp;
4370 
4371 	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4372 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4373 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4374 	}
4375 }
4376 
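/* Query the activity vector of the group's adjacency entries using the
 * RATRAD register. The activity is cleared as part of the query, so each
 * invocation reports activity since the previous one.
 */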
4377 static void
4378 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4379 			     const struct mlxsw_sp_nexthop_group *nh_grp,
4380 			     unsigned long *activity)
4381 {
4382 	char *ratrad_pl;
4383 	int i, err;
4384 
4385 	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4386 	if (!ratrad_pl)
4387 		return;
4388 
4389 	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4390 			      nh_grp->nhgi->count);
4391 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4392 	if (err)
4393 		goto out;
4394 
4395 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4396 		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4397 			continue;
4398 		bitmap_set(activity, i, 1);
4399 	}
4400 
4401 out:
4402 	kfree(ratrad_pl);
4403 }
4404 
4405 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4406 
4407 static void
4408 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4409 				const struct mlxsw_sp_nexthop_group *nh_grp)
4410 {
4411 	unsigned long *activity;
4412 
4413 	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4414 	if (!activity)
4415 		return;
4416 
4417 	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4418 	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4419 					nh_grp->nhgi->count, activity);
4420 
4421 	bitmap_free(activity);
4422 }
4423 
4424 static void
4425 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4426 {
4427 	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4428 
4429 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4430 			       msecs_to_jiffies(interval));
4431 }
4432 
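/* Delayed work that periodically (once a second) queries the bucket
 * activity of all the resilient nexthop groups and reports it to the
 * nexthop code, where it is used to decide whether an idle bucket may be
 * replaced.
 */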
4433 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4434 {
4435 	struct mlxsw_sp_nexthop_group_info *nhgi;
4436 	struct mlxsw_sp_router *router;
4437 	bool reschedule = false;
4438 
4439 	router = container_of(work, struct mlxsw_sp_router,
4440 			      nh_grp_activity_dw.work);
4441 
4442 	mutex_lock(&router->lock);
4443 
4444 	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4445 		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4446 		reschedule = true;
4447 	}
4448 
4449 	mutex_unlock(&router->lock);
4450 
4451 	if (!reschedule)
4452 		return;
4453 	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4454 }
4455 
4456 static int
4457 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4458 				     const struct nh_notifier_single_info *nh,
4459 				     struct netlink_ext_ack *extack)
4460 {
4461 	int err = -EINVAL;
4462 
4463 	if (nh->is_fdb)
4464 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4465 	else if (nh->has_encap)
4466 		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4467 	else
4468 		err = 0;
4469 
4470 	return err;
4471 }
4472 
4473 static int
4474 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4475 					  const struct nh_notifier_single_info *nh,
4476 					  struct netlink_ext_ack *extack)
4477 {
4478 	int err;
4479 
4480 	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4481 	if (err)
4482 		return err;
4483 
	/* Device-only nexthops with an IPIP device are programmed as
4485 	 * encapsulating adjacency entries.
4486 	 */
4487 	if (!nh->gw_family && !nh->is_reject &&
4488 	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4489 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4490 		return -EINVAL;
4491 	}
4492 
4493 	return 0;
4494 }
4495 
4496 static int
4497 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4498 				    const struct nh_notifier_grp_info *nh_grp,
4499 				    struct netlink_ext_ack *extack)
4500 {
4501 	int i;
4502 
4503 	if (nh_grp->is_fdb) {
4504 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4505 		return -EINVAL;
4506 	}
4507 
4508 	for (i = 0; i < nh_grp->num_nh; i++) {
4509 		const struct nh_notifier_single_info *nh;
4510 		int err;
4511 
4512 		nh = &nh_grp->nh_entries[i].nh;
4513 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4514 								extack);
4515 		if (err)
4516 			return err;
4517 	}
4518 
4519 	return 0;
4520 }
4521 
4522 static int
4523 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4524 					     const struct nh_notifier_res_table_info *nh_res_table,
4525 					     struct netlink_ext_ack *extack)
4526 {
4527 	unsigned int alloc_size;
4528 	bool valid_size = false;
4529 	int err, i;
4530 
4531 	if (nh_res_table->num_nh_buckets < 32) {
4532 		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4533 		return -EINVAL;
4534 	}
4535 
4536 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4537 		const struct mlxsw_sp_adj_grp_size_range *size_range;
4538 
4539 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4540 
4541 		if (nh_res_table->num_nh_buckets >= size_range->start &&
4542 		    nh_res_table->num_nh_buckets <= size_range->end) {
4543 			valid_size = true;
4544 			break;
4545 		}
4546 	}
4547 
4548 	if (!valid_size) {
4549 		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4550 		return -EINVAL;
4551 	}
4552 
4553 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4554 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4555 					      nh_res_table->num_nh_buckets,
4556 					      &alloc_size);
4557 	if (err || nh_res_table->num_nh_buckets != alloc_size) {
4558 		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4559 		return -EINVAL;
4560 	}
4561 
4562 	return 0;
4563 }
4564 
4565 static int
4566 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4567 					const struct nh_notifier_res_table_info *nh_res_table,
4568 					struct netlink_ext_ack *extack)
4569 {
4570 	int err;
4571 	u16 i;
4572 
4573 	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4574 							   nh_res_table,
4575 							   extack);
4576 	if (err)
4577 		return err;
4578 
4579 	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4580 		const struct nh_notifier_single_info *nh;
4582 
4583 		nh = &nh_res_table->nhs[i];
4584 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4585 								extack);
4586 		if (err)
4587 			return err;
4588 	}
4589 
4590 	return 0;
4591 }
4592 
4593 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4594 					 unsigned long event,
4595 					 struct nh_notifier_info *info)
4596 {
4597 	struct nh_notifier_single_info *nh;
4598 
4599 	if (event != NEXTHOP_EVENT_REPLACE &&
4600 	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4601 	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
4602 		return 0;
4603 
4604 	switch (info->type) {
4605 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4606 		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4607 							    info->extack);
4608 	case NH_NOTIFIER_INFO_TYPE_GRP:
4609 		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4610 							   info->nh_grp,
4611 							   info->extack);
4612 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4613 		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4614 							       info->nh_res_table,
4615 							       info->extack);
4616 	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4617 		nh = &info->nh_res_bucket->new_nh;
4618 		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4619 								 info->extack);
4620 	default:
4621 		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4622 		return -EOPNOTSUPP;
4623 	}
4624 }
4625 
4626 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4627 					    const struct nh_notifier_info *info)
4628 {
4629 	const struct net_device *dev;
4630 
4631 	switch (info->type) {
4632 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4633 		dev = info->nh->dev;
4634 		return info->nh->gw_family || info->nh->is_reject ||
4635 		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4636 	case NH_NOTIFIER_INFO_TYPE_GRP:
4637 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4638 		/* Already validated earlier. */
4639 		return true;
4640 	default:
4641 		return false;
4642 	}
4643 }
4644 
4645 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4646 						struct mlxsw_sp_nexthop *nh)
4647 {
4648 	u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
4649 
4650 	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4651 	nh->should_offload = 1;
4652 	/* While nexthops that discard packets do not forward packets
4653 	 * via an egress RIF, they still need to be programmed using a
4654 	 * valid RIF, so use the loopback RIF created during init.
4655 	 */
4656 	nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
4657 }
4658 
4659 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4660 						struct mlxsw_sp_nexthop *nh)
4661 {
4662 	nh->rif = NULL;
4663 	nh->should_offload = 0;
4664 }
4665 
4666 static int
4667 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4668 			  struct mlxsw_sp_nexthop_group *nh_grp,
4669 			  struct mlxsw_sp_nexthop *nh,
4670 			  struct nh_notifier_single_info *nh_obj, int weight)
4671 {
4672 	struct net_device *dev = nh_obj->dev;
4673 	int err;
4674 
4675 	nh->nhgi = nh_grp->nhgi;
4676 	nh->nh_weight = weight;
4677 
4678 	switch (nh_obj->gw_family) {
4679 	case AF_INET:
4680 		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4681 		nh->neigh_tbl = &arp_tbl;
4682 		break;
4683 	case AF_INET6:
4684 		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
4685 #if IS_ENABLED(CONFIG_IPV6)
4686 		nh->neigh_tbl = &nd_tbl;
4687 #endif
4688 		break;
4689 	}
4690 
4691 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4692 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4693 	nh->ifindex = dev->ifindex;
4694 
4695 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4696 	if (err)
4697 		goto err_type_init;
4698 
4699 	if (nh_obj->is_reject)
4700 		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
4701 
4702 	/* In a resilient nexthop group, all the nexthops must be written to
	 * the adjacency table, even if they do not have a valid neighbour or
4704 	 * RIF.
4705 	 */
4706 	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
4707 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4708 		nh->should_offload = 1;
4709 	}
4710 
4711 	return 0;
4712 
4713 err_type_init:
4714 	list_del(&nh->router_list_node);
4715 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4716 	return err;
4717 }
4718 
4719 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
4720 				      struct mlxsw_sp_nexthop *nh)
4721 {
4722 	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
4723 		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
4724 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4725 	list_del(&nh->router_list_node);
4726 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4727 	nh->should_offload = 0;
4728 }
4729 
4730 static int
4731 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
4732 				     struct mlxsw_sp_nexthop_group *nh_grp,
4733 				     struct nh_notifier_info *info)
4734 {
4735 	struct mlxsw_sp_nexthop_group_info *nhgi;
4736 	struct mlxsw_sp_nexthop *nh;
4737 	bool is_resilient = false;
4738 	unsigned int nhs;
4739 	int err, i;
4740 
4741 	switch (info->type) {
4742 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4743 		nhs = 1;
4744 		break;
4745 	case NH_NOTIFIER_INFO_TYPE_GRP:
4746 		nhs = info->nh_grp->num_nh;
4747 		break;
4748 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4749 		nhs = info->nh_res_table->num_nh_buckets;
4750 		is_resilient = true;
4751 		break;
4752 	default:
4753 		return -EINVAL;
4754 	}
4755 
4756 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4757 	if (!nhgi)
4758 		return -ENOMEM;
4759 	nh_grp->nhgi = nhgi;
4760 	nhgi->nh_grp = nh_grp;
4761 	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
4762 	nhgi->is_resilient = is_resilient;
4763 	nhgi->count = nhs;
4764 	for (i = 0; i < nhgi->count; i++) {
4765 		struct nh_notifier_single_info *nh_obj;
4766 		int weight;
4767 
4768 		nh = &nhgi->nexthops[i];
4769 		switch (info->type) {
4770 		case NH_NOTIFIER_INFO_TYPE_SINGLE:
4771 			nh_obj = info->nh;
4772 			weight = 1;
4773 			break;
4774 		case NH_NOTIFIER_INFO_TYPE_GRP:
4775 			nh_obj = &info->nh_grp->nh_entries[i].nh;
4776 			weight = info->nh_grp->nh_entries[i].weight;
4777 			break;
4778 		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4779 			nh_obj = &info->nh_res_table->nhs[i];
4780 			weight = 1;
4781 			break;
4782 		default:
4783 			err = -EINVAL;
4784 			goto err_nexthop_obj_init;
4785 		}
4786 		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
4787 						weight);
4788 		if (err)
4789 			goto err_nexthop_obj_init;
4790 	}
4791 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4792 	if (err) {
4793 		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
4794 		goto err_group_refresh;
4795 	}
4796 
4797 	/* Add resilient nexthop groups to a list so that the activity of their
4798 	 * nexthop buckets will be periodically queried and cleared.
4799 	 */
4800 	if (nhgi->is_resilient) {
4801 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4802 			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
4803 		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
4804 	}
4805 
4806 	return 0;
4807 
4808 err_group_refresh:
4809 	i = nhgi->count;
4810 err_nexthop_obj_init:
4811 	for (i--; i >= 0; i--) {
4812 		nh = &nhgi->nexthops[i];
4813 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4814 	}
4815 	kfree(nhgi);
4816 	return err;
4817 }
4818 
4819 static void
4820 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
4821 				     struct mlxsw_sp_nexthop_group *nh_grp)
4822 {
4823 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4824 	struct mlxsw_sp_router *router = mlxsw_sp->router;
4825 	int i;
4826 
4827 	if (nhgi->is_resilient) {
4828 		list_del(&nhgi->list);
4829 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4830 			cancel_delayed_work(&router->nh_grp_activity_dw);
4831 	}
4832 
4833 	for (i = nhgi->count - 1; i >= 0; i--) {
4834 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4835 
4836 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4837 	}
4838 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4839 	WARN_ON_ONCE(nhgi->adj_index_valid);
4840 	kfree(nhgi);
4841 }
4842 
4843 static struct mlxsw_sp_nexthop_group *
4844 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
4845 				  struct nh_notifier_info *info)
4846 {
4847 	struct mlxsw_sp_nexthop_group *nh_grp;
4848 	int err;
4849 
4850 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
4851 	if (!nh_grp)
4852 		return ERR_PTR(-ENOMEM);
4853 	INIT_LIST_HEAD(&nh_grp->vr_list);
4854 	err = rhashtable_init(&nh_grp->vr_ht,
4855 			      &mlxsw_sp_nexthop_group_vr_ht_params);
4856 	if (err)
4857 		goto err_nexthop_group_vr_ht_init;
4858 	INIT_LIST_HEAD(&nh_grp->fib_list);
4859 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
4860 	nh_grp->obj.id = info->id;
4861 
4862 	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
4863 	if (err)
4864 		goto err_nexthop_group_info_init;
4865 
4866 	nh_grp->can_destroy = false;
4867 
4868 	return nh_grp;
4869 
4870 err_nexthop_group_info_init:
4871 	rhashtable_destroy(&nh_grp->vr_ht);
4872 err_nexthop_group_vr_ht_init:
4873 	kfree(nh_grp);
4874 	return ERR_PTR(err);
4875 }
4876 
4877 static void
4878 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
4879 				   struct mlxsw_sp_nexthop_group *nh_grp)
4880 {
4881 	if (!nh_grp->can_destroy)
4882 		return;
4883 	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
4884 	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
4885 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
4886 	rhashtable_destroy(&nh_grp->vr_ht);
4887 	kfree(nh_grp);
4888 }
4889 
4890 static struct mlxsw_sp_nexthop_group *
4891 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
4892 {
4893 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
4894 
4895 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
4896 	cmp_arg.id = id;
4897 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
4898 				      &cmp_arg,
4899 				      mlxsw_sp_nexthop_group_ht_params);
4900 }
4901 
4902 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
4903 					  struct mlxsw_sp_nexthop_group *nh_grp)
4904 {
4905 	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4906 }
4907 
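/* Replace the existing group with the newly created one by swapping the
 * nexthop group info ('nhgi') between the two structures. Routes keep
 * pointing at 'old_nh_grp', which now holds the new info, so at most the
 * adjacency index they use needs to be updated in the device.
 */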
4908 static int
4909 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
4910 				   struct mlxsw_sp_nexthop_group *nh_grp,
4911 				   struct mlxsw_sp_nexthop_group *old_nh_grp,
4912 				   struct netlink_ext_ack *extack)
4913 {
4914 	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
4915 	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
4916 	int err;
4917 
4918 	old_nh_grp->nhgi = new_nhgi;
4919 	new_nhgi->nh_grp = old_nh_grp;
4920 	nh_grp->nhgi = old_nhgi;
4921 	old_nhgi->nh_grp = nh_grp;
4922 
4923 	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
4924 		/* Both the old adjacency index and the new one are valid.
4925 		 * Routes are currently using the old one. Tell the device to
4926 		 * replace the old adjacency index with the new one.
4927 		 */
4928 		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
4929 						     old_nhgi->adj_index,
4930 						     old_nhgi->ecmp_size);
4931 		if (err) {
4932 			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
4933 			goto err_out;
4934 		}
4935 	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
4936 		/* The old adjacency index is valid, while the new one is not.
4937 		 * Iterate over all the routes using the group and change them
4938 		 * to trap packets to the CPU.
4939 		 */
4940 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
4941 		if (err) {
4942 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
4943 			goto err_out;
4944 		}
4945 	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
4946 		/* The old adjacency index is invalid, while the new one is.
4947 		 * Iterate over all the routes using the group and change them
4948 		 * to forward packets using the new valid index.
4949 		 */
4950 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
4951 		if (err) {
4952 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
4953 			goto err_out;
4954 		}
4955 	}
4956 
4957 	/* Make sure the flags are set / cleared based on the new nexthop group
4958 	 * information.
4959 	 */
4960 	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
4961 
4962 	/* At this point 'nh_grp' is just a shell that is not used by anyone
4963 	 * and its nexthop group info is the old info that was just replaced
4964 	 * with the new one. Remove it.
4965 	 */
4966 	nh_grp->can_destroy = true;
4967 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
4968 
4969 	return 0;
4970 
4971 err_out:
4972 	old_nhgi->nh_grp = old_nh_grp;
4973 	nh_grp->nhgi = new_nhgi;
4974 	new_nhgi->nh_grp = nh_grp;
4975 	old_nh_grp->nhgi = old_nhgi;
4976 	return err;
4977 }
4978 
4979 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
4980 				    struct nh_notifier_info *info)
4981 {
4982 	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
4983 	struct netlink_ext_ack *extack = info->extack;
4984 	int err;
4985 
4986 	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
4987 	if (IS_ERR(nh_grp))
4988 		return PTR_ERR(nh_grp);
4989 
4990 	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
4991 	if (!old_nh_grp)
4992 		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
4993 	else
4994 		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
4995 							 old_nh_grp, extack);
4996 
4997 	if (err) {
4998 		nh_grp->can_destroy = true;
4999 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5000 	}
5001 
5002 	return err;
5003 }
5004 
5005 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5006 				     struct nh_notifier_info *info)
5007 {
5008 	struct mlxsw_sp_nexthop_group *nh_grp;
5009 
5010 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5011 	if (!nh_grp)
5012 		return;
5013 
5014 	nh_grp->can_destroy = true;
5015 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5016 
5017 	/* If the group still has routes using it, then defer the delete
5018 	 * operation until the last route using it is deleted.
5019 	 */
5020 	if (!list_empty(&nh_grp->fib_list))
5021 		return;
5022 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5023 }
5024 
5025 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5026 					     u32 adj_index, char *ratr_pl)
5027 {
5028 	MLXSW_REG_ZERO(ratr, ratr_pl);
5029 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5030 	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5031 	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5032 
5033 	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5034 }
5035 
5036 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5037 {
	/* Clear the opcode and activity on both the old and new payloads as
5039 	 * they are irrelevant for the comparison.
5040 	 */
5041 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5042 	mlxsw_reg_ratr_a_set(ratr_pl, 0);
5043 	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5044 	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5045 
5046 	/* If the contents of the adjacency entry are consistent with the
5047 	 * replacement request, then replacement was successful.
5048 	 */
5049 	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5050 		return 0;
5051 
5052 	return -EINVAL;
5053 }
5054 
5055 static int
5056 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5057 				       struct mlxsw_sp_nexthop *nh,
5058 				       struct nh_notifier_info *info)
5059 {
5060 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5061 	struct netlink_ext_ack *extack = info->extack;
5062 	bool force = info->nh_res_bucket->force;
5063 	char ratr_pl_new[MLXSW_REG_RATR_LEN];
5064 	char ratr_pl[MLXSW_REG_RATR_LEN];
5065 	u32 adj_index;
5066 	int err;
5067 
5068 	/* No point in trying an atomic replacement if the idle timer interval
5069 	 * is smaller than the interval in which we query and clear activity.
5070 	 */
5071 	if (!force && info->nh_res_bucket->idle_timer_ms <
5072 	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5073 		force = true;
5074 
5075 	adj_index = nh->nhgi->adj_index + bucket_index;
5076 	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5077 	if (err) {
5078 		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5079 		return err;
5080 	}
5081 
5082 	if (!force) {
5083 		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5084 							ratr_pl_new);
5085 		if (err) {
5086 			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5087 			return err;
5088 		}
5089 
5090 		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5091 		if (err) {
5092 			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5093 			return err;
5094 		}
5095 	}
5096 
5097 	nh->update = 0;
5098 	nh->offloaded = 1;
5099 	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5100 
5101 	return 0;
5102 }
5103 
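/* Replace a single bucket of a resilient group: tear down the current
 * nexthop, initialize a new one from the notification info and overwrite
 * the corresponding adjacency entry. A non-forced replacement uses an
 * opcode that takes the entry's activity into account, and the entry is
 * read back to verify that the replacement took place. On failure, the
 * old nexthop is restored.
 */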
5104 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5105 					       struct nh_notifier_info *info)
5106 {
5107 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5108 	struct netlink_ext_ack *extack = info->extack;
5109 	struct mlxsw_sp_nexthop_group_info *nhgi;
5110 	struct nh_notifier_single_info *nh_obj;
5111 	struct mlxsw_sp_nexthop_group *nh_grp;
5112 	struct mlxsw_sp_nexthop *nh;
5113 	int err;
5114 
5115 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5116 	if (!nh_grp) {
5117 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5118 		return -EINVAL;
5119 	}
5120 
5121 	nhgi = nh_grp->nhgi;
5122 
5123 	if (bucket_index >= nhgi->count) {
5124 		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5125 		return -EINVAL;
5126 	}
5127 
5128 	nh = &nhgi->nexthops[bucket_index];
5129 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5130 
5131 	nh_obj = &info->nh_res_bucket->new_nh;
5132 	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5133 	if (err) {
5134 		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5135 		goto err_nexthop_obj_init;
5136 	}
5137 
5138 	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5139 	if (err)
5140 		goto err_nexthop_obj_bucket_adj_update;
5141 
5142 	return 0;
5143 
5144 err_nexthop_obj_bucket_adj_update:
5145 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5146 err_nexthop_obj_init:
5147 	nh_obj = &info->nh_res_bucket->old_nh;
5148 	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5149 	/* The old adjacency entry was not overwritten */
5150 	nh->update = 0;
5151 	nh->offloaded = 1;
5152 	return err;
5153 }
5154 
5155 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5156 				      unsigned long event, void *ptr)
5157 {
5158 	struct nh_notifier_info *info = ptr;
5159 	struct mlxsw_sp_router *router;
5160 	int err = 0;
5161 
5162 	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5163 	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5164 	if (err)
5165 		goto out;
5166 
5167 	mutex_lock(&router->lock);
5168 
5169 	switch (event) {
5170 	case NEXTHOP_EVENT_REPLACE:
5171 		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5172 		break;
5173 	case NEXTHOP_EVENT_DEL:
5174 		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5175 		break;
5176 	case NEXTHOP_EVENT_BUCKET_REPLACE:
5177 		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5178 							  info);
5179 		break;
5180 	default:
5181 		break;
5182 	}
5183 
5184 	mutex_unlock(&router->lock);
5185 
5186 out:
5187 	return notifier_from_errno(err);
5188 }
5189 
5190 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5191 				   struct fib_info *fi)
5192 {
5193 	const struct fib_nh *nh = fib_info_nh(fi, 0);
5194 
5195 	return nh->fib_nh_scope == RT_SCOPE_LINK ||
5196 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5197 }
5198 
5199 static int
5200 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5201 				  struct mlxsw_sp_nexthop_group *nh_grp)
5202 {
5203 	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5204 	struct mlxsw_sp_nexthop_group_info *nhgi;
5205 	struct mlxsw_sp_nexthop *nh;
5206 	int err, i;
5207 
5208 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5209 	if (!nhgi)
5210 		return -ENOMEM;
5211 	nh_grp->nhgi = nhgi;
5212 	nhgi->nh_grp = nh_grp;
5213 	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5214 	nhgi->count = nhs;
5215 	for (i = 0; i < nhgi->count; i++) {
5216 		struct fib_nh *fib_nh;
5217 
5218 		nh = &nhgi->nexthops[i];
5219 		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5220 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5221 		if (err)
5222 			goto err_nexthop4_init;
5223 	}
5224 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5225 	if (err)
5226 		goto err_group_refresh;
5227 
5228 	return 0;
5229 
5230 err_group_refresh:
5231 	i = nhgi->count;
5232 err_nexthop4_init:
5233 	for (i--; i >= 0; i--) {
5234 		nh = &nhgi->nexthops[i];
5235 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5236 	}
5237 	kfree(nhgi);
5238 	return err;
5239 }
5240 
5241 static void
5242 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5243 				  struct mlxsw_sp_nexthop_group *nh_grp)
5244 {
5245 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5246 	int i;
5247 
5248 	for (i = nhgi->count - 1; i >= 0; i--) {
5249 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5250 
5251 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5252 	}
5253 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5254 	WARN_ON_ONCE(nhgi->adj_index_valid);
5255 	kfree(nhgi);
5256 }
5257 
5258 static struct mlxsw_sp_nexthop_group *
5259 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5260 {
5261 	struct mlxsw_sp_nexthop_group *nh_grp;
5262 	int err;
5263 
5264 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5265 	if (!nh_grp)
5266 		return ERR_PTR(-ENOMEM);
5267 	INIT_LIST_HEAD(&nh_grp->vr_list);
5268 	err = rhashtable_init(&nh_grp->vr_ht,
5269 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5270 	if (err)
5271 		goto err_nexthop_group_vr_ht_init;
5272 	INIT_LIST_HEAD(&nh_grp->fib_list);
5273 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5274 	nh_grp->ipv4.fi = fi;
5275 	fib_info_hold(fi);
5276 
5277 	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5278 	if (err)
5279 		goto err_nexthop_group_info_init;
5280 
5281 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5282 	if (err)
5283 		goto err_nexthop_group_insert;
5284 
5285 	nh_grp->can_destroy = true;
5286 
5287 	return nh_grp;
5288 
5289 err_nexthop_group_insert:
5290 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5291 err_nexthop_group_info_init:
5292 	fib_info_put(fi);
5293 	rhashtable_destroy(&nh_grp->vr_ht);
5294 err_nexthop_group_vr_ht_init:
5295 	kfree(nh_grp);
5296 	return ERR_PTR(err);
5297 }
5298 
5299 static void
5300 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5301 				struct mlxsw_sp_nexthop_group *nh_grp)
5302 {
5303 	if (!nh_grp->can_destroy)
5304 		return;
5305 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5306 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5307 	fib_info_put(nh_grp->ipv4.fi);
5308 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5309 	rhashtable_destroy(&nh_grp->vr_ht);
5310 	kfree(nh_grp);
5311 }
5312 
5313 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5314 				       struct mlxsw_sp_fib_entry *fib_entry,
5315 				       struct fib_info *fi)
5316 {
5317 	struct mlxsw_sp_nexthop_group *nh_grp;
5318 
5319 	if (fi->nh) {
5320 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5321 							   fi->nh->id);
5322 		if (WARN_ON_ONCE(!nh_grp))
5323 			return -EINVAL;
5324 		goto out;
5325 	}
5326 
5327 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5328 	if (!nh_grp) {
5329 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5330 		if (IS_ERR(nh_grp))
5331 			return PTR_ERR(nh_grp);
5332 	}
5333 out:
5334 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5335 	fib_entry->nh_group = nh_grp;
5336 	return 0;
5337 }
5338 
5339 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5340 					struct mlxsw_sp_fib_entry *fib_entry)
5341 {
5342 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5343 
5344 	list_del(&fib_entry->nexthop_group_node);
5345 	if (!list_empty(&nh_grp->fib_list))
5346 		return;
5347 
5348 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5349 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5350 		return;
5351 	}
5352 
5353 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5354 }
5355 
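/* Routes with a non-default TOS cannot be offloaded. */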
5356 static bool
5357 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5358 {
5359 	struct mlxsw_sp_fib4_entry *fib4_entry;
5360 
5361 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5362 				  common);
5363 	return !fib4_entry->tos;
5364 }
5365 
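/* An entry should be offloaded only when the device can act on it:
 * remote entries require a valid adjacency index, local entries
 * require an egress RIF, and blackhole / decap entries can always
 * be programmed.
 */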
5366 static bool
5367 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5368 {
5369 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5370 
5371 	switch (fib_entry->fib_node->fib->proto) {
5372 	case MLXSW_SP_L3_PROTO_IPV4:
5373 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5374 			return false;
5375 		break;
5376 	case MLXSW_SP_L3_PROTO_IPV6:
5377 		break;
5378 	}
5379 
5380 	switch (fib_entry->type) {
5381 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5382 		return !!nh_group->nhgi->adj_index_valid;
5383 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5384 		return !!nh_group->nhgi->nh_rif;
5385 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5386 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5387 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5388 		return true;
5389 	default:
5390 		return false;
5391 	}
5392 }
5393 
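/* Find the nexthop in the group that corresponds to the given IPv6
 * route, by matching both the nexthop device and the gateway address.
 */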
5394 static struct mlxsw_sp_nexthop *
5395 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5396 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5397 {
5398 	int i;
5399 
5400 	for (i = 0; i < nh_grp->nhgi->count; i++) {
5401 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5402 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5403 
5404 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
5405 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5406 				    &rt->fib6_nh->fib_nh_gw6))
5407 			return nh;
5409 	}
5410 
5411 	return NULL;
5412 }
5413 
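/* Report a hardware installation failure to the kernel, so that it is
 * reflected in the route's flags and visible to user space.
 */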
5414 static void
5415 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5416 				      struct fib_entry_notifier_info *fen_info)
5417 {
5418 	u32 *p_dst = (u32 *) &fen_info->dst;
5419 	struct fib_rt_info fri;
5420 
5421 	fri.fi = fen_info->fi;
5422 	fri.tb_id = fen_info->tb_id;
5423 	fri.dst = cpu_to_be32(*p_dst);
5424 	fri.dst_len = fen_info->dst_len;
5425 	fri.tos = fen_info->tos;
5426 	fri.type = fen_info->type;
5427 	fri.offload = false;
5428 	fri.trap = false;
5429 	fri.offload_failed = true;
5430 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5431 }
5432 
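/* Reflect the entry's state in the kernel's FIB: an offloaded route is
 * forwarded by the device, while a non-offloaded one traps packets to
 * the CPU.
 */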
5433 static void
5434 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5435 				 struct mlxsw_sp_fib_entry *fib_entry)
5436 {
5437 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5438 	int dst_len = fib_entry->fib_node->key.prefix_len;
5439 	struct mlxsw_sp_fib4_entry *fib4_entry;
5440 	struct fib_rt_info fri;
5441 	bool should_offload;
5442 
5443 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5444 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5445 				  common);
5446 	fri.fi = fib4_entry->fi;
5447 	fri.tb_id = fib4_entry->tb_id;
5448 	fri.dst = cpu_to_be32(*p_dst);
5449 	fri.dst_len = dst_len;
5450 	fri.tos = fib4_entry->tos;
5451 	fri.type = fib4_entry->type;
5452 	fri.offload = should_offload;
5453 	fri.trap = !should_offload;
5454 	fri.offload_failed = false;
5455 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5456 }
5457 
5458 static void
5459 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5460 				   struct mlxsw_sp_fib_entry *fib_entry)
5461 {
5462 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5463 	int dst_len = fib_entry->fib_node->key.prefix_len;
5464 	struct mlxsw_sp_fib4_entry *fib4_entry;
5465 	struct fib_rt_info fri;
5466 
5467 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5468 				  common);
5469 	fri.fi = fib4_entry->fi;
5470 	fri.tb_id = fib4_entry->tb_id;
5471 	fri.dst = cpu_to_be32(*p_dst);
5472 	fri.dst_len = dst_len;
5473 	fri.tos = fib4_entry->tos;
5474 	fri.type = fib4_entry->type;
5475 	fri.offload = false;
5476 	fri.trap = false;
5477 	fri.offload_failed = false;
5478 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5479 }
5480 
5481 #if IS_ENABLED(CONFIG_IPV6)
5482 static void
5483 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5484 				      struct fib6_info **rt_arr,
5485 				      unsigned int nrt6)
5486 {
5487 	int i;
5488 
5489 	/* In IPv6 a multipath route is represented using multiple routes, so
5490 	 * we need to set the flags on all of them.
5491 	 */
5492 	for (i = 0; i < nrt6; i++)
5493 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5494 				       false, false, true);
5495 }
5496 #else
5497 static void
5498 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5499 				      struct fib6_info **rt_arr,
5500 				      unsigned int nrt6)
5501 {
5502 }
5503 #endif
5504 
5505 #if IS_ENABLED(CONFIG_IPV6)
5506 static void
5507 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5508 				 struct mlxsw_sp_fib_entry *fib_entry)
5509 {
5510 	struct mlxsw_sp_fib6_entry *fib6_entry;
5511 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5512 	bool should_offload;
5513 
5514 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5515 
5516 	/* In IPv6 a multipath route is represented using multiple routes, so
5517 	 * we need to set the flags on all of them.
5518 	 */
5519 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5520 				  common);
5521 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5522 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5523 				       should_offload, !should_offload, false);
5524 }
5525 #else
5526 static void
5527 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5528 				 struct mlxsw_sp_fib_entry *fib_entry)
5529 {
5530 }
5531 #endif
5532 
5533 #if IS_ENABLED(CONFIG_IPV6)
5534 static void
5535 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5536 				   struct mlxsw_sp_fib_entry *fib_entry)
5537 {
5538 	struct mlxsw_sp_fib6_entry *fib6_entry;
5539 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5540 
5541 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5542 				  common);
5543 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5544 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5545 				       false, false, false);
5546 }
5547 #else
5548 static void
5549 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5550 				   struct mlxsw_sp_fib_entry *fib_entry)
5551 {
5552 }
5553 #endif
5554 
5555 static void
5556 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5557 				struct mlxsw_sp_fib_entry *fib_entry)
5558 {
5559 	switch (fib_entry->fib_node->fib->proto) {
5560 	case MLXSW_SP_L3_PROTO_IPV4:
5561 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5562 		break;
5563 	case MLXSW_SP_L3_PROTO_IPV6:
5564 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5565 		break;
5566 	}
5567 }
5568 
5569 static void
5570 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5571 				  struct mlxsw_sp_fib_entry *fib_entry)
5572 {
5573 	switch (fib_entry->fib_node->fib->proto) {
5574 	case MLXSW_SP_L3_PROTO_IPV4:
5575 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5576 		break;
5577 	case MLXSW_SP_L3_PROTO_IPV6:
5578 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5579 		break;
5580 	}
5581 }
5582 
5583 static void
5584 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5585 				    struct mlxsw_sp_fib_entry *fib_entry,
5586 				    enum mlxsw_sp_fib_entry_op op)
5587 {
5588 	switch (op) {
5589 	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5590 	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5591 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5592 		break;
5593 	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5594 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5595 		break;
5596 	default:
5597 		break;
5598 	}
5599 }
5600 
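/* Context for the basic low-level ops, which program FIB entries one
 * at a time using the RALUE register and never postpone writes for
 * bulking.
 */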
5601 struct mlxsw_sp_fib_entry_op_ctx_basic {
5602 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5603 };
5604 
5605 static void
5606 mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5607 					enum mlxsw_sp_l3proto proto,
5608 					enum mlxsw_sp_fib_entry_op op,
5609 					u16 virtual_router, u8 prefix_len,
5610 					unsigned char *addr,
5611 					struct mlxsw_sp_fib_entry_priv *priv)
5612 {
5613 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5614 	enum mlxsw_reg_ralxx_protocol ralxx_proto;
5615 	char *ralue_pl = op_ctx_basic->ralue_pl;
5616 	enum mlxsw_reg_ralue_op ralue_op;
5617 
5618 	ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
5619 
5620 	switch (op) {
5621 	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5622 	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5623 		ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
5624 		break;
5625 	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5626 		ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
5627 		break;
5628 	default:
5629 		WARN_ON_ONCE(1);
5630 		return;
5631 	}
5632 
5633 	switch (proto) {
5634 	case MLXSW_SP_L3_PROTO_IPV4:
5635 		mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
5636 				      virtual_router, prefix_len, (u32 *) addr);
5637 		break;
5638 	case MLXSW_SP_L3_PROTO_IPV6:
5639 		mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
5640 				      virtual_router, prefix_len, addr);
5641 		break;
5642 	}
5643 }
5644 
5645 static void
5646 mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5647 						   enum mlxsw_reg_ralue_trap_action trap_action,
5648 						   u16 trap_id, u32 adjacency_index, u16 ecmp_size)
5649 {
5650 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5651 
5652 	mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
5653 					trap_id, adjacency_index, ecmp_size);
5654 }
5655 
5656 static void
5657 mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5658 						  enum mlxsw_reg_ralue_trap_action trap_action,
5659 						  u16 trap_id, u16 local_erif)
5660 {
5661 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5662 
5663 	mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
5664 				       trap_id, local_erif);
5665 }
5666 
5667 static void
5668 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
5669 {
5670 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5671 
5672 	mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
5673 }
5674 
5675 static void
5676 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5677 						      u32 tunnel_ptr)
5678 {
5679 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5680 
5681 	mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
5682 }
5683 
5684 static int
5685 mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5686 					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5687 					  bool *postponed_for_bulk)
5688 {
5689 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5690 
5691 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5692 			       op_ctx_basic->ralue_pl);
5693 }
5694 
5695 static bool
5696 mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
5697 {
5698 	return true;
5699 }
5700 
5701 static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5702 				    struct mlxsw_sp_fib_entry *fib_entry,
5703 				    enum mlxsw_sp_fib_entry_op op)
5704 {
5705 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5706 
5707 	mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
5708 	fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
5709 				    fib_entry->fib_node->key.prefix_len,
5710 				    fib_entry->fib_node->key.addr,
5711 				    fib_entry->priv);
5712 }
5713 
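/* Commit the packed operation to the device. Unless the low-level
 * implementation postponed the write in order to bulk it with
 * subsequent operations, release the per-entry references taken
 * during packing.
 */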
5714 static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5715 				     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5716 				     const struct mlxsw_sp_router_ll_ops *ll_ops)
5717 {
5718 	bool postponed_for_bulk = false;
5719 	int err;
5720 
5721 	err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
5722 	if (!postponed_for_bulk)
5723 		mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
5724 	return err;
5725 }
5726 
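/* Lazily allocate a single adjacency entry that traps packets to the
 * CPU. It is used by routes bound to a nexthop group that currently
 * has no valid adjacency index.
 */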
5727 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp)
5728 {
5729 	enum mlxsw_reg_ratr_trap_action trap_action;
5730 	char ratr_pl[MLXSW_REG_RATR_LEN];
5731 	int err;
5732 
5733 	if (mlxsw_sp->router->adj_discard_index_valid)
5734 		return 0;
5735 
5736 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
5737 				  &mlxsw_sp->router->adj_discard_index);
5738 	if (err)
5739 		return err;
5740 
5741 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
5742 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
5743 			    MLXSW_REG_RATR_TYPE_ETHERNET,
5744 			    mlxsw_sp->router->adj_discard_index,
5745 			    mlxsw_sp->router->lb_rif_index);
5746 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
5747 	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
5748 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5749 	if (err)
5750 		goto err_ratr_write;
5751 
5752 	mlxsw_sp->router->adj_discard_index_valid = true;
5753 
5754 	return 0;
5755 
5756 err_ratr_write:
5757 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
5758 			   mlxsw_sp->router->adj_discard_index);
5759 	return err;
5760 }
5761 
5762 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5763 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5764 					struct mlxsw_sp_fib_entry *fib_entry,
5765 					enum mlxsw_sp_fib_entry_op op)
5766 {
5767 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5768 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5769 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5770 	enum mlxsw_reg_ralue_trap_action trap_action;
5771 	u16 trap_id = 0;
5772 	u32 adjacency_index = 0;
5773 	u16 ecmp_size = 0;
5774 	int err;
5775 
5776 	/* In case the nexthop group adjacency index is valid, use it
	 * with the provided ECMP size. Otherwise, set up a trap and pass
	 * traffic to the kernel.
5779 	 */
5780 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5781 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5782 		adjacency_index = nhgi->adj_index;
5783 		ecmp_size = nhgi->ecmp_size;
5784 	} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
5785 		err = mlxsw_sp_adj_discard_write(mlxsw_sp);
5786 		if (err)
5787 			return err;
5788 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5789 		adjacency_index = mlxsw_sp->router->adj_discard_index;
5790 		ecmp_size = 1;
5791 	} else {
5792 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5793 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5794 	}
5795 
5796 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5797 	ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
5798 					  adjacency_index, ecmp_size);
5799 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5800 }
5801 
5802 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5803 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5804 				       struct mlxsw_sp_fib_entry *fib_entry,
5805 				       enum mlxsw_sp_fib_entry_op op)
5806 {
5807 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5808 	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
5809 	enum mlxsw_reg_ralue_trap_action trap_action;
5810 	u16 trap_id = 0;
5811 	u16 rif_index = 0;
5812 
5813 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5814 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5815 		rif_index = rif->rif_index;
5816 	} else {
5817 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5818 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5819 	}
5820 
5821 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5822 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
5823 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5824 }
5825 
5826 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
5827 				      struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5828 				      struct mlxsw_sp_fib_entry *fib_entry,
5829 				      enum mlxsw_sp_fib_entry_op op)
5830 {
5831 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5832 
5833 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5834 	ll_ops->fib_entry_act_ip2me_pack(op_ctx);
5835 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5836 }
5837 
5838 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
5839 					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5840 					   struct mlxsw_sp_fib_entry *fib_entry,
5841 					   enum mlxsw_sp_fib_entry_op op)
5842 {
5843 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5844 	enum mlxsw_reg_ralue_trap_action trap_action;
5845 
5846 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
5847 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5848 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
5849 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5850 }
5851 
5852 static int
5853 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
5854 				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5855 				  struct mlxsw_sp_fib_entry *fib_entry,
5856 				  enum mlxsw_sp_fib_entry_op op)
5857 {
5858 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5859 	enum mlxsw_reg_ralue_trap_action trap_action;
5860 	u16 trap_id;
5861 
5862 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5863 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
5864 
5865 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5866 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
5867 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5868 }
5869 
5870 static int
5871 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
5872 				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5873 				 struct mlxsw_sp_fib_entry *fib_entry,
5874 				 enum mlxsw_sp_fib_entry_op op)
5875 {
5876 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5877 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
5878 	const struct mlxsw_sp_ipip_ops *ipip_ops;
5879 	int err;
5880 
5881 	if (WARN_ON(!ipip_entry))
5882 		return -EINVAL;
5883 
5884 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5885 	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
5886 				     fib_entry->decap.tunnel_index);
5887 	if (err)
5888 		return err;
5889 
5890 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5891 	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
5892 					     fib_entry->decap.tunnel_index);
5893 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5894 }
5895 
5896 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
5897 					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5898 					   struct mlxsw_sp_fib_entry *fib_entry,
5899 					   enum mlxsw_sp_fib_entry_op op)
5900 {
5901 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5902 
5903 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5904 	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
5905 					     fib_entry->decap.tunnel_index);
5906 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5907 }
5908 
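/* Pack and commit a FIB entry operation according to the entry type. */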
5909 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
5910 				   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5911 				   struct mlxsw_sp_fib_entry *fib_entry,
5912 				   enum mlxsw_sp_fib_entry_op op)
5913 {
5914 	switch (fib_entry->type) {
5915 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5916 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
5917 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5918 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
5919 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
5920 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
5921 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5922 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
5923 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
5924 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
5925 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5926 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
5927 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5928 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
5929 	}
5930 	return -EINVAL;
5931 }
5932 
5933 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
5934 				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5935 				 struct mlxsw_sp_fib_entry *fib_entry,
5936 				 enum mlxsw_sp_fib_entry_op op)
5937 {
5938 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
5939 
5940 	if (err)
5941 		return err;
5942 
5943 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
5944 
	return 0;
5946 }
5947 
5948 static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
5949 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5950 				       struct mlxsw_sp_fib_entry *fib_entry,
5951 				       bool is_new)
5952 {
5953 	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
5954 				     is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
5955 					      MLXSW_SP_FIB_ENTRY_OP_UPDATE);
5956 }
5957 
5958 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
5959 				     struct mlxsw_sp_fib_entry *fib_entry)
5960 {
5961 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
5962 
5963 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
5964 	return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
5965 }
5966 
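/* Skip entries that were never committed to the device; there is
 * nothing to delete for them.
 */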
5967 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
5968 				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5969 				  struct mlxsw_sp_fib_entry *fib_entry)
5970 {
5971 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5972 
5973 	if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
5974 		return 0;
5975 	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
5976 				     MLXSW_SP_FIB_ENTRY_OP_DELETE);
5977 }
5978 
5979 static int
5980 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5981 			     const struct fib_entry_notifier_info *fen_info,
5982 			     struct mlxsw_sp_fib_entry *fib_entry)
5983 {
5984 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
5985 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
5986 	struct mlxsw_sp_router *router = mlxsw_sp->router;
5987 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
5988 	int ifindex = nhgi->nexthops[0].ifindex;
5989 	struct mlxsw_sp_ipip_entry *ipip_entry;
5990 
5991 	switch (fen_info->type) {
5992 	case RTN_LOCAL:
5993 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
5994 							       MLXSW_SP_L3_PROTO_IPV4, dip);
5995 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
5996 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
5997 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
5998 							     fib_entry,
5999 							     ipip_entry);
6000 		}
6001 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6002 						 MLXSW_SP_L3_PROTO_IPV4,
6003 						 &dip)) {
6004 			u32 tunnel_index;
6005 
6006 			tunnel_index = router->nve_decap_config.tunnel_index;
6007 			fib_entry->decap.tunnel_index = tunnel_index;
6008 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6009 			return 0;
6010 		}
6011 		fallthrough;
6012 	case RTN_BROADCAST:
6013 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6014 		return 0;
6015 	case RTN_BLACKHOLE:
6016 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6017 		return 0;
6018 	case RTN_UNREACHABLE:
6019 	case RTN_PROHIBIT:
6020 		/* Packets hitting these routes need to be trapped, but
		 * can be trapped with a lower priority than packets
		 * directed at the host, so use action type local instead
		 * of trap.
6023 		 */
6024 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6025 		return 0;
6026 	case RTN_UNICAST:
6027 		if (nhgi->gateway)
6028 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6029 		else
6030 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6031 		return 0;
6032 	default:
6033 		return -EINVAL;
6034 	}
6035 }
6036 
6037 static void
6038 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6039 			       struct mlxsw_sp_fib_entry *fib_entry)
6040 {
6041 	switch (fib_entry->type) {
6042 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6043 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6044 		break;
6045 	default:
6046 		break;
6047 	}
6048 }
6049 
6050 static struct mlxsw_sp_fib4_entry *
6051 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6052 			   struct mlxsw_sp_fib_node *fib_node,
6053 			   const struct fib_entry_notifier_info *fen_info)
6054 {
6055 	struct mlxsw_sp_fib4_entry *fib4_entry;
6056 	struct mlxsw_sp_fib_entry *fib_entry;
6057 	int err;
6058 
6059 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6060 	if (!fib4_entry)
6061 		return ERR_PTR(-ENOMEM);
6062 	fib_entry = &fib4_entry->common;
6063 
6064 	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
6065 	if (IS_ERR(fib_entry->priv)) {
6066 		err = PTR_ERR(fib_entry->priv);
6067 		goto err_fib_entry_priv_create;
6068 	}
6069 
6070 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6071 	if (err)
6072 		goto err_nexthop4_group_get;
6073 
6074 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6075 					     fib_node->fib);
6076 	if (err)
6077 		goto err_nexthop_group_vr_link;
6078 
6079 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6080 	if (err)
6081 		goto err_fib4_entry_type_set;
6082 
6083 	fib4_entry->fi = fen_info->fi;
6084 	fib_info_hold(fib4_entry->fi);
6085 	fib4_entry->tb_id = fen_info->tb_id;
6086 	fib4_entry->type = fen_info->type;
6087 	fib4_entry->tos = fen_info->tos;
6088 
6089 	fib_entry->fib_node = fib_node;
6090 
6091 	return fib4_entry;
6092 
6093 err_fib4_entry_type_set:
6094 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6095 err_nexthop_group_vr_link:
6096 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6097 err_nexthop4_group_get:
6098 	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
6099 err_fib_entry_priv_create:
6100 	kfree(fib4_entry);
6101 	return ERR_PTR(err);
6102 }
6103 
6104 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6105 					struct mlxsw_sp_fib4_entry *fib4_entry)
6106 {
6107 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6108 
6109 	fib_info_put(fib4_entry->fi);
6110 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6111 	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6112 					 fib_node->fib);
6113 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6114 	mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
6115 	kfree(fib4_entry);
6116 }
6117 
6118 static struct mlxsw_sp_fib4_entry *
6119 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6120 			   const struct fib_entry_notifier_info *fen_info)
6121 {
6122 	struct mlxsw_sp_fib4_entry *fib4_entry;
6123 	struct mlxsw_sp_fib_node *fib_node;
6124 	struct mlxsw_sp_fib *fib;
6125 	struct mlxsw_sp_vr *vr;
6126 
6127 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6128 	if (!vr)
6129 		return NULL;
6130 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6131 
6132 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6133 					    sizeof(fen_info->dst),
6134 					    fen_info->dst_len);
6135 	if (!fib_node)
6136 		return NULL;
6137 
6138 	fib4_entry = container_of(fib_node->fib_entry,
6139 				  struct mlxsw_sp_fib4_entry, common);
6140 	if (fib4_entry->tb_id == fen_info->tb_id &&
6141 	    fib4_entry->tos == fen_info->tos &&
6142 	    fib4_entry->type == fen_info->type &&
6143 	    fib4_entry->fi == fen_info->fi)
6144 		return fib4_entry;
6145 
6146 	return NULL;
6147 }
6148 
6149 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6150 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6151 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6152 	.key_len = sizeof(struct mlxsw_sp_fib_key),
6153 	.automatic_shrinking = true,
6154 };
6155 
6156 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6157 				    struct mlxsw_sp_fib_node *fib_node)
6158 {
6159 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6160 				      mlxsw_sp_fib_ht_params);
6161 }
6162 
6163 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6164 				     struct mlxsw_sp_fib_node *fib_node)
6165 {
6166 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6167 			       mlxsw_sp_fib_ht_params);
6168 }
6169 
6170 static struct mlxsw_sp_fib_node *
6171 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6172 			 size_t addr_len, unsigned char prefix_len)
6173 {
6174 	struct mlxsw_sp_fib_key key;
6175 
6176 	memset(&key, 0, sizeof(key));
6177 	memcpy(key.addr, addr, addr_len);
6178 	key.prefix_len = prefix_len;
6179 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6180 }
6181 
6182 static struct mlxsw_sp_fib_node *
6183 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6184 			 size_t addr_len, unsigned char prefix_len)
6185 {
6186 	struct mlxsw_sp_fib_node *fib_node;
6187 
6188 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6189 	if (!fib_node)
6190 		return NULL;
6191 
6192 	list_add(&fib_node->list, &fib->node_list);
6193 	memcpy(fib_node->key.addr, addr, addr_len);
6194 	fib_node->key.prefix_len = prefix_len;
6195 
6196 	return fib_node;
6197 }
6198 
6199 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6200 {
6201 	list_del(&fib_node->list);
6202 	kfree(fib_node);
6203 }
6204 
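/* Account for the node's prefix length in the LPM tree used by its
 * protocol. If the prefix length is not yet covered, get a tree that
 * includes it and replace the current one on all relevant virtual
 * routers.
 */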
6205 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6206 				      struct mlxsw_sp_fib_node *fib_node)
6207 {
6208 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6209 	struct mlxsw_sp_fib *fib = fib_node->fib;
6210 	struct mlxsw_sp_lpm_tree *lpm_tree;
6211 	int err;
6212 
6213 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6214 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6215 		goto out;
6216 
6217 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6218 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6219 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6220 					 fib->proto);
6221 	if (IS_ERR(lpm_tree))
6222 		return PTR_ERR(lpm_tree);
6223 
6224 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6225 	if (err)
6226 		goto err_lpm_tree_replace;
6227 
6228 out:
6229 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6230 	return 0;
6231 
6232 err_lpm_tree_replace:
6233 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6234 	return err;
6235 }
6236 
6237 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6238 					 struct mlxsw_sp_fib_node *fib_node)
6239 {
6240 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6241 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6242 	struct mlxsw_sp_fib *fib = fib_node->fib;
6243 	int err;
6244 
6245 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6246 		return;
6247 	/* Try to construct a new LPM tree from the current prefix usage
6248 	 * minus the unused one. If we fail, continue using the old one.
6249 	 */
6250 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6251 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6252 				    fib_node->key.prefix_len);
6253 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6254 					 fib->proto);
6255 	if (IS_ERR(lpm_tree))
6256 		return;
6257 
6258 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6259 	if (err)
6260 		goto err_lpm_tree_replace;
6261 
6262 	return;
6263 
6264 err_lpm_tree_replace:
6265 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6266 }
6267 
6268 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6269 				  struct mlxsw_sp_fib_node *fib_node,
6270 				  struct mlxsw_sp_fib *fib)
6271 {
6272 	int err;
6273 
6274 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
6275 	if (err)
6276 		return err;
6277 	fib_node->fib = fib;
6278 
6279 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6280 	if (err)
6281 		goto err_fib_lpm_tree_link;
6282 
6283 	return 0;
6284 
6285 err_fib_lpm_tree_link:
6286 	fib_node->fib = NULL;
6287 	mlxsw_sp_fib_node_remove(fib, fib_node);
6288 	return err;
6289 }
6290 
6291 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6292 				   struct mlxsw_sp_fib_node *fib_node)
6293 {
6294 	struct mlxsw_sp_fib *fib = fib_node->fib;
6295 
6296 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6297 	fib_node->fib = NULL;
6298 	mlxsw_sp_fib_node_remove(fib, fib_node);
6299 }
6300 
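/* Look up the FIB node for the given prefix, creating it (and taking
 * a reference on its virtual router) if it does not exist yet.
 */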
6301 static struct mlxsw_sp_fib_node *
6302 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6303 		      size_t addr_len, unsigned char prefix_len,
6304 		      enum mlxsw_sp_l3proto proto)
6305 {
6306 	struct mlxsw_sp_fib_node *fib_node;
6307 	struct mlxsw_sp_fib *fib;
6308 	struct mlxsw_sp_vr *vr;
6309 	int err;
6310 
6311 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6312 	if (IS_ERR(vr))
6313 		return ERR_CAST(vr);
6314 	fib = mlxsw_sp_vr_fib(vr, proto);
6315 
6316 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6317 	if (fib_node)
6318 		return fib_node;
6319 
6320 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6321 	if (!fib_node) {
6322 		err = -ENOMEM;
6323 		goto err_fib_node_create;
6324 	}
6325 
6326 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6327 	if (err)
6328 		goto err_fib_node_init;
6329 
6330 	return fib_node;
6331 
6332 err_fib_node_init:
6333 	mlxsw_sp_fib_node_destroy(fib_node);
6334 err_fib_node_create:
6335 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6336 	return ERR_PTR(err);
6337 }
6338 
6339 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6340 				  struct mlxsw_sp_fib_node *fib_node)
6341 {
6342 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6343 
6344 	if (fib_node->fib_entry)
6345 		return;
6346 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6347 	mlxsw_sp_fib_node_destroy(fib_node);
6348 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6349 }
6350 
6351 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6352 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6353 					struct mlxsw_sp_fib_entry *fib_entry)
6354 {
6355 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6356 	bool is_new = !fib_node->fib_entry;
6357 	int err;
6358 
6359 	fib_node->fib_entry = fib_entry;
6360 
6361 	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
6362 	if (err)
6363 		goto err_fib_entry_update;
6364 
6365 	return 0;
6366 
6367 err_fib_entry_update:
6368 	fib_node->fib_entry = NULL;
6369 	return err;
6370 }
6371 
6372 static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6373 					    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6374 					    struct mlxsw_sp_fib_entry *fib_entry)
6375 {
6376 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6377 	int err;
6378 
6379 	err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
6380 	fib_node->fib_entry = NULL;
6381 	return err;
6382 }
6383 
6384 static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6385 					   struct mlxsw_sp_fib_entry *fib_entry)
6386 {
6387 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6388 
6389 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6390 	__mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
6391 }
6392 
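/* The kernel's local and main tables are squashed into a single
 * virtual router in the device, so a route from the main table must
 * not replace the local table's route for the same prefix.
 */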
6393 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6394 {
6395 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6396 	struct mlxsw_sp_fib4_entry *fib4_replaced;
6397 
6398 	if (!fib_node->fib_entry)
6399 		return true;
6400 
6401 	fib4_replaced = container_of(fib_node->fib_entry,
6402 				     struct mlxsw_sp_fib4_entry, common);
6403 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6404 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
6405 		return false;
6406 
6407 	return true;
6408 }
6409 
6410 static int
6411 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6412 			     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6413 			     const struct fib_entry_notifier_info *fen_info)
6414 {
6415 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6416 	struct mlxsw_sp_fib_entry *replaced;
6417 	struct mlxsw_sp_fib_node *fib_node;
6418 	int err;
6419 
6420 	if (mlxsw_sp->router->aborted)
6421 		return 0;
6422 
6423 	if (fen_info->fi->nh &&
6424 	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6425 		return 0;
6426 
6427 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6428 					 &fen_info->dst, sizeof(fen_info->dst),
6429 					 fen_info->dst_len,
6430 					 MLXSW_SP_L3_PROTO_IPV4);
6431 	if (IS_ERR(fib_node)) {
6432 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6433 		return PTR_ERR(fib_node);
6434 	}
6435 
6436 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6437 	if (IS_ERR(fib4_entry)) {
6438 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6439 		err = PTR_ERR(fib4_entry);
6440 		goto err_fib4_entry_create;
6441 	}
6442 
6443 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6444 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6445 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6446 		return 0;
6447 	}
6448 
6449 	replaced = fib_node->fib_entry;
6450 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
6451 	if (err) {
6452 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6453 		goto err_fib_node_entry_link;
6454 	}
6455 
6456 	/* Nothing to replace */
6457 	if (!replaced)
6458 		return 0;
6459 
6460 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6461 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6462 				     common);
6463 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6464 
6465 	return 0;
6466 
6467 err_fib_node_entry_link:
6468 	fib_node->fib_entry = replaced;
6469 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6470 err_fib4_entry_create:
6471 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6472 	return err;
6473 }
6474 
6475 static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6476 				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6477 				    struct fib_entry_notifier_info *fen_info)
6478 {
6479 	struct mlxsw_sp_fib4_entry *fib4_entry;
6480 	struct mlxsw_sp_fib_node *fib_node;
6481 	int err;
6482 
6483 	if (mlxsw_sp->router->aborted)
6484 		return 0;
6485 
6486 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6487 	if (!fib4_entry)
6488 		return 0;
6489 	fib_node = fib4_entry->common.fib_node;
6490 
6491 	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
6492 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6493 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6494 	return err;
6495 }
6496 
6497 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6498 {
6499 	/* Multicast routes aren't supported, so ignore them. Neighbour
6500 	 * Discovery packets are specifically trapped.
6501 	 */
6502 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6503 		return true;
6504 
6505 	/* Cloned routes are irrelevant in the forwarding path. */
6506 	if (rt->fib6_flags & RTF_CACHE)
6507 		return true;
6508 
6509 	return false;
6510 }
6511 
6512 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6513 {
6514 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6515 
6516 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6517 	if (!mlxsw_sp_rt6)
6518 		return ERR_PTR(-ENOMEM);
6519 
	/* In case of route replace, the replaced route is deleted with
	 * no notification. Take a reference to prevent accessing freed
6522 	 * memory.
6523 	 */
6524 	mlxsw_sp_rt6->rt = rt;
6525 	fib6_info_hold(rt);
6526 
6527 	return mlxsw_sp_rt6;
6528 }
6529 
6530 #if IS_ENABLED(CONFIG_IPV6)
6531 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6532 {
6533 	fib6_info_release(rt);
6534 }
6535 #else
6536 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6537 {
6538 }
6539 #endif
6540 
6541 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6542 {
6543 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6544 
6545 	if (!mlxsw_sp_rt6->rt->nh)
6546 		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6547 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6548 	kfree(mlxsw_sp_rt6);
6549 }
6550 
6551 static struct fib6_info *
6552 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6553 {
6554 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6555 				list)->rt;
6556 }
6557 
6558 static struct mlxsw_sp_rt6 *
6559 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6560 			    const struct fib6_info *rt)
6561 {
6562 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6563 
6564 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6565 		if (mlxsw_sp_rt6->rt == rt)
6566 			return mlxsw_sp_rt6;
6567 	}
6568 
6569 	return NULL;
6570 }
6571 
6572 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6573 					const struct fib6_info *rt,
6574 					enum mlxsw_sp_ipip_type *ret)
6575 {
6576 	return rt->fib6_nh->fib_nh_dev &&
6577 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6578 }
6579 
6580 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6581 				  struct mlxsw_sp_nexthop_group *nh_grp,
6582 				  struct mlxsw_sp_nexthop *nh,
6583 				  const struct fib6_info *rt)
6584 {
6585 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6586 
6587 	nh->nhgi = nh_grp->nhgi;
6588 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6589 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6590 #if IS_ENABLED(CONFIG_IPV6)
6591 	nh->neigh_tbl = &nd_tbl;
6592 #endif
6593 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6594 
6595 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6596 
6597 	if (!dev)
6598 		return 0;
6599 	nh->ifindex = dev->ifindex;
6600 
6601 	return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6602 }
6603 
6604 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6605 				   struct mlxsw_sp_nexthop *nh)
6606 {
6607 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6608 	list_del(&nh->router_list_node);
6609 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6610 }
6611 
6612 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6613 				    const struct fib6_info *rt)
6614 {
6615 	return rt->fib6_nh->fib_nh_gw_family ||
6616 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6617 }
6618 
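/* Build the nexthop group info from the entry's route list. An IPv6
 * multipath route is represented by multiple sibling routes, each of
 * which contributes one nexthop.
 */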
6619 static int
6620 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6621 				  struct mlxsw_sp_nexthop_group *nh_grp,
6622 				  struct mlxsw_sp_fib6_entry *fib6_entry)
6623 {
6624 	struct mlxsw_sp_nexthop_group_info *nhgi;
6625 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6626 	struct mlxsw_sp_nexthop *nh;
6627 	int err, i;
6628 
6629 	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6630 		       GFP_KERNEL);
6631 	if (!nhgi)
6632 		return -ENOMEM;
6633 	nh_grp->nhgi = nhgi;
6634 	nhgi->nh_grp = nh_grp;
6635 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6636 					struct mlxsw_sp_rt6, list);
6637 	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6638 	nhgi->count = fib6_entry->nrt6;
6639 	for (i = 0; i < nhgi->count; i++) {
6640 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
6641 
6642 		nh = &nhgi->nexthops[i];
6643 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6644 		if (err)
6645 			goto err_nexthop6_init;
6646 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6647 	}
6649 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6650 	if (err)
6651 		goto err_group_refresh;
6652 
6653 	return 0;
6654 
6655 err_group_refresh:
6656 	i = nhgi->count;
6657 err_nexthop6_init:
6658 	for (i--; i >= 0; i--) {
6659 		nh = &nhgi->nexthops[i];
6660 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6661 	}
6662 	kfree(nhgi);
6663 	return err;
6664 }
6665 
6666 static void
6667 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6668 				  struct mlxsw_sp_nexthop_group *nh_grp)
6669 {
6670 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6671 	int i;
6672 
6673 	for (i = nhgi->count - 1; i >= 0; i--) {
6674 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6675 
6676 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6677 	}
6678 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6679 	WARN_ON_ONCE(nhgi->adj_index_valid);
6680 	kfree(nhgi);
6681 }
6682 
6683 static struct mlxsw_sp_nexthop_group *
6684 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6685 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6686 {
6687 	struct mlxsw_sp_nexthop_group *nh_grp;
6688 	int err;
6689 
6690 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6691 	if (!nh_grp)
6692 		return ERR_PTR(-ENOMEM);
6693 	INIT_LIST_HEAD(&nh_grp->vr_list);
6694 	err = rhashtable_init(&nh_grp->vr_ht,
6695 			      &mlxsw_sp_nexthop_group_vr_ht_params);
6696 	if (err)
6697 		goto err_nexthop_group_vr_ht_init;
6698 	INIT_LIST_HEAD(&nh_grp->fib_list);
6699 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6700 
6701 	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6702 	if (err)
6703 		goto err_nexthop_group_info_init;
6704 
6705 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6706 	if (err)
6707 		goto err_nexthop_group_insert;
6708 
6709 	nh_grp->can_destroy = true;
6710 
6711 	return nh_grp;
6712 
6713 err_nexthop_group_insert:
6714 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6715 err_nexthop_group_info_init:
6716 	rhashtable_destroy(&nh_grp->vr_ht);
6717 err_nexthop_group_vr_ht_init:
6718 	kfree(nh_grp);
6719 	return ERR_PTR(err);
6720 }
6721 
6722 static void
6723 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6724 				struct mlxsw_sp_nexthop_group *nh_grp)
6725 {
6726 	if (!nh_grp->can_destroy)
6727 		return;
6728 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6729 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6730 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6731 	rhashtable_destroy(&nh_grp->vr_ht);
6732 	kfree(nh_grp);
6733 }
6734 
6735 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6736 				       struct mlxsw_sp_fib6_entry *fib6_entry)
6737 {
6738 	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6739 	struct mlxsw_sp_nexthop_group *nh_grp;
6740 
6741 	if (rt->nh) {
6742 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6743 							   rt->nh->id);
6744 		if (WARN_ON_ONCE(!nh_grp))
6745 			return -EINVAL;
6746 		goto out;
6747 	}
6748 
6749 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6750 	if (!nh_grp) {
6751 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6752 		if (IS_ERR(nh_grp))
6753 			return PTR_ERR(nh_grp);
6754 	}
6755 
6756 	/* The route and the nexthop are described by the same struct, so we
	 * need to update the nexthop offload indication for the new route.
6758 	 */
6759 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6760 
6761 out:
6762 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6763 		      &nh_grp->fib_list);
6764 	fib6_entry->common.nh_group = nh_grp;
6765 
6766 	return 0;
6767 }
6768 
6769 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6770 					struct mlxsw_sp_fib_entry *fib_entry)
6771 {
6772 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6773 
6774 	list_del(&fib_entry->nexthop_group_node);
6775 	if (!list_empty(&nh_grp->fib_list))
6776 		return;
6777 
6778 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6779 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6780 		return;
6781 	}
6782 
6783 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
6784 }
6785 
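/* Migrate the entry to a nexthop group that matches its current set of
 * routes. On failure, roll back to the old group.
 */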
6786 static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6787 					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6788 					  struct mlxsw_sp_fib6_entry *fib6_entry)
6789 {
6790 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6791 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6792 	int err;
6793 
6794 	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6795 	fib6_entry->common.nh_group = NULL;
6796 	list_del(&fib6_entry->common.nexthop_group_node);
6797 
6798 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6799 	if (err)
6800 		goto err_nexthop6_group_get;
6801 
6802 	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6803 					     fib_node->fib);
6804 	if (err)
6805 		goto err_nexthop_group_vr_link;
6806 
	/* If this entry is offloaded, the adjacency index currently
	 * associated with it in the device's table is that of the old
	 * group. Start using the new one instead.
6810 	 */
6811 	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
6812 					  &fib6_entry->common, false);
6813 	if (err)
6814 		goto err_fib_entry_update;
6815 
6816 	if (list_empty(&old_nh_grp->fib_list))
6817 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6818 
6819 	return 0;
6820 
6821 err_fib_entry_update:
6822 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6823 					 fib_node->fib);
6824 err_nexthop_group_vr_link:
6825 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6826 err_nexthop6_group_get:
6827 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6828 		      &old_nh_grp->fib_list);
6829 	fib6_entry->common.nh_group = old_nh_grp;
6830 	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6831 	return err;
6832 }
6833 
6834 static int
6835 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6836 				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6837 				struct mlxsw_sp_fib6_entry *fib6_entry,
6838 				struct fib6_info **rt_arr, unsigned int nrt6)
6839 {
6840 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6841 	int err, i;
6842 
6843 	for (i = 0; i < nrt6; i++) {
6844 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6845 		if (IS_ERR(mlxsw_sp_rt6)) {
6846 			err = PTR_ERR(mlxsw_sp_rt6);
6847 			goto err_rt6_create;
6848 		}
6849 
6850 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6851 		fib6_entry->nrt6++;
6852 	}
6853 
6854 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
6855 	if (err)
6856 		goto err_nexthop6_group_update;
6857 
6858 	return 0;
6859 
6860 err_nexthop6_group_update:
6861 	i = nrt6;
6862 err_rt6_create:
6863 	for (i--; i >= 0; i--) {
6864 		fib6_entry->nrt6--;
6865 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6866 					       struct mlxsw_sp_rt6, list);
6867 		list_del(&mlxsw_sp_rt6->list);
6868 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6869 	}
6870 	return err;
6871 }
6872 
6873 static void
6874 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
6875 				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6876 				struct mlxsw_sp_fib6_entry *fib6_entry,
6877 				struct fib6_info **rt_arr, unsigned int nrt6)
6878 {
6879 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6880 	int i;
6881 
6882 	for (i = 0; i < nrt6; i++) {
6883 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
6884 							   rt_arr[i]);
6885 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
6886 			continue;
6887 
6888 		fib6_entry->nrt6--;
6889 		list_del(&mlxsw_sp_rt6->list);
6890 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6891 	}
6892 
6893 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
6894 }
6895 
6896 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6897 					 struct mlxsw_sp_fib_entry *fib_entry,
6898 					 const struct fib6_info *rt)
6899 {
6900 	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
6901 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6902 	else if (rt->fib6_type == RTN_BLACKHOLE)
6903 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6904 	else if (rt->fib6_flags & RTF_REJECT)
6905 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6906 	else if (fib_entry->nh_group->nhgi->gateway)
6907 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6908 	else
6909 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6910 }
6911 
6912 static void
6913 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
6914 {
6915 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
6916 
6917 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
6918 				 list) {
6919 		fib6_entry->nrt6--;
6920 		list_del(&mlxsw_sp_rt6->list);
6921 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6922 	}
6923 }
6924 
6925 static struct mlxsw_sp_fib6_entry *
6926 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
6927 			   struct mlxsw_sp_fib_node *fib_node,
6928 			   struct fib6_info **rt_arr, unsigned int nrt6)
6929 {
6930 	struct mlxsw_sp_fib6_entry *fib6_entry;
6931 	struct mlxsw_sp_fib_entry *fib_entry;
6932 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6933 	int err, i;
6934 
6935 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
6936 	if (!fib6_entry)
6937 		return ERR_PTR(-ENOMEM);
6938 	fib_entry = &fib6_entry->common;
6939 
6940 	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
6941 	if (IS_ERR(fib_entry->priv)) {
6942 		err = PTR_ERR(fib_entry->priv);
6943 		goto err_fib_entry_priv_create;
6944 	}
6945 
6946 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
6947 
6948 	for (i = 0; i < nrt6; i++) {
6949 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6950 		if (IS_ERR(mlxsw_sp_rt6)) {
6951 			err = PTR_ERR(mlxsw_sp_rt6);
6952 			goto err_rt6_create;
6953 		}
6954 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6955 		fib6_entry->nrt6++;
6956 	}
6957 
6958 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6959 	if (err)
6960 		goto err_nexthop6_group_get;
6961 
6962 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6963 					     fib_node->fib);
6964 	if (err)
6965 		goto err_nexthop_group_vr_link;
6966 
6967 	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
6968 
6969 	fib_entry->fib_node = fib_node;
6970 
6971 	return fib6_entry;
6972 
6973 err_nexthop_group_vr_link:
6974 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
6975 err_nexthop6_group_get:
6976 	i = nrt6;
6977 err_rt6_create:
6978 	for (i--; i >= 0; i--) {
6979 		fib6_entry->nrt6--;
6980 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6981 					       struct mlxsw_sp_rt6, list);
6982 		list_del(&mlxsw_sp_rt6->list);
6983 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6984 	}
6985 	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
6986 err_fib_entry_priv_create:
6987 	kfree(fib6_entry);
6988 	return ERR_PTR(err);
6989 }
6990 
6991 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6992 					struct mlxsw_sp_fib6_entry *fib6_entry)
6993 {
6994 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6995 
6996 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6997 					 fib_node->fib);
6998 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6999 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7000 	WARN_ON(fib6_entry->nrt6);
7001 	mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
7002 	kfree(fib6_entry);
7003 }
7004 
7005 static struct mlxsw_sp_fib6_entry *
7006 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7007 			   const struct fib6_info *rt)
7008 {
7009 	struct mlxsw_sp_fib6_entry *fib6_entry;
7010 	struct mlxsw_sp_fib_node *fib_node;
7011 	struct mlxsw_sp_fib *fib;
7012 	struct fib6_info *cmp_rt;
7013 	struct mlxsw_sp_vr *vr;
7014 
7015 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7016 	if (!vr)
7017 		return NULL;
7018 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7019 
7020 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7021 					    sizeof(rt->fib6_dst.addr),
7022 					    rt->fib6_dst.plen);
7023 	if (!fib_node)
7024 		return NULL;
7025 
7026 	fib6_entry = container_of(fib_node->fib_entry,
7027 				  struct mlxsw_sp_fib6_entry, common);
7028 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7029 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7030 	    rt->fib6_metric == cmp_rt->fib6_metric &&
7031 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7032 		return fib6_entry;
7033 
7034 	return NULL;
7035 }
7036 
7037 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7038 {
7039 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7040 	struct mlxsw_sp_fib6_entry *fib6_replaced;
7041 	struct fib6_info *rt, *rt_replaced;
7042 
7043 	if (!fib_node->fib_entry)
7044 		return true;
7045 
7046 	fib6_replaced = container_of(fib_node->fib_entry,
7047 				     struct mlxsw_sp_fib6_entry,
7048 				     common);
7049 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7050 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7051 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7052 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7053 		return false;
7054 
7055 	return true;
7056 }
7057 
7058 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7059 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7060 					struct fib6_info **rt_arr, unsigned int nrt6)
7061 {
7062 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7063 	struct mlxsw_sp_fib_entry *replaced;
7064 	struct mlxsw_sp_fib_node *fib_node;
7065 	struct fib6_info *rt = rt_arr[0];
7066 	int err;
7067 
7068 	if (mlxsw_sp->router->aborted)
7069 		return 0;
7070 
7071 	if (rt->fib6_src.plen)
7072 		return -EINVAL;
7073 
7074 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7075 		return 0;
7076 
7077 	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7078 		return 0;
7079 
7080 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7081 					 &rt->fib6_dst.addr,
7082 					 sizeof(rt->fib6_dst.addr),
7083 					 rt->fib6_dst.plen,
7084 					 MLXSW_SP_L3_PROTO_IPV6);
7085 	if (IS_ERR(fib_node))
7086 		return PTR_ERR(fib_node);
7087 
7088 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7089 						nrt6);
7090 	if (IS_ERR(fib6_entry)) {
7091 		err = PTR_ERR(fib6_entry);
7092 		goto err_fib6_entry_create;
7093 	}
7094 
7095 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7096 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7097 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7098 		return 0;
7099 	}
7100 
7101 	replaced = fib_node->fib_entry;
7102 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
7103 	if (err)
7104 		goto err_fib_node_entry_link;
7105 
7106 	/* Nothing to replace */
7107 	if (!replaced)
7108 		return 0;
7109 
7110 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7111 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7112 				     common);
7113 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7114 
7115 	return 0;
7116 
7117 err_fib_node_entry_link:
7118 	fib_node->fib_entry = replaced;
7119 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7120 err_fib6_entry_create:
7121 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7122 	return err;
7123 }
7124 
7125 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7126 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7127 				       struct fib6_info **rt_arr, unsigned int nrt6)
7128 {
7129 	struct mlxsw_sp_fib6_entry *fib6_entry;
7130 	struct mlxsw_sp_fib_node *fib_node;
7131 	struct fib6_info *rt = rt_arr[0];
7132 	int err;
7133 
7134 	if (mlxsw_sp->router->aborted)
7135 		return 0;
7136 
7137 	if (rt->fib6_src.plen)
7138 		return -EINVAL;
7139 
7140 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7141 		return 0;
7142 
7143 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7144 					 &rt->fib6_dst.addr,
7145 					 sizeof(rt->fib6_dst.addr),
7146 					 rt->fib6_dst.plen,
7147 					 MLXSW_SP_L3_PROTO_IPV6);
7148 	if (IS_ERR(fib_node))
7149 		return PTR_ERR(fib_node);
7150 
7151 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7152 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7153 		return -EINVAL;
7154 	}
7155 
7156 	fib6_entry = container_of(fib_node->fib_entry,
7157 				  struct mlxsw_sp_fib6_entry, common);
7158 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
7159 	if (err)
7160 		goto err_fib6_entry_nexthop_add;
7161 
7162 	return 0;
7163 
7164 err_fib6_entry_nexthop_add:
7165 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7166 	return err;
7167 }
7168 
7169 static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7170 				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7171 				    struct fib6_info **rt_arr, unsigned int nrt6)
7172 {
7173 	struct mlxsw_sp_fib6_entry *fib6_entry;
7174 	struct mlxsw_sp_fib_node *fib_node;
7175 	struct fib6_info *rt = rt_arr[0];
7176 	int err;
7177 
7178 	if (mlxsw_sp->router->aborted)
7179 		return 0;
7180 
7181 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7182 		return 0;
7183 
	/* Multipath routes are first added to the FIB trie and only then
	 * notified. If we vetoed the addition, we will get a delete
	 * notification for a route we do not have. Therefore, do not warn if
	 * the route was not found.
	 */
7189 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7190 	if (!fib6_entry)
7191 		return 0;
7192 
7193 	/* If not all the nexthops are deleted, then only reduce the nexthop
7194 	 * group.
7195 	 */
7196 	if (nrt6 != fib6_entry->nrt6) {
7197 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
7198 		return 0;
7199 	}
7200 
7201 	fib_node = fib6_entry->common.fib_node;
7202 
7203 	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
7204 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7205 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7206 	return err;
7207 }
7208 
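/* Install the abort mechanism for one protocol: create a minimal LPM tree,
 * bind every virtual router to it and program a default catch-all entry with
 * an ip2me action, so that all packets are trapped to the CPU and forwarded
 * in software.
 */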
7209 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
7210 					    enum mlxsw_sp_l3proto proto,
7211 					    u8 tree_id)
7212 {
7213 	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
7214 	enum mlxsw_reg_ralxx_protocol ralxx_proto =
7215 				(enum mlxsw_reg_ralxx_protocol) proto;
7216 	struct mlxsw_sp_fib_entry_priv *priv;
7217 	char xralta_pl[MLXSW_REG_XRALTA_LEN];
7218 	char xralst_pl[MLXSW_REG_XRALST_LEN];
7219 	int i, err;
7220 
7221 	mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id);
7222 	err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
7223 	if (err)
7224 		return err;
7225 
7226 	mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
7227 	err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
7228 	if (err)
7229 		return err;
7230 
7231 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
7232 		struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
7233 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7234 		char xraltb_pl[MLXSW_REG_XRALTB_LEN];
7235 
7236 		mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
7237 		mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id);
7238 		err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
7239 		if (err)
7240 			return err;
7241 
7242 		priv = mlxsw_sp_fib_entry_priv_create(ll_ops);
7243 		if (IS_ERR(priv))
7244 			return PTR_ERR(priv);
7245 
7246 		ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE,
7247 				       vr->id, 0, NULL, priv);
7248 		ll_ops->fib_entry_act_ip2me_pack(op_ctx);
7249 		err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL);
7250 		mlxsw_sp_fib_entry_priv_put(priv);
7251 		if (err)
7252 			return err;
7253 	}
7254 
7255 	return 0;
7256 }
7257 
7258 static struct mlxsw_sp_mr_table *
7259 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7260 {
7261 	if (family == RTNL_FAMILY_IPMR)
7262 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7263 	else
7264 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7265 }
7266 
7267 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7268 				     struct mfc_entry_notifier_info *men_info,
7269 				     bool replace)
7270 {
7271 	struct mlxsw_sp_mr_table *mrt;
7272 	struct mlxsw_sp_vr *vr;
7273 
7274 	if (mlxsw_sp->router->aborted)
7275 		return 0;
7276 
7277 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7278 	if (IS_ERR(vr))
7279 		return PTR_ERR(vr);
7280 
7281 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7282 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7283 }
7284 
7285 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7286 				      struct mfc_entry_notifier_info *men_info)
7287 {
7288 	struct mlxsw_sp_mr_table *mrt;
7289 	struct mlxsw_sp_vr *vr;
7290 
7291 	if (mlxsw_sp->router->aborted)
7292 		return;
7293 
7294 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7295 	if (WARN_ON(!vr))
7296 		return;
7297 
7298 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7299 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7300 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7301 }
7302 
7303 static int
7304 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7305 			      struct vif_entry_notifier_info *ven_info)
7306 {
7307 	struct mlxsw_sp_mr_table *mrt;
7308 	struct mlxsw_sp_rif *rif;
7309 	struct mlxsw_sp_vr *vr;
7310 
7311 	if (mlxsw_sp->router->aborted)
7312 		return 0;
7313 
7314 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7315 	if (IS_ERR(vr))
7316 		return PTR_ERR(vr);
7317 
7318 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7319 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7320 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7321 				   ven_info->vif_index,
7322 				   ven_info->vif_flags, rif);
7323 }
7324 
7325 static void
7326 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7327 			      struct vif_entry_notifier_info *ven_info)
7328 {
7329 	struct mlxsw_sp_mr_table *mrt;
7330 	struct mlxsw_sp_vr *vr;
7331 
7332 	if (mlxsw_sp->router->aborted)
7333 		return;
7334 
7335 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7336 	if (WARN_ON(!vr))
7337 		return;
7338 
7339 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7340 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7341 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7342 }
7343 
7344 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
7345 {
7346 	enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4;
7347 	int err;
7348 
7349 	err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
7350 					       MLXSW_SP_LPM_TREE_MIN);
7351 	if (err)
7352 		return err;
7353 
	/* The multicast router code does not need an abort trap as, by
	 * default, packets that don't match any routes are trapped to the
	 * CPU.
	 */
7357 
7358 	proto = MLXSW_SP_L3_PROTO_IPV6;
7359 	return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
7360 						MLXSW_SP_LPM_TREE_MIN + 1);
7361 }
7362 
7363 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7364 				     struct mlxsw_sp_fib_node *fib_node)
7365 {
7366 	struct mlxsw_sp_fib4_entry *fib4_entry;
7367 
7368 	fib4_entry = container_of(fib_node->fib_entry,
7369 				  struct mlxsw_sp_fib4_entry, common);
7370 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7371 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7372 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7373 }
7374 
7375 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7376 				     struct mlxsw_sp_fib_node *fib_node)
7377 {
7378 	struct mlxsw_sp_fib6_entry *fib6_entry;
7379 
7380 	fib6_entry = container_of(fib_node->fib_entry,
7381 				  struct mlxsw_sp_fib6_entry, common);
7382 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7383 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7384 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7385 }
7386 
7387 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7388 				    struct mlxsw_sp_fib_node *fib_node)
7389 {
7390 	switch (fib_node->fib->proto) {
7391 	case MLXSW_SP_L3_PROTO_IPV4:
7392 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7393 		break;
7394 	case MLXSW_SP_L3_PROTO_IPV6:
7395 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7396 		break;
7397 	}
7398 }
7399 
7400 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7401 				  struct mlxsw_sp_vr *vr,
7402 				  enum mlxsw_sp_l3proto proto)
7403 {
7404 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7405 	struct mlxsw_sp_fib_node *fib_node, *tmp;
7406 
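	/* Flushing the last node may free the FIB (and with it the list
	 * head), so decide up front whether this is the last iteration
	 * instead of testing against a possibly freed head afterwards.
	 */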
7407 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7408 		bool do_break = &tmp->list == &fib->node_list;
7409 
7410 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7411 		if (do_break)
7412 			break;
7413 	}
7414 }
7415 
7416 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7417 {
7418 	int i, j;
7419 
7420 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
7421 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7422 
7423 		if (!mlxsw_sp_vr_is_used(vr))
7424 			continue;
7425 
7426 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7427 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7428 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7429 
7430 		/* If virtual router was only used for IPv4, then it's no
7431 		 * longer used.
7432 		 */
7433 		if (!mlxsw_sp_vr_is_used(vr))
7434 			continue;
7435 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7436 	}
7437 
	/* After flushing all the routes, it is not possible that anyone is
	 * still using the adjacency index that is discarding packets, so free
	 * it in case it was allocated.
	 */
7442 	if (!mlxsw_sp->router->adj_discard_index_valid)
7443 		return;
7444 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
7445 			   mlxsw_sp->router->adj_discard_index);
7446 	mlxsw_sp->router->adj_discard_index_valid = false;
7447 }
7448 
7449 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
7450 {
7451 	int err;
7452 
7453 	if (mlxsw_sp->router->aborted)
7454 		return;
7455 	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
7456 	mlxsw_sp_router_fib_flush(mlxsw_sp);
7457 	mlxsw_sp->router->aborted = true;
7458 	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
7459 	if (err)
7460 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
7461 }
7462 
7463 struct mlxsw_sp_fib6_event {
7464 	struct fib6_info **rt_arr;
7465 	unsigned int nrt6;
7466 };
7467 
7468 struct mlxsw_sp_fib_event {
7469 	struct list_head list; /* node in fib queue */
7470 	union {
7471 		struct mlxsw_sp_fib6_event fib6_event;
7472 		struct fib_entry_notifier_info fen_info;
7473 		struct fib_rule_notifier_info fr_info;
7474 		struct fib_nh_notifier_info fnh_info;
7475 		struct mfc_entry_notifier_info men_info;
7476 		struct vif_entry_notifier_info ven_info;
7477 	};
7478 	struct mlxsw_sp *mlxsw_sp;
7479 	unsigned long event;
7480 	int family;
7481 };
7482 
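/* An IPv6 multipath route is notified as the first fib6_info plus its list of
 * siblings. Collect all of them into a single array, taking a reference on
 * each, so that the event can later be processed from the ordered work queue.
 */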
7483 static int
7484 mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
7485 				struct fib6_entry_notifier_info *fen6_info)
7486 {
7487 	struct fib6_info *rt = fen6_info->rt;
7488 	struct fib6_info **rt_arr;
7489 	struct fib6_info *iter;
7490 	unsigned int nrt6;
7491 	int i = 0;
7492 
7493 	nrt6 = fen6_info->nsiblings + 1;
7494 
7495 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7496 	if (!rt_arr)
7497 		return -ENOMEM;
7498 
7499 	fib6_event->rt_arr = rt_arr;
7500 	fib6_event->nrt6 = nrt6;
7501 
7502 	rt_arr[0] = rt;
7503 	fib6_info_hold(rt);
7504 
7505 	if (!fen6_info->nsiblings)
7506 		return 0;
7507 
7508 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7509 		if (i == fen6_info->nsiblings)
7510 			break;
7511 
7512 		rt_arr[i + 1] = iter;
7513 		fib6_info_hold(iter);
7514 		i++;
7515 	}
7516 	WARN_ON_ONCE(i != fen6_info->nsiblings);
7517 
7518 	return 0;
7519 }
7520 
7521 static void
7522 mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
7523 {
7524 	int i;
7525 
7526 	for (i = 0; i < fib6_event->nrt6; i++)
7527 		mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
7528 	kfree(fib6_event->rt_arr);
7529 }
7530 
7531 static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
7532 					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7533 					       struct mlxsw_sp_fib_event *fib_event)
7534 {
7535 	int err;
7536 
7537 	mlxsw_sp_span_respin(mlxsw_sp);
7538 
7539 	switch (fib_event->event) {
7540 	case FIB_EVENT_ENTRY_REPLACE:
7541 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
7542 		if (err) {
7543 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7544 			mlxsw_sp_router_fib_abort(mlxsw_sp);
7545 			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7546 							      &fib_event->fen_info);
7547 		}
7548 		fib_info_put(fib_event->fen_info.fi);
7549 		break;
7550 	case FIB_EVENT_ENTRY_DEL:
7551 		err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
7552 		if (err)
7553 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7554 		fib_info_put(fib_event->fen_info.fi);
7555 		break;
7556 	case FIB_EVENT_NH_ADD:
7557 	case FIB_EVENT_NH_DEL:
7558 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
7559 		fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
7560 		break;
7561 	}
7562 }
7563 
7564 static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
7565 					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7566 					       struct mlxsw_sp_fib_event *fib_event)
7567 {
7568 	struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
7569 	int err;
7570 
7571 	mlxsw_sp_span_respin(mlxsw_sp);
7572 
7573 	switch (fib_event->event) {
7574 	case FIB_EVENT_ENTRY_REPLACE:
7575 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7576 						   fib_event->fib6_event.nrt6);
7577 		if (err) {
7578 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7579 			mlxsw_sp_router_fib_abort(mlxsw_sp);
7580 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7581 							      fib6_event->rt_arr,
7582 							      fib6_event->nrt6);
7583 		}
7584 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7585 		break;
7586 	case FIB_EVENT_ENTRY_APPEND:
7587 		err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7588 						  fib_event->fib6_event.nrt6);
7589 		if (err) {
7590 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7591 			mlxsw_sp_router_fib_abort(mlxsw_sp);
7592 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7593 							      fib6_event->rt_arr,
7594 							      fib6_event->nrt6);
7595 		}
7596 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7597 		break;
7598 	case FIB_EVENT_ENTRY_DEL:
7599 		err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7600 					       fib_event->fib6_event.nrt6);
7601 		if (err)
7602 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7603 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7604 		break;
7605 	}
7606 }
7607 
7608 static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
7609 						struct mlxsw_sp_fib_event *fib_event)
7610 {
7611 	bool replace;
7612 	int err;
7613 
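	/* The multicast routing code is assumed to require RTNL in addition
	 * to the router lock, so take both here.
	 */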
7614 	rtnl_lock();
7615 	mutex_lock(&mlxsw_sp->router->lock);
7616 	switch (fib_event->event) {
7617 	case FIB_EVENT_ENTRY_REPLACE:
7618 	case FIB_EVENT_ENTRY_ADD:
7619 		replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
7620 
7621 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
7622 		if (err)
7623 			mlxsw_sp_router_fib_abort(mlxsw_sp);
7624 		mr_cache_put(fib_event->men_info.mfc);
7625 		break;
7626 	case FIB_EVENT_ENTRY_DEL:
7627 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
7628 		mr_cache_put(fib_event->men_info.mfc);
7629 		break;
7630 	case FIB_EVENT_VIF_ADD:
7631 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7632 						    &fib_event->ven_info);
7633 		if (err)
7634 			mlxsw_sp_router_fib_abort(mlxsw_sp);
7635 		dev_put(fib_event->ven_info.dev);
7636 		break;
7637 	case FIB_EVENT_VIF_DEL:
7638 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
7639 		dev_put(fib_event->ven_info.dev);
7640 		break;
7641 	}
7642 	mutex_unlock(&mlxsw_sp->router->lock);
7643 	rtnl_unlock();
7644 }
7645 
7646 static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
7647 {
7648 	struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
7649 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
7650 	struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
7651 	struct mlxsw_sp_fib_event *next_fib_event;
7652 	struct mlxsw_sp_fib_event *fib_event;
7653 	int last_family = AF_UNSPEC;
7654 	LIST_HEAD(fib_event_queue);
7655 
7656 	spin_lock_bh(&router->fib_event_queue_lock);
7657 	list_splice_init(&router->fib_event_queue, &fib_event_queue);
7658 	spin_unlock_bh(&router->fib_event_queue_lock);
7659 
	/* The router lock is held here to make sure the per-instance
	 * operation context is not used by anything else while FIB4/6
	 * events are being processed.
	 */
7664 	mutex_lock(&router->lock);
7665 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
7666 	list_for_each_entry_safe(fib_event, next_fib_event,
7667 				 &fib_event_queue, list) {
		/* Check if the next entry in the queue exists and is
		 * of the same type (family and event) as the current one.
		 * In that case it is permitted to bulk multiple FIB
		 * entries into a single register write.
		 */
7673 		op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
7674 				  fib_event->family == next_fib_event->family &&
7675 				  fib_event->event == next_fib_event->event;
7676 		op_ctx->event = fib_event->event;
7677 
		/* If the family of this entry differs from that of the
		 * previous one, the context needs to be reinitialized, so
		 * indicate that. Note that since last_family is initialized
		 * to AF_UNSPEC, this always happens for the first entry
		 * processed in the work.
		 */
7683 		if (fib_event->family != last_family)
7684 			op_ctx->initialized = false;
7685 
7686 		switch (fib_event->family) {
7687 		case AF_INET:
7688 			mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
7689 							   fib_event);
7690 			break;
7691 		case AF_INET6:
7692 			mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
7693 							   fib_event);
7694 			break;
7695 		case RTNL_FAMILY_IP6MR:
7696 		case RTNL_FAMILY_IPMR:
			/* Unlock here, as the FIBMR handler takes the lock
			 * again under RTNL. The per-instance operation
			 * context is not used by FIBMR.
			 */
7701 			mutex_unlock(&router->lock);
7702 			mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
7703 							    fib_event);
7704 			mutex_lock(&router->lock);
7705 			break;
7706 		default:
7707 			WARN_ON_ONCE(1);
7708 		}
7709 		last_family = fib_event->family;
7710 		kfree(fib_event);
7711 		cond_resched();
7712 	}
7713 	WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
7714 	mutex_unlock(&router->lock);
7715 }
7716 
7717 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
7718 				       struct fib_notifier_info *info)
7719 {
7720 	struct fib_entry_notifier_info *fen_info;
7721 	struct fib_nh_notifier_info *fnh_info;
7722 
7723 	switch (fib_event->event) {
7724 	case FIB_EVENT_ENTRY_REPLACE:
7725 	case FIB_EVENT_ENTRY_DEL:
7726 		fen_info = container_of(info, struct fib_entry_notifier_info,
7727 					info);
7728 		fib_event->fen_info = *fen_info;
		/* Take a reference on the fib_info to prevent it from being
		 * freed while the event is queued. Release it afterwards.
		 */
7732 		fib_info_hold(fib_event->fen_info.fi);
7733 		break;
7734 	case FIB_EVENT_NH_ADD:
7735 	case FIB_EVENT_NH_DEL:
7736 		fnh_info = container_of(info, struct fib_nh_notifier_info,
7737 					info);
7738 		fib_event->fnh_info = *fnh_info;
7739 		fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
7740 		break;
7741 	}
7742 }
7743 
7744 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
7745 				      struct fib_notifier_info *info)
7746 {
7747 	struct fib6_entry_notifier_info *fen6_info;
7748 	int err;
7749 
7750 	switch (fib_event->event) {
7751 	case FIB_EVENT_ENTRY_REPLACE:
7752 	case FIB_EVENT_ENTRY_APPEND:
7753 	case FIB_EVENT_ENTRY_DEL:
7754 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
7755 					 info);
7756 		err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
7757 						      fen6_info);
7758 		if (err)
7759 			return err;
7760 		break;
7761 	}
7762 
7763 	return 0;
7764 }
7765 
7766 static void
7767 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
7768 			    struct fib_notifier_info *info)
7769 {
7770 	switch (fib_event->event) {
7771 	case FIB_EVENT_ENTRY_REPLACE:
7772 	case FIB_EVENT_ENTRY_ADD:
7773 	case FIB_EVENT_ENTRY_DEL:
7774 		memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
7775 		mr_cache_hold(fib_event->men_info.mfc);
7776 		break;
7777 	case FIB_EVENT_VIF_ADD:
7778 	case FIB_EVENT_VIF_DEL:
7779 		memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
7780 		dev_hold(fib_event->ven_info.dev);
7781 		break;
7782 	}
7783 }
7784 
7785 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7786 					  struct fib_notifier_info *info,
7787 					  struct mlxsw_sp *mlxsw_sp)
7788 {
7789 	struct netlink_ext_ack *extack = info->extack;
7790 	struct fib_rule_notifier_info *fr_info;
7791 	struct fib_rule *rule;
7792 	int err = 0;
7793 
7794 	/* nothing to do at the moment */
7795 	if (event == FIB_EVENT_RULE_DEL)
7796 		return 0;
7797 
7798 	if (mlxsw_sp->router->aborted)
7799 		return 0;
7800 
7801 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
7802 	rule = fr_info->rule;
7803 
7804 	/* Rule only affects locally generated traffic */
7805 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7806 		return 0;
7807 
7808 	switch (info->family) {
7809 	case AF_INET:
7810 		if (!fib4_rule_default(rule) && !rule->l3mdev)
7811 			err = -EOPNOTSUPP;
7812 		break;
7813 	case AF_INET6:
7814 		if (!fib6_rule_default(rule) && !rule->l3mdev)
7815 			err = -EOPNOTSUPP;
7816 		break;
7817 	case RTNL_FAMILY_IPMR:
7818 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
7819 			err = -EOPNOTSUPP;
7820 		break;
7821 	case RTNL_FAMILY_IP6MR:
7822 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7823 			err = -EOPNOTSUPP;
7824 		break;
7825 	}
7826 
7827 	if (err < 0)
7828 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7829 
7830 	return err;
7831 }
7832 
7833 /* Called with rcu_read_lock() */
7834 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7835 				     unsigned long event, void *ptr)
7836 {
7837 	struct mlxsw_sp_fib_event *fib_event;
7838 	struct fib_notifier_info *info = ptr;
7839 	struct mlxsw_sp_router *router;
7840 	int err;
7841 
	if (info->family != AF_INET && info->family != AF_INET6 &&
	    info->family != RTNL_FAMILY_IPMR &&
	    info->family != RTNL_FAMILY_IP6MR)
7845 		return NOTIFY_DONE;
7846 
7847 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7848 
7849 	switch (event) {
7850 	case FIB_EVENT_RULE_ADD:
7851 	case FIB_EVENT_RULE_DEL:
7852 		err = mlxsw_sp_router_fib_rule_event(event, info,
7853 						     router->mlxsw_sp);
7854 		return notifier_from_errno(err);
7855 	case FIB_EVENT_ENTRY_ADD:
7856 	case FIB_EVENT_ENTRY_REPLACE:
7857 	case FIB_EVENT_ENTRY_APPEND:
7858 		if (router->aborted) {
7859 			NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
7860 			return notifier_from_errno(-EINVAL);
7861 		}
7862 		if (info->family == AF_INET) {
7863 			struct fib_entry_notifier_info *fen_info = ptr;
7864 
7865 			if (fen_info->fi->fib_nh_is_v6) {
7866 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7867 				return notifier_from_errno(-EINVAL);
7868 			}
7869 		}
7870 		break;
7871 	}
7872 
7873 	fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
7874 	if (!fib_event)
7875 		return NOTIFY_BAD;
7876 
7877 	fib_event->mlxsw_sp = router->mlxsw_sp;
7878 	fib_event->event = event;
7879 	fib_event->family = info->family;
7880 
7881 	switch (info->family) {
7882 	case AF_INET:
7883 		mlxsw_sp_router_fib4_event(fib_event, info);
7884 		break;
7885 	case AF_INET6:
7886 		err = mlxsw_sp_router_fib6_event(fib_event, info);
7887 		if (err)
7888 			goto err_fib_event;
7889 		break;
7890 	case RTNL_FAMILY_IP6MR:
7891 	case RTNL_FAMILY_IPMR:
7892 		mlxsw_sp_router_fibmr_event(fib_event, info);
7893 		break;
7894 	}
7895 
7896 	/* Enqueue the event and trigger the work */
7897 	spin_lock_bh(&router->fib_event_queue_lock);
7898 	list_add_tail(&fib_event->list, &router->fib_event_queue);
7899 	spin_unlock_bh(&router->fib_event_queue_lock);
7900 	mlxsw_core_schedule_work(&router->fib_event_work);
7901 
7902 	return NOTIFY_DONE;
7903 
7904 err_fib_event:
7905 	kfree(fib_event);
7906 	return NOTIFY_BAD;
7907 }
7908 
7909 static struct mlxsw_sp_rif *
7910 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7911 			 const struct net_device *dev)
7912 {
7913 	int i;
7914 
7915 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7916 		if (mlxsw_sp->router->rifs[i] &&
7917 		    mlxsw_sp->router->rifs[i]->dev == dev)
7918 			return mlxsw_sp->router->rifs[i];
7919 
7920 	return NULL;
7921 }
7922 
7923 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7924 			 const struct net_device *dev)
7925 {
7926 	struct mlxsw_sp_rif *rif;
7927 
7928 	mutex_lock(&mlxsw_sp->router->lock);
7929 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7930 	mutex_unlock(&mlxsw_sp->router->lock);
7931 
	return rif != NULL;
7933 }
7934 
7935 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7936 {
7937 	struct mlxsw_sp_rif *rif;
7938 	u16 vid = 0;
7939 
7940 	mutex_lock(&mlxsw_sp->router->lock);
7941 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7942 	if (!rif)
7943 		goto out;
7944 
7945 	/* We only return the VID for VLAN RIFs. Otherwise we return an
7946 	 * invalid value (0).
7947 	 */
7948 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7949 		goto out;
7950 
7951 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7952 
7953 out:
7954 	mutex_unlock(&mlxsw_sp->router->lock);
7955 	return vid;
7956 }
7957 
7958 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7959 {
7960 	char ritr_pl[MLXSW_REG_RITR_LEN];
7961 	int err;
7962 
7963 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
7964 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7965 	if (err)
7966 		return err;
7967 
7968 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
7969 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7970 }
7971 
7972 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
7973 					  struct mlxsw_sp_rif *rif)
7974 {
7975 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
7976 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
7977 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
7978 }
7979 
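/* Decide whether an address event should (de)configure a RIF: NETDEV_UP
 * creates a RIF only if the netdev does not already have one, while
 * NETDEV_DOWN destroys it only once the last IPv4 and IPv6 addresses are
 * gone and the RIF was not already removed by other means.
 */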
7980 static bool
7981 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
7982 			   unsigned long event)
7983 {
7984 	struct inet6_dev *inet6_dev;
7985 	bool addr_list_empty = true;
7986 	struct in_device *idev;
7987 
7988 	switch (event) {
7989 	case NETDEV_UP:
7990 		return rif == NULL;
7991 	case NETDEV_DOWN:
7992 		rcu_read_lock();
7993 		idev = __in_dev_get_rcu(dev);
7994 		if (idev && idev->ifa_list)
7995 			addr_list_empty = false;
7996 
7997 		inet6_dev = __in6_dev_get(dev);
7998 		if (addr_list_empty && inet6_dev &&
7999 		    !list_empty(&inet6_dev->addr_list))
8000 			addr_list_empty = false;
8001 		rcu_read_unlock();
8002 
		/* macvlans do not have a RIF, but rather piggyback on the
		 * RIF of their lower device.
		 */
8006 		if (netif_is_macvlan(dev) && addr_list_empty)
8007 			return true;
8008 
8009 		if (rif && addr_list_empty &&
8010 		    !netif_is_l3_slave(rif->dev))
8011 			return true;
8012 		/* It is possible we already removed the RIF ourselves
8013 		 * if it was assigned to a netdev that is now a bridge
8014 		 * or LAG slave.
8015 		 */
8016 		return false;
8017 	}
8018 
8019 	return false;
8020 }
8021 
8022 static enum mlxsw_sp_rif_type
8023 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8024 		      const struct net_device *dev)
8025 {
8026 	enum mlxsw_sp_fid_type type;
8027 
8028 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8029 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
8030 
8031 	/* Otherwise RIF type is derived from the type of the underlying FID. */
8032 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8033 		type = MLXSW_SP_FID_TYPE_8021Q;
8034 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8035 		type = MLXSW_SP_FID_TYPE_8021Q;
8036 	else if (netif_is_bridge_master(dev))
8037 		type = MLXSW_SP_FID_TYPE_8021D;
8038 	else
8039 		type = MLXSW_SP_FID_TYPE_RFID;
8040 
8041 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8042 }
8043 
8044 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
8045 {
8046 	int i;
8047 
8048 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
8049 		if (!mlxsw_sp->router->rifs[i]) {
8050 			*p_rif_index = i;
8051 			return 0;
8052 		}
8053 	}
8054 
8055 	return -ENOBUFS;
8056 }
8057 
8058 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8059 					       u16 vr_id,
8060 					       struct net_device *l3_dev)
8061 {
8062 	struct mlxsw_sp_rif *rif;
8063 
8064 	rif = kzalloc(rif_size, GFP_KERNEL);
8065 	if (!rif)
8066 		return NULL;
8067 
8068 	INIT_LIST_HEAD(&rif->nexthop_list);
8069 	INIT_LIST_HEAD(&rif->neigh_list);
8070 	if (l3_dev) {
8071 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
8072 		rif->mtu = l3_dev->mtu;
8073 		rif->dev = l3_dev;
8074 	}
8075 	rif->vr_id = vr_id;
8076 	rif->rif_index = rif_index;
8077 
8078 	return rif;
8079 }
8080 
8081 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8082 					   u16 rif_index)
8083 {
8084 	return mlxsw_sp->router->rifs[rif_index];
8085 }
8086 
8087 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8088 {
8089 	return rif->rif_index;
8090 }
8091 
8092 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8093 {
8094 	return lb_rif->common.rif_index;
8095 }
8096 
8097 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8098 {
8099 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
8100 	struct mlxsw_sp_vr *ul_vr;
8101 
8102 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8103 	if (WARN_ON(IS_ERR(ul_vr)))
8104 		return 0;
8105 
8106 	return ul_vr->id;
8107 }
8108 
8109 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8110 {
8111 	return lb_rif->ul_rif_id;
8112 }
8113 
8114 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8115 {
8116 	return rif->dev->ifindex;
8117 }
8118 
8119 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
8120 {
8121 	return rif->dev;
8122 }
8123 
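/* RIF creation sequence: derive the RIF type and ops from the netdev, bind a
 * virtual router, allocate a free RIF index and the RIF struct, obtain the
 * FID, run the type-specific setup and configure callbacks, and finally
 * register the RIF with the multicast tables and counters. Errors unwind in
 * reverse order.
 */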
8124 static struct mlxsw_sp_rif *
8125 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8126 		    const struct mlxsw_sp_rif_params *params,
8127 		    struct netlink_ext_ack *extack)
8128 {
8129 	u32 tb_id = l3mdev_fib_table(params->dev);
8130 	const struct mlxsw_sp_rif_ops *ops;
8131 	struct mlxsw_sp_fid *fid = NULL;
8132 	enum mlxsw_sp_rif_type type;
8133 	struct mlxsw_sp_rif *rif;
8134 	struct mlxsw_sp_vr *vr;
8135 	u16 rif_index;
8136 	int i, err;
8137 
8138 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8139 	ops = mlxsw_sp->router->rif_ops_arr[type];
8140 
8141 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8142 	if (IS_ERR(vr))
8143 		return ERR_CAST(vr);
8144 	vr->rif_count++;
8145 
8146 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
8147 	if (err) {
8148 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8149 		goto err_rif_index_alloc;
8150 	}
8151 
8152 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
8153 	if (!rif) {
8154 		err = -ENOMEM;
8155 		goto err_rif_alloc;
8156 	}
8157 	dev_hold(rif->dev);
8158 	mlxsw_sp->router->rifs[rif_index] = rif;
8159 	rif->mlxsw_sp = mlxsw_sp;
8160 	rif->ops = ops;
8161 
8162 	if (ops->fid_get) {
8163 		fid = ops->fid_get(rif, extack);
8164 		if (IS_ERR(fid)) {
8165 			err = PTR_ERR(fid);
8166 			goto err_fid_get;
8167 		}
8168 		rif->fid = fid;
8169 	}
8170 
8171 	if (ops->setup)
8172 		ops->setup(rif, params);
8173 
8174 	err = ops->configure(rif);
8175 	if (err)
8176 		goto err_configure;
8177 
8178 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8179 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8180 		if (err)
8181 			goto err_mr_rif_add;
8182 	}
8183 
8184 	mlxsw_sp_rif_counters_alloc(rif);
8185 
8186 	return rif;
8187 
8188 err_mr_rif_add:
8189 	for (i--; i >= 0; i--)
8190 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8191 	ops->deconfigure(rif);
8192 err_configure:
8193 	if (fid)
8194 		mlxsw_sp_fid_put(fid);
8195 err_fid_get:
8196 	mlxsw_sp->router->rifs[rif_index] = NULL;
8197 	dev_put(rif->dev);
8198 	kfree(rif);
8199 err_rif_alloc:
8200 err_rif_index_alloc:
8201 	vr->rif_count--;
8202 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8203 	return ERR_PTR(err);
8204 }
8205 
8206 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8207 {
8208 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
8209 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8210 	struct mlxsw_sp_fid *fid = rif->fid;
8211 	struct mlxsw_sp_vr *vr;
8212 	int i;
8213 
8214 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8215 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
8216 
8217 	mlxsw_sp_rif_counters_free(rif);
8218 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8219 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8220 	ops->deconfigure(rif);
8221 	if (fid)
8222 		/* Loopback RIFs are not associated with a FID. */
8223 		mlxsw_sp_fid_put(fid);
8224 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8225 	dev_put(rif->dev);
8226 	kfree(rif);
8227 	vr->rif_count--;
8228 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8229 }
8230 
8231 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8232 				 struct net_device *dev)
8233 {
8234 	struct mlxsw_sp_rif *rif;
8235 
8236 	mutex_lock(&mlxsw_sp->router->lock);
8237 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8238 	if (!rif)
8239 		goto out;
8240 	mlxsw_sp_rif_destroy(rif);
8241 out:
8242 	mutex_unlock(&mlxsw_sp->router->lock);
8243 }
8244 
8245 static void
8246 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8247 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8248 {
8249 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8250 
8251 	params->vid = mlxsw_sp_port_vlan->vid;
8252 	params->lag = mlxsw_sp_port->lagged;
8253 	if (params->lag)
8254 		params->lag_id = mlxsw_sp_port->lag_id;
8255 	else
8256 		params->system_port = mlxsw_sp_port->local_port;
8257 }
8258 
8259 static struct mlxsw_sp_rif_subport *
8260 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8261 {
8262 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
8263 }
8264 
8265 static struct mlxsw_sp_rif *
8266 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8267 			 const struct mlxsw_sp_rif_params *params,
8268 			 struct netlink_ext_ack *extack)
8269 {
8270 	struct mlxsw_sp_rif_subport *rif_subport;
8271 	struct mlxsw_sp_rif *rif;
8272 
8273 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8274 	if (!rif)
8275 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8276 
8277 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8278 	refcount_inc(&rif_subport->ref_count);
8279 	return rif;
8280 }
8281 
8282 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8283 {
8284 	struct mlxsw_sp_rif_subport *rif_subport;
8285 
8286 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8287 	if (!refcount_dec_and_test(&rif_subport->ref_count))
8288 		return;
8289 
8290 	mlxsw_sp_rif_destroy(rif);
8291 }
8292 
8293 static int
8294 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8295 				 struct net_device *l3_dev,
8296 				 struct netlink_ext_ack *extack)
8297 {
8298 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8299 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8300 	struct mlxsw_sp_rif_params params = {
8301 		.dev = l3_dev,
8302 	};
8303 	u16 vid = mlxsw_sp_port_vlan->vid;
8304 	struct mlxsw_sp_rif *rif;
8305 	struct mlxsw_sp_fid *fid;
8306 	int err;
8307 
8308 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8309 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8310 	if (IS_ERR(rif))
8311 		return PTR_ERR(rif);
8312 
8313 	/* FID was already created, just take a reference */
8314 	fid = rif->ops->fid_get(rif, extack);
8315 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8316 	if (err)
8317 		goto err_fid_port_vid_map;
8318 
8319 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8320 	if (err)
8321 		goto err_port_vid_learning_set;
8322 
8323 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8324 					BR_STATE_FORWARDING);
8325 	if (err)
8326 		goto err_port_vid_stp_set;
8327 
8328 	mlxsw_sp_port_vlan->fid = fid;
8329 
8330 	return 0;
8331 
8332 err_port_vid_stp_set:
8333 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8334 err_port_vid_learning_set:
8335 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8336 err_fid_port_vid_map:
8337 	mlxsw_sp_fid_put(fid);
8338 	mlxsw_sp_rif_subport_put(rif);
8339 	return err;
8340 }
8341 
8342 static void
8343 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8344 {
8345 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8346 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8347 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8348 	u16 vid = mlxsw_sp_port_vlan->vid;
8349 
8350 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8351 		return;
8352 
8353 	mlxsw_sp_port_vlan->fid = NULL;
8354 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8355 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8356 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8357 	mlxsw_sp_fid_put(fid);
8358 	mlxsw_sp_rif_subport_put(rif);
8359 }
8360 
8361 int
8362 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8363 			       struct net_device *l3_dev,
8364 			       struct netlink_ext_ack *extack)
8365 {
8366 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8367 	struct mlxsw_sp_rif *rif;
8368 	int err = 0;
8369 
8370 	mutex_lock(&mlxsw_sp->router->lock);
8371 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8372 	if (!rif)
8373 		goto out;
8374 
8375 	err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8376 					       extack);
8377 out:
8378 	mutex_unlock(&mlxsw_sp->router->lock);
8379 	return err;
8380 }
8381 
8382 void
8383 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8384 {
8385 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8386 
8387 	mutex_lock(&mlxsw_sp->router->lock);
8388 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8389 	mutex_unlock(&mlxsw_sp->router->lock);
8390 }
8391 
8392 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8393 					     struct net_device *port_dev,
8394 					     unsigned long event, u16 vid,
8395 					     struct netlink_ext_ack *extack)
8396 {
8397 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8398 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8399 
8400 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8401 	if (WARN_ON(!mlxsw_sp_port_vlan))
8402 		return -EINVAL;
8403 
8404 	switch (event) {
8405 	case NETDEV_UP:
8406 		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
8407 							l3_dev, extack);
8408 	case NETDEV_DOWN:
8409 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8410 		break;
8411 	}
8412 
8413 	return 0;
8414 }
8415 
8416 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
8417 					unsigned long event,
8418 					struct netlink_ext_ack *extack)
8419 {
8420 	if (netif_is_bridge_port(port_dev) ||
8421 	    netif_is_lag_port(port_dev) ||
8422 	    netif_is_ovs_port(port_dev))
8423 		return 0;
8424 
8425 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
8426 						 MLXSW_SP_DEFAULT_VID, extack);
8427 }
8428 
8429 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
8430 					 struct net_device *lag_dev,
8431 					 unsigned long event, u16 vid,
8432 					 struct netlink_ext_ack *extack)
8433 {
8434 	struct net_device *port_dev;
8435 	struct list_head *iter;
8436 	int err;
8437 
8438 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
8439 		if (mlxsw_sp_port_dev_check(port_dev)) {
8440 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
8441 								port_dev,
8442 								event, vid,
8443 								extack);
8444 			if (err)
8445 				return err;
8446 		}
8447 	}
8448 
8449 	return 0;
8450 }
8451 
8452 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
8453 				       unsigned long event,
8454 				       struct netlink_ext_ack *extack)
8455 {
8456 	if (netif_is_bridge_port(lag_dev))
8457 		return 0;
8458 
8459 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
8460 					     MLXSW_SP_DEFAULT_VID, extack);
8461 }
8462 
8463 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8464 					  struct net_device *l3_dev,
8465 					  unsigned long event,
8466 					  struct netlink_ext_ack *extack)
8467 {
8468 	struct mlxsw_sp_rif_params params = {
8469 		.dev = l3_dev,
8470 	};
8471 	struct mlxsw_sp_rif *rif;
8472 
8473 	switch (event) {
8474 	case NETDEV_UP:
8475 		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
8476 			u16 proto;
8477 
8478 			br_vlan_get_proto(l3_dev, &proto);
8479 			if (proto == ETH_P_8021AD) {
8480 				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
8481 				return -EOPNOTSUPP;
8482 			}
8483 		}
8484 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8485 		if (IS_ERR(rif))
8486 			return PTR_ERR(rif);
8487 		break;
8488 	case NETDEV_DOWN:
8489 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8490 		mlxsw_sp_rif_destroy(rif);
8491 		break;
8492 	}
8493 
8494 	return 0;
8495 }
8496 
8497 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8498 					struct net_device *vlan_dev,
8499 					unsigned long event,
8500 					struct netlink_ext_ack *extack)
8501 {
8502 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8503 	u16 vid = vlan_dev_vlan_id(vlan_dev);
8504 
8505 	if (netif_is_bridge_port(vlan_dev))
8506 		return 0;
8507 
8508 	if (mlxsw_sp_port_dev_check(real_dev))
8509 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
8510 							 event, vid, extack);
8511 	else if (netif_is_lag_master(real_dev))
8512 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
8513 						     vid, extack);
8514 	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
8515 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
8516 						      extack);
8517 
8518 	return 0;
8519 }
8520 
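/* VRRP virtual routers use the well-known MAC prefixes 00:00:5e:00:01:xx for
 * IPv4 and 00:00:5e:00:02:xx for IPv6 (RFC 5798), where the last byte is the
 * virtual router ID. Match on the five-byte prefix only.
 */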
8521 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
8522 {
8523 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
8524 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8525 
8526 	return ether_addr_equal_masked(mac, vrrp4, mask);
8527 }
8528 
8529 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
8530 {
8531 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
8532 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8533 
8534 	return ether_addr_equal_masked(mac, vrrp6, mask);
8535 }
8536 
8537 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8538 				const u8 *mac, bool adding)
8539 {
8540 	char ritr_pl[MLXSW_REG_RITR_LEN];
8541 	u8 vrrp_id = adding ? mac[5] : 0;
8542 	int err;
8543 
8544 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
8545 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
8546 		return 0;
8547 
8548 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8549 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8550 	if (err)
8551 		return err;
8552 
8553 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
8554 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
8555 	else
8556 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
8557 
8558 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8559 }
8560 
8561 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
8562 				    const struct net_device *macvlan_dev,
8563 				    struct netlink_ext_ack *extack)
8564 {
8565 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8566 	struct mlxsw_sp_rif *rif;
8567 	int err;
8568 
8569 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8570 	if (!rif) {
8571 		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
8572 		return -EOPNOTSUPP;
8573 	}
8574 
8575 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8576 				  mlxsw_sp_fid_index(rif->fid), true);
8577 	if (err)
8578 		return err;
8579 
8580 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
8581 				   macvlan_dev->dev_addr, true);
8582 	if (err)
8583 		goto err_rif_vrrp_add;
8584 
8585 	/* Make sure the bridge driver does not have this MAC pointing at
8586 	 * some other port.
8587 	 */
8588 	if (rif->ops->fdb_del)
8589 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
8590 
8591 	return 0;
8592 
8593 err_rif_vrrp_add:
8594 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8595 			    mlxsw_sp_fid_index(rif->fid), false);
8596 	return err;
8597 }
8598 
8599 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8600 				       const struct net_device *macvlan_dev)
8601 {
8602 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8603 	struct mlxsw_sp_rif *rif;
8604 
8605 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8606 	/* If we do not have a RIF, then we already took care of
8607 	 * removing the macvlan's MAC during RIF deletion.
8608 	 */
8609 	if (!rif)
8610 		return;
8611 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
8612 			     false);
8613 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8614 			    mlxsw_sp_fid_index(rif->fid), false);
8615 }
8616 
8617 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8618 			      const struct net_device *macvlan_dev)
8619 {
8620 	mutex_lock(&mlxsw_sp->router->lock);
8621 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8622 	mutex_unlock(&mlxsw_sp->router->lock);
8623 }
8624 
8625 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
8626 					   struct net_device *macvlan_dev,
8627 					   unsigned long event,
8628 					   struct netlink_ext_ack *extack)
8629 {
8630 	switch (event) {
8631 	case NETDEV_UP:
8632 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
8633 	case NETDEV_DOWN:
8634 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8635 		break;
8636 	}
8637 
8638 	return 0;
8639 }
8640 
8641 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
8642 					       struct net_device *dev,
8643 					       const unsigned char *dev_addr,
8644 					       struct netlink_ext_ack *extack)
8645 {
8646 	struct mlxsw_sp_rif *rif;
8647 	int i;
8648 
8649 	/* A RIF is not created for macvlan netdevs. Their MAC is used to
	 * populate the FDB.
8651 	 */
8652 	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
8653 		return 0;
8654 
8655 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
8656 		rif = mlxsw_sp->router->rifs[i];
8657 		if (rif && rif->ops &&
8658 		    rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
8659 			continue;
8660 		if (rif && rif->dev && rif->dev != dev &&
8661 		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
8662 					     mlxsw_sp->mac_mask)) {
8663 			NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
8664 			return -EINVAL;
8665 		}
8666 	}
8667 
8668 	return 0;
8669 }
8670 
8671 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
8672 				     struct net_device *dev,
8673 				     unsigned long event,
8674 				     struct netlink_ext_ack *extack)
8675 {
8676 	if (mlxsw_sp_port_dev_check(dev))
8677 		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
8678 	else if (netif_is_lag_master(dev))
8679 		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
8680 	else if (netif_is_bridge_master(dev))
8681 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
8682 						      extack);
8683 	else if (is_vlan_dev(dev))
8684 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
8685 						    extack);
8686 	else if (netif_is_macvlan(dev))
8687 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
8688 						       extack);
8689 	else
8690 		return 0;
8691 }
8692 
8693 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
8694 				   unsigned long event, void *ptr)
8695 {
8696 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
8697 	struct net_device *dev = ifa->ifa_dev->dev;
8698 	struct mlxsw_sp_router *router;
8699 	struct mlxsw_sp_rif *rif;
8700 	int err = 0;
8701 
8702 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
8703 	if (event == NETDEV_UP)
8704 		return NOTIFY_DONE;
8705 
8706 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
8707 	mutex_lock(&router->lock);
8708 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
8709 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8710 		goto out;
8711 
8712 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
8713 out:
8714 	mutex_unlock(&router->lock);
8715 	return notifier_from_errno(err);
8716 }
8717 
8718 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
8719 				  unsigned long event, void *ptr)
8720 {
8721 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
8722 	struct net_device *dev = ivi->ivi_dev->dev;
8723 	struct mlxsw_sp *mlxsw_sp;
8724 	struct mlxsw_sp_rif *rif;
8725 	int err = 0;
8726 
8727 	mlxsw_sp = mlxsw_sp_lower_get(dev);
8728 	if (!mlxsw_sp)
8729 		return NOTIFY_DONE;
8730 
8731 	mutex_lock(&mlxsw_sp->router->lock);
8732 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8733 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8734 		goto out;
8735 
8736 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
8737 						  ivi->extack);
8738 	if (err)
8739 		goto out;
8740 
8741 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
8742 out:
8743 	mutex_unlock(&mlxsw_sp->router->lock);
8744 	return notifier_from_errno(err);
8745 }
8746 
8747 struct mlxsw_sp_inet6addr_event_work {
8748 	struct work_struct work;
8749 	struct mlxsw_sp *mlxsw_sp;
8750 	struct net_device *dev;
8751 	unsigned long event;
8752 };
8753 
8754 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
8755 {
8756 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
8757 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
8758 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
8759 	struct net_device *dev = inet6addr_work->dev;
8760 	unsigned long event = inet6addr_work->event;
8761 	struct mlxsw_sp_rif *rif;
8762 
8763 	rtnl_lock();
8764 	mutex_lock(&mlxsw_sp->router->lock);
8765 
8766 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8767 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8768 		goto out;
8769 
8770 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
8771 out:
8772 	mutex_unlock(&mlxsw_sp->router->lock);
8773 	rtnl_unlock();
8774 	dev_put(dev);
8775 	kfree(inet6addr_work);
8776 }
8777 
8778 /* Called with rcu_read_lock() */
8779 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
8780 				    unsigned long event, void *ptr)
8781 {
8782 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
8783 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
8784 	struct net_device *dev = if6->idev->dev;
8785 	struct mlxsw_sp_router *router;
8786 
8787 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
8788 	if (event == NETDEV_UP)
8789 		return NOTIFY_DONE;
8790 
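	/* The inet6addr notifier chain is atomic, so defer the processing,
	 * which needs to take sleeping locks, to a work item.
	 */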
8791 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
8792 	if (!inet6addr_work)
8793 		return NOTIFY_BAD;
8794 
8795 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
8796 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
8797 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
8798 	inet6addr_work->dev = dev;
8799 	inet6addr_work->event = event;
8800 	dev_hold(dev);
8801 	mlxsw_core_schedule_work(&inet6addr_work->work);
8802 
8803 	return NOTIFY_DONE;
8804 }
8805 
int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
						  i6vi->extack);
	if (err)
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

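/* Update the MAC and MTU of an already configured RIF using a
 * read-modify-write cycle on the RITR register.
 */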
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

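/* React to a MAC or MTU change on a netdev backing a RIF: remove the
 * FDB entry that directed packets with the old MAC to the router, edit
 * the RIF, then install an entry for the new MAC. On failure the
 * previous configuration is restored.
 */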
static int
mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = rif->dev;
	u16 fid_index;
	int err;

	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;
		int i;

		/* A RIF is relevant only to the mr_table instances of
		 * its own virtual router: unlike in unicast routing, a
		 * RIF cannot be shared between several multicast
		 * routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
						   rif, dev->mtu);
	}

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}

static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
			    struct netdev_notifier_pre_changeaddr_info *info)
{
	struct netlink_ext_ack *extack;

	extack = netdev_notifier_info_to_extack(&info->info);
	return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
						   info->dev_addr, extack);
}

int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;

	switch (event) {
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
		err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
		break;
	case NETDEV_PRE_CHANGEADDR:
		err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
		break;
	}

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
					  extack);

	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
}

int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	/* We do not create a RIF for a macvlan, but only use it to
	 * direct more MAC addresses to the router.
	 */
	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
		return 0;

	mutex_lock(&mlxsw_sp->router->lock);
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		break;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}

static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
					struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;

	if (!netif_is_macvlan(dev))
		return 0;

	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				   mlxsw_sp_fid_index(rif->fid), false);
}

static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
{
	struct netdev_nested_priv priv = {
		.data = (void *)rif,
	};

	if (!netif_is_macvlan_port(rif->dev))
		return 0;

	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
	return netdev_walk_all_upper_dev_rcu(rif->dev,
					     __mlxsw_sp_rif_macvlan_flush, &priv);
}

static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_set(&rif_subport->ref_count, 1);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

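/* Enable or disable the sub-port RIF in hardware via the RITR register.
 * A sub-port RIF is bound to a {port, VID} or {LAG, VID} pair rather
 * than to a FID.
 */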
static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
	return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_rif_subport_op(rif, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
			     struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};

static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

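/* The "router port" is a virtual port one past the highest valid local
 * port. Packets that need to reach the router are flooded to it, which
 * is why it is enabled below as a flood destination for the RIF's FID.
 */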
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct switchdev_notifier_fdb_info info;
	struct net_device *dev;

	dev = br_fdb_find_port(rif->dev, mac, 0);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = 0;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
};

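/* Resolve the 802.1Q FID for a VLAN RIF: either from the VLAN ID of a
 * VLAN upper of a bridge, or from the PVID of the bridge itself.
 */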
static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
			  struct netlink_ext_ack *extack)
{
	struct net_device *br_dev;
	u16 vid;
	int err;

	if (is_vlan_dev(rif->dev)) {
		vid = vlan_dev_vlan_id(rif->dev);
		br_dev = vlan_dev_real_dev(rif->dev);
		if (WARN_ON(!netif_is_bridge_master(br_dev)))
			return ERR_PTR(-EINVAL);
	} else {
		err = br_vlan_get_pvid(rif->dev, &vid);
		if (err < 0 || !vid) {
			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
			return ERR_PTR(-EINVAL);
		}
	}

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct switchdev_notifier_fdb_info info;
	struct net_device *br_dev;
	struct net_device *dev;

	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
	dev = br_fdb_find_port(br_dev, mac, vid);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = vid;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
};

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

static int
mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	lb_rif->ul_rif_id = 0;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}

static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
};

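/* Spectrum-2 and later model the tunnel underlay as a separate underlay
 * RIF (UL RIF), programmed as a generic loopback interface in the
 * underlay virtual router.
 */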
static int
mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
					     MLXSW_REG_RITR_LOOPBACK_GENERIC);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *ul_rif;
	u16 rif_index;
	int err;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		return ERR_PTR(err);
	}

	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
	if (!ul_rif)
		return ERR_PTR(-ENOMEM);

	mlxsw_sp->router->rifs[rif_index] = ul_rif;
	ul_rif->mlxsw_sp = mlxsw_sp;
	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
	if (err)
		goto ul_rif_op_err;

	return ul_rif;

ul_rif_op_err:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	kfree(ul_rif);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;

	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
	kfree(ul_rif);
}

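/* Get a reference to the underlay RIF of the given table. The UL RIF is
 * reference counted per virtual router: the first user creates it,
 * subsequent users only bump ul_rif_refcnt.
 */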
static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
		    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
		return vr->ul_rif;

	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
	if (IS_ERR(vr->ul_rif)) {
		err = PTR_ERR(vr->ul_rif);
		goto err_ul_rif_create;
	}

	vr->rif_count++;
	refcount_set(&vr->ul_rif_refcnt, 1);

	return vr->ul_rif;

err_ul_rif_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	struct mlxsw_sp_vr *vr;

	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];

	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
		return;

	vr->rif_count--;
	mlxsw_sp_ul_rif_destroy(ul_rif);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

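/* Entry points for code outside the router (e.g. tunnel offload) that
 * needs an underlay RIF index. They take the router lock, which the
 * internal helpers above assume is already held.
 */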
int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
			       u16 *ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif)) {
		err = PTR_ERR(ul_rif);
		goto out;
	}
	*ul_rif_index = ul_rif->rif_index;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
	if (WARN_ON(!ul_rif))
		goto out;

	mlxsw_sp_ul_rif_put(ul_rif);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int
mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;
	int err;

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = 0;
	lb_rif->ul_rif_id = ul_rif->rif_index;

	return 0;

err_loopback_op:
	mlxsw_sp_ul_rif_put(ul_rif);
	return err;
}

static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;

	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
	mlxsw_sp_ul_rif_put(ul_rif);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

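/* Set the global IP-in-IP tunneling configuration (TIGCR register);
 * here the TTL of encapsulated packets appears to be copied from the
 * overlay packet rather than set to a fixed value.
 */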
static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);

	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
	if (err)
		return err;

	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

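/* Mirror the kernel's fib_multipath_hash_policy for IPv4: policy 0
 * hashes on the source and destination addresses only, while policy 1
 * also mixes in the IP protocol and the TCP/UDP ports.
 */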
static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

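/* IPv6 counterpart of the above: the next header field is always
 * hashed, and with an L3-only policy the flow label is used in place
 * of the transport ports.
 */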
static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
	bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
	if (only_l3) {
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	} else {
		mlxsw_sp_mp_hash_header_set(recr2_pl,
					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
	}
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW determines switch priority from the DSCP bits, while the
	 * kernel still derives it from the full ToS byte. Since the two
	 * fields are offset from each other, translate each DSCP value
	 * to the priority the kernel would compute for the matching
	 * ToS, skipping the 2 least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

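/* Enable the router in hardware via the RGCR register, sizing it for
 * the maximum number of RIFs and propagating the
 * ip_fwd_update_priority sysctl, which controls whether the priority
 * of forwarded packets is updated.
 */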
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
	.init = mlxsw_sp_router_ll_basic_init,
	.ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
	.ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
	.raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
	.fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
	.fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
	.fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
	.fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
	.fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
	.fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
	.fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
	.fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
};

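/* Allocate a single low-level operation context, sized for the largest
 * fib_entry_op_ctx among the per-protocol low-level ops.
 */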
static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
{
	size_t max_size = 0;
	int i;

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
		size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;

		if (size > max_size)
			max_size = size;
	}
	router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
				    GFP_KERNEL);
	if (!router->ll_op_ctx)
		return -ENOMEM;
	INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
	return 0;
}

static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
{
	WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
	kfree(router->ll_op_ctx);
}

static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
{
	u16 lb_rif_index;
	int err;

	/* Create a generic loopback RIF associated with the main table
	 * (default VRF). Any table can be used, but the main table exists
	 * anyway, so we do not waste resources.
	 */
	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
					 &lb_rif_index);
	if (err)
		return err;

	mlxsw_sp->router->lb_rif_index = lb_rif_index;

	return 0;
}

static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
}

static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
	.init = mlxsw_sp1_router_init,
};

static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
	.init = mlxsw_sp2_router_init,
};

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mutex_init(&router->lock);
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	err = mlxsw_sp->router_ops->init(mlxsw_sp);
	if (err)
		goto err_router_ops_init;

	err = mlxsw_sp_router_xm_init(mlxsw_sp);
	if (err)
		goto err_xm_init;

	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
						       &mlxsw_sp_router_ll_xm_ops :
						       &mlxsw_sp_router_ll_basic_ops;
	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;

	err = mlxsw_sp_router_ll_op_ctx_init(router);
	if (err)
		goto err_ll_op_ctx_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
			  mlxsw_sp_nh_grp_activity_work);

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_lb_rif_init(mlxsw_sp);
	if (err)
		goto err_lb_rif_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
	INIT_LIST_HEAD(&router->fib_event_queue);
	spin_lock_init(&router->fib_event_queue_lock);

	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	mlxsw_sp->router->nexthop_nb.notifier_call =
		mlxsw_sp_nexthop_obj_event;
	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
					&mlxsw_sp->router->nexthop_nb,
					extack);
	if (err)
		goto err_register_nexthop_notifier;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	mlxsw_core_flush_owq();
	WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
err_lb_rif_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
	mlxsw_sp_router_ll_op_ctx_fini(router);
err_ll_op_ctx_init:
	mlxsw_sp_router_xm_fini(mlxsw_sp);
err_xm_init:
err_router_ops_init:
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
	return err;
}

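/* Teardown counterpart of mlxsw_sp_router_init(): unregister the
 * notifiers first so no new events arrive, flush the ordered workqueue,
 * and then release resources in reverse order of initialization.
 */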
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				&mlxsw_sp->router->fib_nb);
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
	mlxsw_core_flush_owq();
	WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
	mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
	mlxsw_sp_router_xm_fini(mlxsw_sp);
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
}