1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
8 #include <linux/in6.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <net/netevent.h>
22 #include <net/neighbour.h>
23 #include <net/arp.h>
24 #include <net/ip_fib.h>
25 #include <net/ip6_fib.h>
26 #include <net/nexthop.h>
27 #include <net/fib_rules.h>
28 #include <net/ip_tunnels.h>
29 #include <net/l3mdev.h>
30 #include <net/addrconf.h>
31 #include <net/ndisc.h>
32 #include <net/ipv6.h>
33 #include <net/fib_notifier.h>
34 #include <net/switchdev.h>
35 
36 #include "spectrum.h"
37 #include "core.h"
38 #include "reg.h"
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_ipip.h"
42 #include "spectrum_mr.h"
43 #include "spectrum_mr_tcam.h"
44 #include "spectrum_router.h"
45 #include "spectrum_span.h"
46 
47 struct mlxsw_sp_fib;
48 struct mlxsw_sp_vr;
49 struct mlxsw_sp_lpm_tree;
50 struct mlxsw_sp_rif_ops;
51 
52 struct mlxsw_sp_rif {
53 	struct list_head nexthop_list;
54 	struct list_head neigh_list;
55 	struct net_device *dev; /* NULL for underlay RIF */
56 	struct mlxsw_sp_fid *fid;
57 	unsigned char addr[ETH_ALEN];
58 	int mtu;
59 	u16 rif_index;
60 	u8 mac_profile_id;
61 	u16 vr_id;
62 	const struct mlxsw_sp_rif_ops *ops;
63 	struct mlxsw_sp *mlxsw_sp;
64 
65 	unsigned int counter_ingress;
66 	bool counter_ingress_valid;
67 	unsigned int counter_egress;
68 	bool counter_egress_valid;
69 };
70 
71 struct mlxsw_sp_rif_params {
72 	struct net_device *dev;
73 	union {
74 		u16 system_port;
75 		u16 lag_id;
76 	};
77 	u16 vid;
78 	bool lag;
79 };
80 
81 struct mlxsw_sp_rif_subport {
82 	struct mlxsw_sp_rif common;
83 	refcount_t ref_count;
84 	union {
85 		u16 system_port;
86 		u16 lag_id;
87 	};
88 	u16 vid;
89 	bool lag;
90 };
91 
92 struct mlxsw_sp_rif_ipip_lb {
93 	struct mlxsw_sp_rif common;
94 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
95 	u16 ul_vr_id; /* Reserved for Spectrum-2. */
96 	u16 ul_rif_id; /* Reserved for Spectrum. */
97 };
98 
99 struct mlxsw_sp_rif_params_ipip_lb {
100 	struct mlxsw_sp_rif_params common;
101 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
102 };
103 
104 struct mlxsw_sp_rif_ops {
105 	enum mlxsw_sp_rif_type type;
106 	size_t rif_size;
107 
108 	void (*setup)(struct mlxsw_sp_rif *rif,
109 		      const struct mlxsw_sp_rif_params *params);
110 	int (*configure)(struct mlxsw_sp_rif *rif,
111 			 struct netlink_ext_ack *extack);
112 	void (*deconfigure)(struct mlxsw_sp_rif *rif);
113 	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
114 					 struct netlink_ext_ack *extack);
115 	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
116 };
117 
118 struct mlxsw_sp_rif_mac_profile {
119 	unsigned char mac_prefix[ETH_ALEN];
120 	refcount_t ref_count;
121 	u8 id;
122 };
123 
124 struct mlxsw_sp_router_ops {
125 	int (*init)(struct mlxsw_sp *mlxsw_sp);
126 	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
127 };
128 
129 static struct mlxsw_sp_rif *
130 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
131 			 const struct net_device *dev);
132 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
133 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
134 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
135 				  struct mlxsw_sp_lpm_tree *lpm_tree);
136 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
137 				     const struct mlxsw_sp_fib *fib,
138 				     u8 tree_id);
139 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
140 				       const struct mlxsw_sp_fib *fib);
141 
142 static unsigned int *
143 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
144 			   enum mlxsw_sp_rif_counter_dir dir)
145 {
146 	switch (dir) {
147 	case MLXSW_SP_RIF_COUNTER_EGRESS:
148 		return &rif->counter_egress;
149 	case MLXSW_SP_RIF_COUNTER_INGRESS:
150 		return &rif->counter_ingress;
151 	}
152 	return NULL;
153 }
154 
155 static bool
156 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
157 			       enum mlxsw_sp_rif_counter_dir dir)
158 {
159 	switch (dir) {
160 	case MLXSW_SP_RIF_COUNTER_EGRESS:
161 		return rif->counter_egress_valid;
162 	case MLXSW_SP_RIF_COUNTER_INGRESS:
163 		return rif->counter_ingress_valid;
164 	}
165 	return false;
166 }
167 
168 static void
169 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
170 			       enum mlxsw_sp_rif_counter_dir dir,
171 			       bool valid)
172 {
173 	switch (dir) {
174 	case MLXSW_SP_RIF_COUNTER_EGRESS:
175 		rif->counter_egress_valid = valid;
176 		break;
177 	case MLXSW_SP_RIF_COUNTER_INGRESS:
178 		rif->counter_ingress_valid = valid;
179 		break;
180 	}
181 }
182 
183 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
184 				     unsigned int counter_index, bool enable,
185 				     enum mlxsw_sp_rif_counter_dir dir)
186 {
187 	char ritr_pl[MLXSW_REG_RITR_LEN];
188 	bool is_egress = false;
189 	int err;
190 
191 	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
192 		is_egress = true;
193 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
194 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
195 	if (err)
196 		return err;
197 
198 	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
199 				    is_egress);
200 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
201 }
202 
203 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
204 				   struct mlxsw_sp_rif *rif,
205 				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
206 {
207 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
208 	unsigned int *p_counter_index;
209 	bool valid;
210 	int err;
211 
212 	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
213 	if (!valid)
214 		return -EINVAL;
215 
216 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
217 	if (!p_counter_index)
218 		return -EINVAL;
219 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
220 			     MLXSW_REG_RICNT_OPCODE_NOP);
221 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
222 	if (err)
223 		return err;
224 	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
225 	return 0;
226 }
227 
228 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
229 				      unsigned int counter_index)
230 {
231 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
232 
233 	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
234 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
235 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
236 }
237 
238 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
239 			       struct mlxsw_sp_rif *rif,
240 			       enum mlxsw_sp_rif_counter_dir dir)
241 {
242 	unsigned int *p_counter_index;
243 	int err;
244 
245 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
246 	if (!p_counter_index)
247 		return -EINVAL;
248 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
249 				     p_counter_index);
250 	if (err)
251 		return err;
252 
253 	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
254 	if (err)
255 		goto err_counter_clear;
256 
257 	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
258 					*p_counter_index, true, dir);
259 	if (err)
260 		goto err_counter_edit;
261 	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
262 	return 0;
263 
264 err_counter_edit:
265 err_counter_clear:
266 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
267 			      *p_counter_index);
268 	return err;
269 }
270 
271 void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
272 			       struct mlxsw_sp_rif *rif,
273 			       enum mlxsw_sp_rif_counter_dir dir)
274 {
275 	unsigned int *p_counter_index;
276 
277 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
278 		return;
279 
280 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
281 	if (WARN_ON(!p_counter_index))
282 		return;
283 	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
284 				  *p_counter_index, false, dir);
285 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
286 			      *p_counter_index);
287 	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
288 }
289 
290 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
291 {
292 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
293 	struct devlink *devlink;
294 
295 	devlink = priv_to_devlink(mlxsw_sp->core);
296 	if (!devlink_dpipe_table_counter_enabled(devlink,
297 						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
298 		return;
299 	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
300 }
301 
302 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
303 {
304 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
305 
306 	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
307 }
308 
309 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
310 
311 struct mlxsw_sp_prefix_usage {
312 	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
313 };
314 
315 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
316 	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
317 
318 static bool
319 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
320 			 struct mlxsw_sp_prefix_usage *prefix_usage2)
321 {
322 	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
323 }
324 
325 static void
326 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
327 			  struct mlxsw_sp_prefix_usage *prefix_usage2)
328 {
329 	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
330 }
331 
332 static void
333 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
334 			  unsigned char prefix_len)
335 {
336 	set_bit(prefix_len, prefix_usage->b);
337 }
338 
339 static void
340 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
341 			    unsigned char prefix_len)
342 {
343 	clear_bit(prefix_len, prefix_usage->b);
344 }
345 
346 struct mlxsw_sp_fib_key {
347 	unsigned char addr[sizeof(struct in6_addr)];
348 	unsigned char prefix_len;
349 };
350 
351 enum mlxsw_sp_fib_entry_type {
352 	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
353 	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
354 	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
355 	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
356 	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
357 
358 	/* This is a special case of local delivery, where a packet should be
359 	 * decapsulated on reception. Note that there is no corresponding ENCAP,
360 	 * because that's a type of next hop, not of FIB entry. (There can be
361 	 * several next hops in a REMOTE entry, and some of them may be
362 	 * encapsulating entries.)
363 	 */
364 	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
365 	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
366 };
367 
368 struct mlxsw_sp_nexthop_group_info;
369 struct mlxsw_sp_nexthop_group;
370 struct mlxsw_sp_fib_entry;
371 
372 struct mlxsw_sp_fib_node {
373 	struct mlxsw_sp_fib_entry *fib_entry;
374 	struct list_head list;
375 	struct rhash_head ht_node;
376 	struct mlxsw_sp_fib *fib;
377 	struct mlxsw_sp_fib_key key;
378 };
379 
380 struct mlxsw_sp_fib_entry_decap {
381 	struct mlxsw_sp_ipip_entry *ipip_entry;
382 	u32 tunnel_index;
383 };
384 
385 static struct mlxsw_sp_fib_entry_priv *
386 mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
387 {
388 	struct mlxsw_sp_fib_entry_priv *priv;
389 
390 	if (!ll_ops->fib_entry_priv_size)
391 		/* No need to have priv */
392 		return NULL;
393 
394 	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
395 	if (!priv)
396 		return ERR_PTR(-ENOMEM);
397 	refcount_set(&priv->refcnt, 1);
398 	return priv;
399 }
400 
401 static void
402 mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
403 {
404 	kfree(priv);
405 }
406 
407 static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
408 {
409 	refcount_inc(&priv->refcnt);
410 }
411 
412 static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
413 {
414 	if (!priv || !refcount_dec_and_test(&priv->refcnt))
415 		return;
416 	mlxsw_sp_fib_entry_priv_destroy(priv);
417 }
418 
419 static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
420 						struct mlxsw_sp_fib_entry_priv *priv)
421 {
422 	if (!priv)
423 		return;
424 	mlxsw_sp_fib_entry_priv_hold(priv);
425 	list_add(&priv->list, &op_ctx->fib_entry_priv_list);
426 }
427 
428 static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
429 {
430 	struct mlxsw_sp_fib_entry_priv *priv, *tmp;
431 
432 	list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
433 		mlxsw_sp_fib_entry_priv_put(priv);
434 	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
435 }
436 
437 struct mlxsw_sp_fib_entry {
438 	struct mlxsw_sp_fib_node *fib_node;
439 	enum mlxsw_sp_fib_entry_type type;
440 	struct list_head nexthop_group_node;
441 	struct mlxsw_sp_nexthop_group *nh_group;
442 	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
443 	struct mlxsw_sp_fib_entry_priv *priv;
444 };
445 
446 struct mlxsw_sp_fib4_entry {
447 	struct mlxsw_sp_fib_entry common;
448 	struct fib_info *fi;
449 	u32 tb_id;
450 	u8 tos;
451 	u8 type;
452 };
453 
454 struct mlxsw_sp_fib6_entry {
455 	struct mlxsw_sp_fib_entry common;
456 	struct list_head rt6_list;
457 	unsigned int nrt6;
458 };
459 
460 struct mlxsw_sp_rt6 {
461 	struct list_head list;
462 	struct fib6_info *rt;
463 };
464 
465 struct mlxsw_sp_lpm_tree {
466 	u8 id; /* tree ID */
467 	unsigned int ref_count;
468 	enum mlxsw_sp_l3proto proto;
469 	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
470 	struct mlxsw_sp_prefix_usage prefix_usage;
471 };
472 
473 struct mlxsw_sp_fib {
474 	struct rhashtable ht;
475 	struct list_head node_list;
476 	struct mlxsw_sp_vr *vr;
477 	struct mlxsw_sp_lpm_tree *lpm_tree;
478 	enum mlxsw_sp_l3proto proto;
479 	const struct mlxsw_sp_router_ll_ops *ll_ops;
480 };
481 
482 struct mlxsw_sp_vr {
483 	u16 id; /* virtual router ID */
484 	u32 tb_id; /* kernel fib table id */
485 	unsigned int rif_count;
486 	struct mlxsw_sp_fib *fib4;
487 	struct mlxsw_sp_fib *fib6;
488 	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
489 	struct mlxsw_sp_rif *ul_rif;
490 	refcount_t ul_rif_refcnt;
491 };
492 
493 static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
494 					 enum mlxsw_sp_l3proto proto)
495 {
496 	return 0;
497 }
498 
499 static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
500 {
501 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
502 			       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
503 }
504 
505 static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
506 {
507 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
508 			       xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
509 }
510 
511 static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
512 {
513 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
514 			       xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
515 }
516 
517 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
518 
519 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
520 						struct mlxsw_sp_vr *vr,
521 						enum mlxsw_sp_l3proto proto)
522 {
523 	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
524 	struct mlxsw_sp_lpm_tree *lpm_tree;
525 	struct mlxsw_sp_fib *fib;
526 	int err;
527 
528 	err = ll_ops->init(mlxsw_sp, vr->id, proto);
529 	if (err)
530 		return ERR_PTR(err);
531 
532 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
533 	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
534 	if (!fib)
535 		return ERR_PTR(-ENOMEM);
536 	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
537 	if (err)
538 		goto err_rhashtable_init;
539 	INIT_LIST_HEAD(&fib->node_list);
540 	fib->proto = proto;
541 	fib->vr = vr;
542 	fib->lpm_tree = lpm_tree;
543 	fib->ll_ops = ll_ops;
544 	mlxsw_sp_lpm_tree_hold(lpm_tree);
545 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
546 	if (err)
547 		goto err_lpm_tree_bind;
548 	return fib;
549 
550 err_lpm_tree_bind:
551 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
552 err_rhashtable_init:
553 	kfree(fib);
554 	return ERR_PTR(err);
555 }
556 
557 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
558 				 struct mlxsw_sp_fib *fib)
559 {
560 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
561 	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
562 	WARN_ON(!list_empty(&fib->node_list));
563 	rhashtable_destroy(&fib->ht);
564 	kfree(fib);
565 }
566 
567 static struct mlxsw_sp_lpm_tree *
568 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
569 {
570 	static struct mlxsw_sp_lpm_tree *lpm_tree;
571 	int i;
572 
573 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
574 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
575 		if (lpm_tree->ref_count == 0)
576 			return lpm_tree;
577 	}
578 	return NULL;
579 }
580 
581 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
582 				   const struct mlxsw_sp_router_ll_ops *ll_ops,
583 				   struct mlxsw_sp_lpm_tree *lpm_tree)
584 {
585 	char xralta_pl[MLXSW_REG_XRALTA_LEN];
586 
587 	mlxsw_reg_xralta_pack(xralta_pl, true,
588 			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
589 			      lpm_tree->id);
590 	return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
591 }
592 
593 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
594 				   const struct mlxsw_sp_router_ll_ops *ll_ops,
595 				   struct mlxsw_sp_lpm_tree *lpm_tree)
596 {
597 	char xralta_pl[MLXSW_REG_XRALTA_LEN];
598 
599 	mlxsw_reg_xralta_pack(xralta_pl, false,
600 			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
601 			      lpm_tree->id);
602 	ll_ops->ralta_write(mlxsw_sp, xralta_pl);
603 }
604 
605 static int
606 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
607 				  const struct mlxsw_sp_router_ll_ops *ll_ops,
608 				  struct mlxsw_sp_prefix_usage *prefix_usage,
609 				  struct mlxsw_sp_lpm_tree *lpm_tree)
610 {
611 	char xralst_pl[MLXSW_REG_XRALST_LEN];
612 	u8 root_bin = 0;
613 	u8 prefix;
614 	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
615 
616 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
617 		root_bin = prefix;
618 
619 	mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
620 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
621 		if (prefix == 0)
622 			continue;
623 		mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
624 					  MLXSW_REG_RALST_BIN_NO_CHILD);
625 		last_prefix = prefix;
626 	}
627 	return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
628 }
629 
630 static struct mlxsw_sp_lpm_tree *
631 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
632 			 const struct mlxsw_sp_router_ll_ops *ll_ops,
633 			 struct mlxsw_sp_prefix_usage *prefix_usage,
634 			 enum mlxsw_sp_l3proto proto)
635 {
636 	struct mlxsw_sp_lpm_tree *lpm_tree;
637 	int err;
638 
639 	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
640 	if (!lpm_tree)
641 		return ERR_PTR(-EBUSY);
642 	lpm_tree->proto = proto;
643 	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
644 	if (err)
645 		return ERR_PTR(err);
646 
647 	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
648 	if (err)
649 		goto err_left_struct_set;
650 	memcpy(&lpm_tree->prefix_usage, prefix_usage,
651 	       sizeof(lpm_tree->prefix_usage));
652 	memset(&lpm_tree->prefix_ref_count, 0,
653 	       sizeof(lpm_tree->prefix_ref_count));
654 	lpm_tree->ref_count = 1;
655 	return lpm_tree;
656 
657 err_left_struct_set:
658 	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
659 	return ERR_PTR(err);
660 }
661 
662 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
663 				      const struct mlxsw_sp_router_ll_ops *ll_ops,
664 				      struct mlxsw_sp_lpm_tree *lpm_tree)
665 {
666 	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
667 }
668 
669 static struct mlxsw_sp_lpm_tree *
670 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
671 		      struct mlxsw_sp_prefix_usage *prefix_usage,
672 		      enum mlxsw_sp_l3proto proto)
673 {
674 	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
675 	struct mlxsw_sp_lpm_tree *lpm_tree;
676 	int i;
677 
678 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
679 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
680 		if (lpm_tree->ref_count != 0 &&
681 		    lpm_tree->proto == proto &&
682 		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
683 					     prefix_usage)) {
684 			mlxsw_sp_lpm_tree_hold(lpm_tree);
685 			return lpm_tree;
686 		}
687 	}
688 	return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
689 }
690 
691 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
692 {
693 	lpm_tree->ref_count++;
694 }
695 
696 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
697 				  struct mlxsw_sp_lpm_tree *lpm_tree)
698 {
699 	const struct mlxsw_sp_router_ll_ops *ll_ops =
700 				mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
701 
702 	if (--lpm_tree->ref_count == 0)
703 		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
704 }
705 
706 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
707 
708 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
709 {
710 	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
711 	struct mlxsw_sp_lpm_tree *lpm_tree;
712 	u64 max_trees;
713 	int err, i;
714 
715 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
716 		return -EIO;
717 
718 	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
719 	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
720 	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
721 					     sizeof(struct mlxsw_sp_lpm_tree),
722 					     GFP_KERNEL);
723 	if (!mlxsw_sp->router->lpm.trees)
724 		return -ENOMEM;
725 
726 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
727 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
728 		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
729 	}
730 
731 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
732 					 MLXSW_SP_L3_PROTO_IPV4);
733 	if (IS_ERR(lpm_tree)) {
734 		err = PTR_ERR(lpm_tree);
735 		goto err_ipv4_tree_get;
736 	}
737 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
738 
739 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
740 					 MLXSW_SP_L3_PROTO_IPV6);
741 	if (IS_ERR(lpm_tree)) {
742 		err = PTR_ERR(lpm_tree);
743 		goto err_ipv6_tree_get;
744 	}
745 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
746 
747 	return 0;
748 
749 err_ipv6_tree_get:
750 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
751 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
752 err_ipv4_tree_get:
753 	kfree(mlxsw_sp->router->lpm.trees);
754 	return err;
755 }
756 
757 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
758 {
759 	struct mlxsw_sp_lpm_tree *lpm_tree;
760 
761 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
762 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
763 
764 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
765 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
766 
767 	kfree(mlxsw_sp->router->lpm.trees);
768 }
769 
770 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
771 {
772 	return !!vr->fib4 || !!vr->fib6 ||
773 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
774 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
775 }
776 
777 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
778 {
779 	struct mlxsw_sp_vr *vr;
780 	int i;
781 
782 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
783 		vr = &mlxsw_sp->router->vrs[i];
784 		if (!mlxsw_sp_vr_is_used(vr))
785 			return vr;
786 	}
787 	return NULL;
788 }
789 
790 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
791 				     const struct mlxsw_sp_fib *fib, u8 tree_id)
792 {
793 	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
794 
795 	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
796 			      (enum mlxsw_reg_ralxx_protocol) fib->proto,
797 			      tree_id);
798 	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
799 }
800 
801 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
802 				       const struct mlxsw_sp_fib *fib)
803 {
804 	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
805 
806 	/* Bind to tree 0 which is default */
807 	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
808 			      (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
809 	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
810 }
811 
812 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
813 {
814 	/* For our purpose, squash main, default and local tables into one */
815 	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
816 		tb_id = RT_TABLE_MAIN;
817 	return tb_id;
818 }
819 
820 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
821 					    u32 tb_id)
822 {
823 	struct mlxsw_sp_vr *vr;
824 	int i;
825 
826 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
827 
828 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
829 		vr = &mlxsw_sp->router->vrs[i];
830 		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
831 			return vr;
832 	}
833 	return NULL;
834 }
835 
836 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
837 				u16 *vr_id)
838 {
839 	struct mlxsw_sp_vr *vr;
840 	int err = 0;
841 
842 	mutex_lock(&mlxsw_sp->router->lock);
843 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
844 	if (!vr) {
845 		err = -ESRCH;
846 		goto out;
847 	}
848 	*vr_id = vr->id;
849 out:
850 	mutex_unlock(&mlxsw_sp->router->lock);
851 	return err;
852 }
853 
854 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
855 					    enum mlxsw_sp_l3proto proto)
856 {
857 	switch (proto) {
858 	case MLXSW_SP_L3_PROTO_IPV4:
859 		return vr->fib4;
860 	case MLXSW_SP_L3_PROTO_IPV6:
861 		return vr->fib6;
862 	}
863 	return NULL;
864 }
865 
866 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
867 					      u32 tb_id,
868 					      struct netlink_ext_ack *extack)
869 {
870 	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
871 	struct mlxsw_sp_fib *fib4;
872 	struct mlxsw_sp_fib *fib6;
873 	struct mlxsw_sp_vr *vr;
874 	int err;
875 
876 	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
877 	if (!vr) {
878 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
879 		return ERR_PTR(-EBUSY);
880 	}
881 	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
882 	if (IS_ERR(fib4))
883 		return ERR_CAST(fib4);
884 	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
885 	if (IS_ERR(fib6)) {
886 		err = PTR_ERR(fib6);
887 		goto err_fib6_create;
888 	}
889 	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
890 					     MLXSW_SP_L3_PROTO_IPV4);
891 	if (IS_ERR(mr4_table)) {
892 		err = PTR_ERR(mr4_table);
893 		goto err_mr4_table_create;
894 	}
895 	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
896 					     MLXSW_SP_L3_PROTO_IPV6);
897 	if (IS_ERR(mr6_table)) {
898 		err = PTR_ERR(mr6_table);
899 		goto err_mr6_table_create;
900 	}
901 
902 	vr->fib4 = fib4;
903 	vr->fib6 = fib6;
904 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
905 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
906 	vr->tb_id = tb_id;
907 	return vr;
908 
909 err_mr6_table_create:
910 	mlxsw_sp_mr_table_destroy(mr4_table);
911 err_mr4_table_create:
912 	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
913 err_fib6_create:
914 	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
915 	return ERR_PTR(err);
916 }
917 
918 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
919 				struct mlxsw_sp_vr *vr)
920 {
921 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
922 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
923 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
924 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
925 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
926 	vr->fib6 = NULL;
927 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
928 	vr->fib4 = NULL;
929 }
930 
931 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
932 					   struct netlink_ext_ack *extack)
933 {
934 	struct mlxsw_sp_vr *vr;
935 
936 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
937 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
938 	if (!vr)
939 		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
940 	return vr;
941 }
942 
943 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
944 {
945 	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
946 	    list_empty(&vr->fib6->node_list) &&
947 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
948 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
949 		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
950 }
951 
952 static bool
953 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
954 				    enum mlxsw_sp_l3proto proto, u8 tree_id)
955 {
956 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
957 
958 	if (!mlxsw_sp_vr_is_used(vr))
959 		return false;
960 	if (fib->lpm_tree->id == tree_id)
961 		return true;
962 	return false;
963 }
964 
965 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
966 					struct mlxsw_sp_fib *fib,
967 					struct mlxsw_sp_lpm_tree *new_tree)
968 {
969 	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
970 	int err;
971 
972 	fib->lpm_tree = new_tree;
973 	mlxsw_sp_lpm_tree_hold(new_tree);
974 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
975 	if (err)
976 		goto err_tree_bind;
977 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
978 	return 0;
979 
980 err_tree_bind:
981 	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
982 	fib->lpm_tree = old_tree;
983 	return err;
984 }
985 
986 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
987 					 struct mlxsw_sp_fib *fib,
988 					 struct mlxsw_sp_lpm_tree *new_tree)
989 {
990 	enum mlxsw_sp_l3proto proto = fib->proto;
991 	struct mlxsw_sp_lpm_tree *old_tree;
992 	u8 old_id, new_id = new_tree->id;
993 	struct mlxsw_sp_vr *vr;
994 	int i, err;
995 
996 	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
997 	old_id = old_tree->id;
998 
999 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
1000 		vr = &mlxsw_sp->router->vrs[i];
1001 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
1002 			continue;
1003 		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1004 						   mlxsw_sp_vr_fib(vr, proto),
1005 						   new_tree);
1006 		if (err)
1007 			goto err_tree_replace;
1008 	}
1009 
1010 	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
1011 	       sizeof(new_tree->prefix_ref_count));
1012 	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
1013 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
1014 
1015 	return 0;
1016 
1017 err_tree_replace:
1018 	for (i--; i >= 0; i--) {
1019 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
1020 			continue;
1021 		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1022 					     mlxsw_sp_vr_fib(vr, proto),
1023 					     old_tree);
1024 	}
1025 	return err;
1026 }
1027 
1028 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
1029 {
1030 	struct mlxsw_sp_vr *vr;
1031 	u64 max_vrs;
1032 	int i;
1033 
1034 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
1035 		return -EIO;
1036 
1037 	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1038 	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
1039 					GFP_KERNEL);
1040 	if (!mlxsw_sp->router->vrs)
1041 		return -ENOMEM;
1042 
1043 	for (i = 0; i < max_vrs; i++) {
1044 		vr = &mlxsw_sp->router->vrs[i];
1045 		vr->id = i;
1046 	}
1047 
1048 	return 0;
1049 }
1050 
1051 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
1052 
1053 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
1054 {
1055 	/* At this stage we're guaranteed not to have new incoming
1056 	 * FIB notifications and the work queue is free from FIBs
1057 	 * sitting on top of mlxsw netdevs. However, we can still
1058 	 * have other FIBs queued. Flush the queue before flushing
1059 	 * the device's tables. No need for locks, as we're the only
1060 	 * writer.
1061 	 */
1062 	mlxsw_core_flush_owq();
1063 	mlxsw_sp_router_fib_flush(mlxsw_sp);
1064 	kfree(mlxsw_sp->router->vrs);
1065 }
1066 
1067 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
1068 {
1069 	struct net_device *d;
1070 	u32 tb_id;
1071 
1072 	rcu_read_lock();
1073 	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1074 	if (d)
1075 		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1076 	else
1077 		tb_id = RT_TABLE_MAIN;
1078 	rcu_read_unlock();
1079 
1080 	return tb_id;
1081 }
1082 
1083 static struct mlxsw_sp_rif *
1084 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1085 		    const struct mlxsw_sp_rif_params *params,
1086 		    struct netlink_ext_ack *extack);
1087 
1088 static struct mlxsw_sp_rif_ipip_lb *
1089 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1090 				enum mlxsw_sp_ipip_type ipipt,
1091 				struct net_device *ol_dev,
1092 				struct netlink_ext_ack *extack)
1093 {
1094 	struct mlxsw_sp_rif_params_ipip_lb lb_params;
1095 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1096 	struct mlxsw_sp_rif *rif;
1097 
1098 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1099 	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1100 		.common.dev = ol_dev,
1101 		.common.lag = false,
1102 		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1103 	};
1104 
1105 	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1106 	if (IS_ERR(rif))
1107 		return ERR_CAST(rif);
1108 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1109 }
1110 
1111 static struct mlxsw_sp_ipip_entry *
1112 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1113 			  enum mlxsw_sp_ipip_type ipipt,
1114 			  struct net_device *ol_dev)
1115 {
1116 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1117 	struct mlxsw_sp_ipip_entry *ipip_entry;
1118 	struct mlxsw_sp_ipip_entry *ret = NULL;
1119 	int err;
1120 
1121 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1122 	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1123 	if (!ipip_entry)
1124 		return ERR_PTR(-ENOMEM);
1125 
1126 	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1127 							    ol_dev, NULL);
1128 	if (IS_ERR(ipip_entry->ol_lb)) {
1129 		ret = ERR_CAST(ipip_entry->ol_lb);
1130 		goto err_ol_ipip_lb_create;
1131 	}
1132 
1133 	ipip_entry->ipipt = ipipt;
1134 	ipip_entry->ol_dev = ol_dev;
1135 	ipip_entry->parms = ipip_ops->parms_init(ol_dev);
1136 
1137 	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
1138 	if (err) {
1139 		ret = ERR_PTR(err);
1140 		goto err_rem_ip_addr_set;
1141 	}
1142 
1143 	return ipip_entry;
1144 
1145 err_rem_ip_addr_set:
1146 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1147 err_ol_ipip_lb_create:
1148 	kfree(ipip_entry);
1149 	return ret;
1150 }
1151 
1152 static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
1153 					struct mlxsw_sp_ipip_entry *ipip_entry)
1154 {
1155 	const struct mlxsw_sp_ipip_ops *ipip_ops =
1156 		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1157 
1158 	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
1159 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1160 	kfree(ipip_entry);
1161 }
1162 
1163 static bool
1164 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1165 				  const enum mlxsw_sp_l3proto ul_proto,
1166 				  union mlxsw_sp_l3addr saddr,
1167 				  u32 ul_tb_id,
1168 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1169 {
1170 	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1171 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1172 	union mlxsw_sp_l3addr tun_saddr;
1173 
1174 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1175 		return false;
1176 
1177 	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1178 	return tun_ul_tb_id == ul_tb_id &&
1179 	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1180 }
1181 
1182 static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
1183 						 enum mlxsw_sp_ipip_type ipipt)
1184 {
1185 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1186 
1187 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1188 
1189 	/* Not all tunnels require to increase the default pasing depth
1190 	 * (96 bytes).
1191 	 */
1192 	if (ipip_ops->inc_parsing_depth)
1193 		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
1194 
1195 	return 0;
1196 }
1197 
1198 static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
1199 						  enum mlxsw_sp_ipip_type ipipt)
1200 {
1201 	const struct mlxsw_sp_ipip_ops *ipip_ops =
1202 		mlxsw_sp->router->ipip_ops_arr[ipipt];
1203 
1204 	if (ipip_ops->inc_parsing_depth)
1205 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
1206 }
1207 
1208 static int
1209 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1210 			      struct mlxsw_sp_fib_entry *fib_entry,
1211 			      struct mlxsw_sp_ipip_entry *ipip_entry)
1212 {
1213 	u32 tunnel_index;
1214 	int err;
1215 
1216 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1217 				  1, &tunnel_index);
1218 	if (err)
1219 		return err;
1220 
1221 	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
1222 						    ipip_entry->ipipt);
1223 	if (err)
1224 		goto err_parsing_depth_inc;
1225 
1226 	ipip_entry->decap_fib_entry = fib_entry;
1227 	fib_entry->decap.ipip_entry = ipip_entry;
1228 	fib_entry->decap.tunnel_index = tunnel_index;
1229 
1230 	return 0;
1231 
1232 err_parsing_depth_inc:
1233 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
1234 			   fib_entry->decap.tunnel_index);
1235 	return err;
1236 }
1237 
1238 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1239 					  struct mlxsw_sp_fib_entry *fib_entry)
1240 {
1241 	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
1242 
1243 	/* Unlink this node from the IPIP entry that it's the decap entry of. */
1244 	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1245 	fib_entry->decap.ipip_entry = NULL;
1246 	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
1247 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1248 			   1, fib_entry->decap.tunnel_index);
1249 }
1250 
1251 static struct mlxsw_sp_fib_node *
1252 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1253 			 size_t addr_len, unsigned char prefix_len);
1254 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1255 				     struct mlxsw_sp_fib_entry *fib_entry);
1256 
1257 static void
1258 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1259 				 struct mlxsw_sp_ipip_entry *ipip_entry)
1260 {
1261 	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1262 
1263 	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1264 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1265 
1266 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1267 }
1268 
1269 static void
1270 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1271 				  struct mlxsw_sp_ipip_entry *ipip_entry,
1272 				  struct mlxsw_sp_fib_entry *decap_fib_entry)
1273 {
1274 	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1275 					  ipip_entry))
1276 		return;
1277 	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1278 
1279 	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1280 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1281 }
1282 
1283 static struct mlxsw_sp_fib_entry *
1284 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1285 				     enum mlxsw_sp_l3proto proto,
1286 				     const union mlxsw_sp_l3addr *addr,
1287 				     enum mlxsw_sp_fib_entry_type type)
1288 {
1289 	struct mlxsw_sp_fib_node *fib_node;
1290 	unsigned char addr_prefix_len;
1291 	struct mlxsw_sp_fib *fib;
1292 	struct mlxsw_sp_vr *vr;
1293 	const void *addrp;
1294 	size_t addr_len;
1295 	u32 addr4;
1296 
1297 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1298 	if (!vr)
1299 		return NULL;
1300 	fib = mlxsw_sp_vr_fib(vr, proto);
1301 
1302 	switch (proto) {
1303 	case MLXSW_SP_L3_PROTO_IPV4:
1304 		addr4 = be32_to_cpu(addr->addr4);
1305 		addrp = &addr4;
1306 		addr_len = 4;
1307 		addr_prefix_len = 32;
1308 		break;
1309 	case MLXSW_SP_L3_PROTO_IPV6:
1310 		addrp = &addr->addr6;
1311 		addr_len = 16;
1312 		addr_prefix_len = 128;
1313 		break;
1314 	default:
1315 		WARN_ON(1);
1316 		return NULL;
1317 	}
1318 
1319 	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1320 					    addr_prefix_len);
1321 	if (!fib_node || fib_node->fib_entry->type != type)
1322 		return NULL;
1323 
1324 	return fib_node->fib_entry;
1325 }
1326 
1327 /* Given an IPIP entry, find the corresponding decap route. */
1328 static struct mlxsw_sp_fib_entry *
1329 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1330 			       struct mlxsw_sp_ipip_entry *ipip_entry)
1331 {
1332 	static struct mlxsw_sp_fib_node *fib_node;
1333 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1334 	unsigned char saddr_prefix_len;
1335 	union mlxsw_sp_l3addr saddr;
1336 	struct mlxsw_sp_fib *ul_fib;
1337 	struct mlxsw_sp_vr *ul_vr;
1338 	const void *saddrp;
1339 	size_t saddr_len;
1340 	u32 ul_tb_id;
1341 	u32 saddr4;
1342 
1343 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1344 
1345 	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1346 	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1347 	if (!ul_vr)
1348 		return NULL;
1349 
1350 	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1351 	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1352 					   ipip_entry->ol_dev);
1353 
1354 	switch (ipip_ops->ul_proto) {
1355 	case MLXSW_SP_L3_PROTO_IPV4:
1356 		saddr4 = be32_to_cpu(saddr.addr4);
1357 		saddrp = &saddr4;
1358 		saddr_len = 4;
1359 		saddr_prefix_len = 32;
1360 		break;
1361 	case MLXSW_SP_L3_PROTO_IPV6:
1362 		saddrp = &saddr.addr6;
1363 		saddr_len = 16;
1364 		saddr_prefix_len = 128;
1365 		break;
1366 	default:
1367 		WARN_ON(1);
1368 		return NULL;
1369 	}
1370 
1371 	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1372 					    saddr_prefix_len);
1373 	if (!fib_node ||
1374 	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1375 		return NULL;
1376 
1377 	return fib_node->fib_entry;
1378 }
1379 
1380 static struct mlxsw_sp_ipip_entry *
1381 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1382 			   enum mlxsw_sp_ipip_type ipipt,
1383 			   struct net_device *ol_dev)
1384 {
1385 	struct mlxsw_sp_ipip_entry *ipip_entry;
1386 
1387 	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1388 	if (IS_ERR(ipip_entry))
1389 		return ipip_entry;
1390 
1391 	list_add_tail(&ipip_entry->ipip_list_node,
1392 		      &mlxsw_sp->router->ipip_list);
1393 
1394 	return ipip_entry;
1395 }
1396 
1397 static void
1398 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1399 			    struct mlxsw_sp_ipip_entry *ipip_entry)
1400 {
1401 	list_del(&ipip_entry->ipip_list_node);
1402 	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
1403 }
1404 
1405 static bool
1406 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1407 				  const struct net_device *ul_dev,
1408 				  enum mlxsw_sp_l3proto ul_proto,
1409 				  union mlxsw_sp_l3addr ul_dip,
1410 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1411 {
1412 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1413 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1414 
1415 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1416 		return false;
1417 
1418 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1419 						 ul_tb_id, ipip_entry);
1420 }
1421 
1422 /* Given decap parameters, find the corresponding IPIP entry. */
1423 static struct mlxsw_sp_ipip_entry *
1424 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
1425 				  enum mlxsw_sp_l3proto ul_proto,
1426 				  union mlxsw_sp_l3addr ul_dip)
1427 {
1428 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1429 	struct net_device *ul_dev;
1430 
1431 	rcu_read_lock();
1432 
1433 	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
1434 	if (!ul_dev)
1435 		goto out_unlock;
1436 
1437 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1438 			    ipip_list_node)
1439 		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1440 						      ul_proto, ul_dip,
1441 						      ipip_entry))
1442 			goto out_unlock;
1443 
1444 	rcu_read_unlock();
1445 
1446 	return NULL;
1447 
1448 out_unlock:
1449 	rcu_read_unlock();
1450 	return ipip_entry;
1451 }
1452 
1453 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1454 				      const struct net_device *dev,
1455 				      enum mlxsw_sp_ipip_type *p_type)
1456 {
1457 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1458 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1459 	enum mlxsw_sp_ipip_type ipipt;
1460 
1461 	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1462 		ipip_ops = router->ipip_ops_arr[ipipt];
1463 		if (dev->type == ipip_ops->dev_type) {
1464 			if (p_type)
1465 				*p_type = ipipt;
1466 			return true;
1467 		}
1468 	}
1469 	return false;
1470 }
1471 
1472 bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1473 				const struct net_device *dev)
1474 {
1475 	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1476 }
1477 
1478 static struct mlxsw_sp_ipip_entry *
1479 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1480 				   const struct net_device *ol_dev)
1481 {
1482 	struct mlxsw_sp_ipip_entry *ipip_entry;
1483 
1484 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1485 			    ipip_list_node)
1486 		if (ipip_entry->ol_dev == ol_dev)
1487 			return ipip_entry;
1488 
1489 	return NULL;
1490 }
1491 
1492 static struct mlxsw_sp_ipip_entry *
1493 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1494 				   const struct net_device *ul_dev,
1495 				   struct mlxsw_sp_ipip_entry *start)
1496 {
1497 	struct mlxsw_sp_ipip_entry *ipip_entry;
1498 
1499 	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1500 					ipip_list_node);
1501 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1502 				     ipip_list_node) {
1503 		struct net_device *ol_dev = ipip_entry->ol_dev;
1504 		struct net_device *ipip_ul_dev;
1505 
1506 		rcu_read_lock();
1507 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1508 		rcu_read_unlock();
1509 
1510 		if (ipip_ul_dev == ul_dev)
1511 			return ipip_entry;
1512 	}
1513 
1514 	return NULL;
1515 }
1516 
1517 bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1518 				const struct net_device *dev)
1519 {
1520 	bool is_ipip_ul;
1521 
1522 	mutex_lock(&mlxsw_sp->router->lock);
1523 	is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1524 	mutex_unlock(&mlxsw_sp->router->lock);
1525 
1526 	return is_ipip_ul;
1527 }
1528 
1529 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1530 						const struct net_device *ol_dev,
1531 						enum mlxsw_sp_ipip_type ipipt)
1532 {
1533 	const struct mlxsw_sp_ipip_ops *ops
1534 		= mlxsw_sp->router->ipip_ops_arr[ipipt];
1535 
1536 	return ops->can_offload(mlxsw_sp, ol_dev);
1537 }
1538 
1539 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1540 						struct net_device *ol_dev)
1541 {
1542 	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1543 	struct mlxsw_sp_ipip_entry *ipip_entry;
1544 	enum mlxsw_sp_l3proto ul_proto;
1545 	union mlxsw_sp_l3addr saddr;
1546 	u32 ul_tb_id;
1547 
1548 	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1549 	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1550 		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1551 		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1552 		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1553 		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1554 							  saddr, ul_tb_id,
1555 							  NULL)) {
1556 			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1557 								ol_dev);
1558 			if (IS_ERR(ipip_entry))
1559 				return PTR_ERR(ipip_entry);
1560 		}
1561 	}
1562 
1563 	return 0;
1564 }
1565 
1566 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1567 						   struct net_device *ol_dev)
1568 {
1569 	struct mlxsw_sp_ipip_entry *ipip_entry;
1570 
1571 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1572 	if (ipip_entry)
1573 		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1574 }
1575 
1576 static void
1577 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1578 				struct mlxsw_sp_ipip_entry *ipip_entry)
1579 {
1580 	struct mlxsw_sp_fib_entry *decap_fib_entry;
1581 
1582 	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1583 	if (decap_fib_entry)
1584 		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1585 						  decap_fib_entry);
1586 }
1587 
1588 static int
1589 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1590 			u16 ul_rif_id, bool enable)
1591 {
1592 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1593 	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
1594 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1595 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1596 	char ritr_pl[MLXSW_REG_RITR_LEN];
1597 	struct in6_addr *saddr6;
1598 	u32 saddr4;
1599 
1600 	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
1601 	switch (lb_cf.ul_protocol) {
1602 	case MLXSW_SP_L3_PROTO_IPV4:
1603 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1604 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1605 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1606 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1607 						   ipip_options, ul_vr_id,
1608 						   ul_rif_id, saddr4,
1609 						   lb_cf.okey);
1610 		break;
1611 
1612 	case MLXSW_SP_L3_PROTO_IPV6:
1613 		saddr6 = &lb_cf.saddr.addr6;
1614 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1615 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1616 		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
1617 						   ipip_options, ul_vr_id,
1618 						   ul_rif_id, saddr6,
1619 						   lb_cf.okey);
1620 		break;
1621 	}
1622 
1623 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1624 }
1625 
1626 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1627 						 struct net_device *ol_dev)
1628 {
1629 	struct mlxsw_sp_ipip_entry *ipip_entry;
1630 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1631 	int err = 0;
1632 
1633 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1634 	if (ipip_entry) {
1635 		lb_rif = ipip_entry->ol_lb;
1636 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1637 					      lb_rif->ul_rif_id, true);
1638 		if (err)
1639 			goto out;
1640 		lb_rif->common.mtu = ol_dev->mtu;
1641 	}
1642 
1643 out:
1644 	return err;
1645 }
1646 
1647 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1648 						struct net_device *ol_dev)
1649 {
1650 	struct mlxsw_sp_ipip_entry *ipip_entry;
1651 
1652 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1653 	if (ipip_entry)
1654 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1655 }
1656 
1657 static void
1658 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1659 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1660 {
1661 	if (ipip_entry->decap_fib_entry)
1662 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1663 }
1664 
1665 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1666 						  struct net_device *ol_dev)
1667 {
1668 	struct mlxsw_sp_ipip_entry *ipip_entry;
1669 
1670 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1671 	if (ipip_entry)
1672 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1673 }
1674 
1675 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1676 					 struct mlxsw_sp_rif *old_rif,
1677 					 struct mlxsw_sp_rif *new_rif);
1678 static int
1679 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1680 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1681 				 bool keep_encap,
1682 				 struct netlink_ext_ack *extack)
1683 {
1684 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1685 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1686 
1687 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1688 						     ipip_entry->ipipt,
1689 						     ipip_entry->ol_dev,
1690 						     extack);
1691 	if (IS_ERR(new_lb_rif))
1692 		return PTR_ERR(new_lb_rif);
1693 	ipip_entry->ol_lb = new_lb_rif;
1694 
1695 	if (keep_encap)
1696 		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1697 					     &new_lb_rif->common);
1698 
1699 	mlxsw_sp_rif_destroy(&old_lb_rif->common);
1700 
1701 	return 0;
1702 }
1703 
1704 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1705 					struct mlxsw_sp_rif *rif);
1706 
1707 /**
1708  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1709  * @mlxsw_sp: mlxsw_sp.
1710  * @ipip_entry: IPIP entry.
1711  * @recreate_loopback: Recreates the associated loopback RIF.
1712  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1713  *              relevant when recreate_loopback is true.
1714  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1715  *                   is only relevant when recreate_loopback is false.
1716  * @extack: extack.
1717  *
1718  * Return: Non-zero value on failure.
1719  */
1720 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1721 					struct mlxsw_sp_ipip_entry *ipip_entry,
1722 					bool recreate_loopback,
1723 					bool keep_encap,
1724 					bool update_nexthops,
1725 					struct netlink_ext_ack *extack)
1726 {
1727 	int err;
1728 
1729 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1730 	 * recreate it. That creates a window of opportunity where RALUE and
1731 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1732 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1733 	 * of RALUE, demote the decap route back.
1734 	 */
1735 	if (ipip_entry->decap_fib_entry)
1736 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1737 
1738 	if (recreate_loopback) {
1739 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1740 						       keep_encap, extack);
1741 		if (err)
1742 			return err;
1743 	} else if (update_nexthops) {
1744 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1745 					    &ipip_entry->ol_lb->common);
1746 	}
1747 
1748 	if (ipip_entry->ol_dev->flags & IFF_UP)
1749 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1750 
1751 	return 0;
1752 }
1753 
1754 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1755 						struct net_device *ol_dev,
1756 						struct netlink_ext_ack *extack)
1757 {
1758 	struct mlxsw_sp_ipip_entry *ipip_entry =
1759 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1760 
1761 	if (!ipip_entry)
1762 		return 0;
1763 
1764 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1765 						   true, false, false, extack);
1766 }
1767 
1768 static int
1769 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1770 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1771 				     struct net_device *ul_dev,
1772 				     bool *demote_this,
1773 				     struct netlink_ext_ack *extack)
1774 {
1775 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1776 	enum mlxsw_sp_l3proto ul_proto;
1777 	union mlxsw_sp_l3addr saddr;
1778 
1779 	/* Moving underlay to a different VRF might cause local address
1780 	 * conflict, and the conflicting tunnels need to be demoted.
1781 	 */
1782 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1783 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1784 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1785 						 saddr, ul_tb_id,
1786 						 ipip_entry)) {
1787 		*demote_this = true;
1788 		return 0;
1789 	}
1790 
1791 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1792 						   true, true, false, extack);
1793 }
1794 
1795 static int
1796 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1797 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1798 				    struct net_device *ul_dev)
1799 {
1800 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1801 						   false, false, true, NULL);
1802 }
1803 
1804 static int
1805 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1806 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1807 				      struct net_device *ul_dev)
1808 {
1809 	/* A down underlay device causes encapsulated packets to not be
1810 	 * forwarded, but decap still works. So refresh next hops without
1811 	 * touching anything else.
1812 	 */
1813 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1814 						   false, false, true, NULL);
1815 }
1816 
1817 static int
1818 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1819 					struct net_device *ol_dev,
1820 					struct netlink_ext_ack *extack)
1821 {
1822 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1823 	struct mlxsw_sp_ipip_entry *ipip_entry;
1824 	int err;
1825 
1826 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1827 	if (!ipip_entry)
1828 		/* A change might make a tunnel eligible for offloading, but
1829 		 * that is currently not implemented. What falls to slow path
1830 		 * stays there.
1831 		 */
1832 		return 0;
1833 
1834 	/* A change might make a tunnel not eligible for offloading. */
1835 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1836 						 ipip_entry->ipipt)) {
1837 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1838 		return 0;
1839 	}
1840 
1841 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1842 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1843 	return err;
1844 }
1845 
1846 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1847 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1848 {
1849 	struct net_device *ol_dev = ipip_entry->ol_dev;
1850 
1851 	if (ol_dev->flags & IFF_UP)
1852 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1853 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1854 }
1855 
1856 /* The configuration where several tunnels have the same local address in the
1857  * same underlay table needs special treatment in the HW. That is currently not
1858  * implemented in the driver. This function finds and demotes the first tunnel
1859  * with a given source address, except the one passed in in the argument
1860  * `except'.
1861  */
1862 bool
1863 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1864 				     enum mlxsw_sp_l3proto ul_proto,
1865 				     union mlxsw_sp_l3addr saddr,
1866 				     u32 ul_tb_id,
1867 				     const struct mlxsw_sp_ipip_entry *except)
1868 {
1869 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1870 
1871 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1872 				 ipip_list_node) {
1873 		if (ipip_entry != except &&
1874 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1875 						      ul_tb_id, ipip_entry)) {
1876 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1877 			return true;
1878 		}
1879 	}
1880 
1881 	return false;
1882 }
1883 
1884 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1885 						     struct net_device *ul_dev)
1886 {
1887 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1888 
1889 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1890 				 ipip_list_node) {
1891 		struct net_device *ol_dev = ipip_entry->ol_dev;
1892 		struct net_device *ipip_ul_dev;
1893 
1894 		rcu_read_lock();
1895 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1896 		rcu_read_unlock();
1897 		if (ipip_ul_dev == ul_dev)
1898 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1899 	}
1900 }
1901 
1902 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1903 				     struct net_device *ol_dev,
1904 				     unsigned long event,
1905 				     struct netdev_notifier_info *info)
1906 {
1907 	struct netdev_notifier_changeupper_info *chup;
1908 	struct netlink_ext_ack *extack;
1909 	int err = 0;
1910 
1911 	mutex_lock(&mlxsw_sp->router->lock);
1912 	switch (event) {
1913 	case NETDEV_REGISTER:
1914 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1915 		break;
1916 	case NETDEV_UNREGISTER:
1917 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1918 		break;
1919 	case NETDEV_UP:
1920 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1921 		break;
1922 	case NETDEV_DOWN:
1923 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1924 		break;
1925 	case NETDEV_CHANGEUPPER:
1926 		chup = container_of(info, typeof(*chup), info);
1927 		extack = info->extack;
1928 		if (netif_is_l3_master(chup->upper_dev))
1929 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1930 								   ol_dev,
1931 								   extack);
1932 		break;
1933 	case NETDEV_CHANGE:
1934 		extack = info->extack;
1935 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1936 							      ol_dev, extack);
1937 		break;
1938 	case NETDEV_CHANGEMTU:
1939 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1940 		break;
1941 	}
1942 	mutex_unlock(&mlxsw_sp->router->lock);
1943 	return err;
1944 }
1945 
1946 static int
1947 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1948 				   struct mlxsw_sp_ipip_entry *ipip_entry,
1949 				   struct net_device *ul_dev,
1950 				   bool *demote_this,
1951 				   unsigned long event,
1952 				   struct netdev_notifier_info *info)
1953 {
1954 	struct netdev_notifier_changeupper_info *chup;
1955 	struct netlink_ext_ack *extack;
1956 
1957 	switch (event) {
1958 	case NETDEV_CHANGEUPPER:
1959 		chup = container_of(info, typeof(*chup), info);
1960 		extack = info->extack;
1961 		if (netif_is_l3_master(chup->upper_dev))
1962 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1963 								    ipip_entry,
1964 								    ul_dev,
1965 								    demote_this,
1966 								    extack);
1967 		break;
1968 
1969 	case NETDEV_UP:
1970 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1971 							   ul_dev);
1972 	case NETDEV_DOWN:
1973 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1974 							     ipip_entry,
1975 							     ul_dev);
1976 	}
1977 	return 0;
1978 }
1979 
1980 int
1981 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1982 				 struct net_device *ul_dev,
1983 				 unsigned long event,
1984 				 struct netdev_notifier_info *info)
1985 {
1986 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1987 	int err = 0;
1988 
1989 	mutex_lock(&mlxsw_sp->router->lock);
1990 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1991 								ul_dev,
1992 								ipip_entry))) {
1993 		struct mlxsw_sp_ipip_entry *prev;
1994 		bool demote_this = false;
1995 
1996 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1997 							 ul_dev, &demote_this,
1998 							 event, info);
1999 		if (err) {
2000 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
2001 								 ul_dev);
2002 			break;
2003 		}
2004 
2005 		if (demote_this) {
2006 			if (list_is_first(&ipip_entry->ipip_list_node,
2007 					  &mlxsw_sp->router->ipip_list))
2008 				prev = NULL;
2009 			else
2010 				/* This can't be cached from previous iteration,
2011 				 * because that entry could be gone now.
2012 				 */
2013 				prev = list_prev_entry(ipip_entry,
2014 						       ipip_list_node);
2015 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
2016 			ipip_entry = prev;
2017 		}
2018 	}
2019 	mutex_unlock(&mlxsw_sp->router->lock);
2020 
2021 	return err;
2022 }
2023 
2024 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2025 				      enum mlxsw_sp_l3proto ul_proto,
2026 				      const union mlxsw_sp_l3addr *ul_sip,
2027 				      u32 tunnel_index)
2028 {
2029 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2030 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2031 	struct mlxsw_sp_fib_entry *fib_entry;
2032 	int err = 0;
2033 
2034 	mutex_lock(&mlxsw_sp->router->lock);
2035 
2036 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
2037 		err = -EINVAL;
2038 		goto out;
2039 	}
2040 
2041 	router->nve_decap_config.ul_tb_id = ul_tb_id;
2042 	router->nve_decap_config.tunnel_index = tunnel_index;
2043 	router->nve_decap_config.ul_proto = ul_proto;
2044 	router->nve_decap_config.ul_sip = *ul_sip;
2045 	router->nve_decap_config.valid = true;
2046 
2047 	/* It is valid to create a tunnel with a local IP and only later
2048 	 * assign this IP address to a local interface
2049 	 */
2050 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2051 							 ul_proto, ul_sip,
2052 							 type);
2053 	if (!fib_entry)
2054 		goto out;
2055 
2056 	fib_entry->decap.tunnel_index = tunnel_index;
2057 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2058 
2059 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2060 	if (err)
2061 		goto err_fib_entry_update;
2062 
2063 	goto out;
2064 
2065 err_fib_entry_update:
2066 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2067 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2068 out:
2069 	mutex_unlock(&mlxsw_sp->router->lock);
2070 	return err;
2071 }
2072 
2073 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2074 				      enum mlxsw_sp_l3proto ul_proto,
2075 				      const union mlxsw_sp_l3addr *ul_sip)
2076 {
2077 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2078 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2079 	struct mlxsw_sp_fib_entry *fib_entry;
2080 
2081 	mutex_lock(&mlxsw_sp->router->lock);
2082 
2083 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2084 		goto out;
2085 
2086 	router->nve_decap_config.valid = false;
2087 
2088 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2089 							 ul_proto, ul_sip,
2090 							 type);
2091 	if (!fib_entry)
2092 		goto out;
2093 
2094 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2095 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2096 out:
2097 	mutex_unlock(&mlxsw_sp->router->lock);
2098 }
2099 
2100 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2101 					 u32 ul_tb_id,
2102 					 enum mlxsw_sp_l3proto ul_proto,
2103 					 const union mlxsw_sp_l3addr *ul_sip)
2104 {
2105 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2106 
2107 	return router->nve_decap_config.valid &&
2108 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
2109 	       router->nve_decap_config.ul_proto == ul_proto &&
2110 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2111 		       sizeof(*ul_sip));
2112 }
2113 
2114 struct mlxsw_sp_neigh_key {
2115 	struct neighbour *n;
2116 };
2117 
2118 struct mlxsw_sp_neigh_entry {
2119 	struct list_head rif_list_node;
2120 	struct rhash_head ht_node;
2121 	struct mlxsw_sp_neigh_key key;
2122 	u16 rif;
2123 	bool connected;
2124 	unsigned char ha[ETH_ALEN];
2125 	struct list_head nexthop_list; /* list of nexthops using
2126 					* this neigh entry
2127 					*/
2128 	struct list_head nexthop_neighs_list_node;
2129 	unsigned int counter_index;
2130 	bool counter_valid;
2131 };
2132 
2133 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2134 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2135 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2136 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
2137 };
2138 
2139 struct mlxsw_sp_neigh_entry *
2140 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2141 			struct mlxsw_sp_neigh_entry *neigh_entry)
2142 {
2143 	if (!neigh_entry) {
2144 		if (list_empty(&rif->neigh_list))
2145 			return NULL;
2146 		else
2147 			return list_first_entry(&rif->neigh_list,
2148 						typeof(*neigh_entry),
2149 						rif_list_node);
2150 	}
2151 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2152 		return NULL;
2153 	return list_next_entry(neigh_entry, rif_list_node);
2154 }
2155 
2156 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2157 {
2158 	return neigh_entry->key.n->tbl->family;
2159 }
2160 
2161 unsigned char *
2162 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2163 {
2164 	return neigh_entry->ha;
2165 }
2166 
2167 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2168 {
2169 	struct neighbour *n;
2170 
2171 	n = neigh_entry->key.n;
2172 	return ntohl(*((__be32 *) n->primary_key));
2173 }
2174 
2175 struct in6_addr *
2176 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2177 {
2178 	struct neighbour *n;
2179 
2180 	n = neigh_entry->key.n;
2181 	return (struct in6_addr *) &n->primary_key;
2182 }
2183 
2184 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2185 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2186 			       u64 *p_counter)
2187 {
2188 	if (!neigh_entry->counter_valid)
2189 		return -EINVAL;
2190 
2191 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2192 					 p_counter, NULL);
2193 }
2194 
2195 static struct mlxsw_sp_neigh_entry *
2196 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2197 			   u16 rif)
2198 {
2199 	struct mlxsw_sp_neigh_entry *neigh_entry;
2200 
2201 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2202 	if (!neigh_entry)
2203 		return NULL;
2204 
2205 	neigh_entry->key.n = n;
2206 	neigh_entry->rif = rif;
2207 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2208 
2209 	return neigh_entry;
2210 }
2211 
2212 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2213 {
2214 	kfree(neigh_entry);
2215 }
2216 
2217 static int
2218 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2219 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2220 {
2221 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2222 				      &neigh_entry->ht_node,
2223 				      mlxsw_sp_neigh_ht_params);
2224 }
2225 
2226 static void
2227 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2228 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2229 {
2230 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2231 			       &neigh_entry->ht_node,
2232 			       mlxsw_sp_neigh_ht_params);
2233 }
2234 
2235 static bool
2236 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2237 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2238 {
2239 	struct devlink *devlink;
2240 	const char *table_name;
2241 
2242 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2243 	case AF_INET:
2244 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2245 		break;
2246 	case AF_INET6:
2247 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2248 		break;
2249 	default:
2250 		WARN_ON(1);
2251 		return false;
2252 	}
2253 
2254 	devlink = priv_to_devlink(mlxsw_sp->core);
2255 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2256 }
2257 
2258 static void
2259 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2260 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2261 {
2262 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2263 		return;
2264 
2265 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2266 		return;
2267 
2268 	neigh_entry->counter_valid = true;
2269 }
2270 
2271 static void
2272 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2273 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2274 {
2275 	if (!neigh_entry->counter_valid)
2276 		return;
2277 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2278 				   neigh_entry->counter_index);
2279 	neigh_entry->counter_valid = false;
2280 }
2281 
2282 static struct mlxsw_sp_neigh_entry *
2283 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2284 {
2285 	struct mlxsw_sp_neigh_entry *neigh_entry;
2286 	struct mlxsw_sp_rif *rif;
2287 	int err;
2288 
2289 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2290 	if (!rif)
2291 		return ERR_PTR(-EINVAL);
2292 
2293 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2294 	if (!neigh_entry)
2295 		return ERR_PTR(-ENOMEM);
2296 
2297 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2298 	if (err)
2299 		goto err_neigh_entry_insert;
2300 
2301 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2302 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2303 
2304 	return neigh_entry;
2305 
2306 err_neigh_entry_insert:
2307 	mlxsw_sp_neigh_entry_free(neigh_entry);
2308 	return ERR_PTR(err);
2309 }
2310 
2311 static void
2312 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2313 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2314 {
2315 	list_del(&neigh_entry->rif_list_node);
2316 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2317 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2318 	mlxsw_sp_neigh_entry_free(neigh_entry);
2319 }
2320 
2321 static struct mlxsw_sp_neigh_entry *
2322 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2323 {
2324 	struct mlxsw_sp_neigh_key key;
2325 
2326 	key.n = n;
2327 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2328 				      &key, mlxsw_sp_neigh_ht_params);
2329 }
2330 
2331 static void
2332 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2333 {
2334 	unsigned long interval;
2335 
2336 #if IS_ENABLED(CONFIG_IPV6)
2337 	interval = min_t(unsigned long,
2338 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2339 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2340 #else
2341 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2342 #endif
2343 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2344 }
2345 
2346 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2347 						   char *rauhtd_pl,
2348 						   int ent_index)
2349 {
2350 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2351 	struct net_device *dev;
2352 	struct neighbour *n;
2353 	__be32 dipn;
2354 	u32 dip;
2355 	u16 rif;
2356 
2357 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2358 
2359 	if (WARN_ON_ONCE(rif >= max_rifs))
2360 		return;
2361 	if (!mlxsw_sp->router->rifs[rif]) {
2362 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2363 		return;
2364 	}
2365 
2366 	dipn = htonl(dip);
2367 	dev = mlxsw_sp->router->rifs[rif]->dev;
2368 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2369 	if (!n)
2370 		return;
2371 
2372 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2373 	neigh_event_send(n, NULL);
2374 	neigh_release(n);
2375 }
2376 
2377 #if IS_ENABLED(CONFIG_IPV6)
2378 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2379 						   char *rauhtd_pl,
2380 						   int rec_index)
2381 {
2382 	struct net_device *dev;
2383 	struct neighbour *n;
2384 	struct in6_addr dip;
2385 	u16 rif;
2386 
2387 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2388 					 (char *) &dip);
2389 
2390 	if (!mlxsw_sp->router->rifs[rif]) {
2391 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2392 		return;
2393 	}
2394 
2395 	dev = mlxsw_sp->router->rifs[rif]->dev;
2396 	n = neigh_lookup(&nd_tbl, &dip, dev);
2397 	if (!n)
2398 		return;
2399 
2400 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2401 	neigh_event_send(n, NULL);
2402 	neigh_release(n);
2403 }
2404 #else
2405 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2406 						   char *rauhtd_pl,
2407 						   int rec_index)
2408 {
2409 }
2410 #endif
2411 
2412 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2413 						   char *rauhtd_pl,
2414 						   int rec_index)
2415 {
2416 	u8 num_entries;
2417 	int i;
2418 
2419 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2420 								rec_index);
2421 	/* Hardware starts counting at 0, so add 1. */
2422 	num_entries++;
2423 
2424 	/* Each record consists of several neighbour entries. */
2425 	for (i = 0; i < num_entries; i++) {
2426 		int ent_index;
2427 
2428 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2429 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2430 						       ent_index);
2431 	}
2432 
2433 }
2434 
2435 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2436 						   char *rauhtd_pl,
2437 						   int rec_index)
2438 {
2439 	/* One record contains one entry. */
2440 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2441 					       rec_index);
2442 }
2443 
2444 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2445 					      char *rauhtd_pl, int rec_index)
2446 {
2447 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2448 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2449 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2450 						       rec_index);
2451 		break;
2452 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2453 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2454 						       rec_index);
2455 		break;
2456 	}
2457 }
2458 
2459 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2460 {
2461 	u8 num_rec, last_rec_index, num_entries;
2462 
2463 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2464 	last_rec_index = num_rec - 1;
2465 
2466 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2467 		return false;
2468 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2469 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2470 		return true;
2471 
2472 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2473 								last_rec_index);
2474 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2475 		return true;
2476 	return false;
2477 }
2478 
2479 static int
2480 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2481 				       char *rauhtd_pl,
2482 				       enum mlxsw_reg_rauhtd_type type)
2483 {
2484 	int i, num_rec;
2485 	int err;
2486 
2487 	/* Ensure the RIF we read from the device does not change mid-dump. */
2488 	mutex_lock(&mlxsw_sp->router->lock);
2489 	do {
2490 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2491 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2492 				      rauhtd_pl);
2493 		if (err) {
2494 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2495 			break;
2496 		}
2497 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2498 		for (i = 0; i < num_rec; i++)
2499 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2500 							  i);
2501 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2502 	mutex_unlock(&mlxsw_sp->router->lock);
2503 
2504 	return err;
2505 }
2506 
2507 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2508 {
2509 	enum mlxsw_reg_rauhtd_type type;
2510 	char *rauhtd_pl;
2511 	int err;
2512 
2513 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2514 	if (!rauhtd_pl)
2515 		return -ENOMEM;
2516 
2517 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2518 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2519 	if (err)
2520 		goto out;
2521 
2522 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2523 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2524 out:
2525 	kfree(rauhtd_pl);
2526 	return err;
2527 }
2528 
2529 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2530 {
2531 	struct mlxsw_sp_neigh_entry *neigh_entry;
2532 
2533 	mutex_lock(&mlxsw_sp->router->lock);
2534 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2535 			    nexthop_neighs_list_node)
2536 		/* If this neigh have nexthops, make the kernel think this neigh
2537 		 * is active regardless of the traffic.
2538 		 */
2539 		neigh_event_send(neigh_entry->key.n, NULL);
2540 	mutex_unlock(&mlxsw_sp->router->lock);
2541 }
2542 
2543 static void
2544 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2545 {
2546 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2547 
2548 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2549 			       msecs_to_jiffies(interval));
2550 }
2551 
2552 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2553 {
2554 	struct mlxsw_sp_router *router;
2555 	int err;
2556 
2557 	router = container_of(work, struct mlxsw_sp_router,
2558 			      neighs_update.dw.work);
2559 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2560 	if (err)
2561 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2562 
2563 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2564 
2565 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2566 }
2567 
2568 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2569 {
2570 	struct mlxsw_sp_neigh_entry *neigh_entry;
2571 	struct mlxsw_sp_router *router;
2572 
2573 	router = container_of(work, struct mlxsw_sp_router,
2574 			      nexthop_probe_dw.work);
2575 	/* Iterate over nexthop neighbours, find those who are unresolved and
2576 	 * send arp on them. This solves the chicken-egg problem when
2577 	 * the nexthop wouldn't get offloaded until the neighbor is resolved
2578 	 * but it wouldn't get resolved ever in case traffic is flowing in HW
2579 	 * using different nexthop.
2580 	 */
2581 	mutex_lock(&router->lock);
2582 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2583 			    nexthop_neighs_list_node)
2584 		if (!neigh_entry->connected)
2585 			neigh_event_send(neigh_entry->key.n, NULL);
2586 	mutex_unlock(&router->lock);
2587 
2588 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2589 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2590 }
2591 
2592 static void
2593 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2594 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2595 			      bool removing, bool dead);
2596 
2597 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2598 {
2599 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2600 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2601 }
2602 
2603 static int
2604 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2605 				struct mlxsw_sp_neigh_entry *neigh_entry,
2606 				enum mlxsw_reg_rauht_op op)
2607 {
2608 	struct neighbour *n = neigh_entry->key.n;
2609 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2610 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2611 
2612 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2613 			      dip);
2614 	if (neigh_entry->counter_valid)
2615 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2616 					     neigh_entry->counter_index);
2617 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2618 }
2619 
2620 static int
2621 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2622 				struct mlxsw_sp_neigh_entry *neigh_entry,
2623 				enum mlxsw_reg_rauht_op op)
2624 {
2625 	struct neighbour *n = neigh_entry->key.n;
2626 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2627 	const char *dip = n->primary_key;
2628 
2629 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2630 			      dip);
2631 	if (neigh_entry->counter_valid)
2632 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2633 					     neigh_entry->counter_index);
2634 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2635 }
2636 
2637 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2638 {
2639 	struct neighbour *n = neigh_entry->key.n;
2640 
2641 	/* Packets with a link-local destination address are trapped
2642 	 * after LPM lookup and never reach the neighbour table, so
2643 	 * there is no need to program such neighbours to the device.
2644 	 */
2645 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2646 	    IPV6_ADDR_LINKLOCAL)
2647 		return true;
2648 	return false;
2649 }
2650 
2651 static void
2652 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2653 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2654 			    bool adding)
2655 {
2656 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2657 	int err;
2658 
2659 	if (!adding && !neigh_entry->connected)
2660 		return;
2661 	neigh_entry->connected = adding;
2662 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2663 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2664 						      op);
2665 		if (err)
2666 			return;
2667 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2668 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2669 			return;
2670 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2671 						      op);
2672 		if (err)
2673 			return;
2674 	} else {
2675 		WARN_ON_ONCE(1);
2676 		return;
2677 	}
2678 
2679 	if (adding)
2680 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2681 	else
2682 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2683 }
2684 
2685 void
2686 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2687 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2688 				    bool adding)
2689 {
2690 	if (adding)
2691 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2692 	else
2693 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2694 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2695 }
2696 
2697 struct mlxsw_sp_netevent_work {
2698 	struct work_struct work;
2699 	struct mlxsw_sp *mlxsw_sp;
2700 	struct neighbour *n;
2701 };
2702 
2703 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2704 {
2705 	struct mlxsw_sp_netevent_work *net_work =
2706 		container_of(work, struct mlxsw_sp_netevent_work, work);
2707 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2708 	struct mlxsw_sp_neigh_entry *neigh_entry;
2709 	struct neighbour *n = net_work->n;
2710 	unsigned char ha[ETH_ALEN];
2711 	bool entry_connected;
2712 	u8 nud_state, dead;
2713 
2714 	/* If these parameters are changed after we release the lock,
2715 	 * then we are guaranteed to receive another event letting us
2716 	 * know about it.
2717 	 */
2718 	read_lock_bh(&n->lock);
2719 	memcpy(ha, n->ha, ETH_ALEN);
2720 	nud_state = n->nud_state;
2721 	dead = n->dead;
2722 	read_unlock_bh(&n->lock);
2723 
2724 	mutex_lock(&mlxsw_sp->router->lock);
2725 	mlxsw_sp_span_respin(mlxsw_sp);
2726 
2727 	entry_connected = nud_state & NUD_VALID && !dead;
2728 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2729 	if (!entry_connected && !neigh_entry)
2730 		goto out;
2731 	if (!neigh_entry) {
2732 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2733 		if (IS_ERR(neigh_entry))
2734 			goto out;
2735 	}
2736 
2737 	if (neigh_entry->connected && entry_connected &&
2738 	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2739 		goto out;
2740 
2741 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2742 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2743 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2744 				      dead);
2745 
2746 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2747 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2748 
2749 out:
2750 	mutex_unlock(&mlxsw_sp->router->lock);
2751 	neigh_release(n);
2752 	kfree(net_work);
2753 }
2754 
2755 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2756 
2757 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2758 {
2759 	struct mlxsw_sp_netevent_work *net_work =
2760 		container_of(work, struct mlxsw_sp_netevent_work, work);
2761 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2762 
2763 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2764 	kfree(net_work);
2765 }
2766 
2767 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2768 
2769 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2770 {
2771 	struct mlxsw_sp_netevent_work *net_work =
2772 		container_of(work, struct mlxsw_sp_netevent_work, work);
2773 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2774 
2775 	__mlxsw_sp_router_init(mlxsw_sp);
2776 	kfree(net_work);
2777 }
2778 
2779 static int mlxsw_sp_router_schedule_work(struct net *net,
2780 					 struct notifier_block *nb,
2781 					 void (*cb)(struct work_struct *))
2782 {
2783 	struct mlxsw_sp_netevent_work *net_work;
2784 	struct mlxsw_sp_router *router;
2785 
2786 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2787 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2788 		return NOTIFY_DONE;
2789 
2790 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2791 	if (!net_work)
2792 		return NOTIFY_BAD;
2793 
2794 	INIT_WORK(&net_work->work, cb);
2795 	net_work->mlxsw_sp = router->mlxsw_sp;
2796 	mlxsw_core_schedule_work(&net_work->work);
2797 	return NOTIFY_DONE;
2798 }
2799 
2800 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2801 					  unsigned long event, void *ptr)
2802 {
2803 	struct mlxsw_sp_netevent_work *net_work;
2804 	struct mlxsw_sp_port *mlxsw_sp_port;
2805 	struct mlxsw_sp *mlxsw_sp;
2806 	unsigned long interval;
2807 	struct neigh_parms *p;
2808 	struct neighbour *n;
2809 
2810 	switch (event) {
2811 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2812 		p = ptr;
2813 
2814 		/* We don't care about changes in the default table. */
2815 		if (!p->dev || (p->tbl->family != AF_INET &&
2816 				p->tbl->family != AF_INET6))
2817 			return NOTIFY_DONE;
2818 
2819 		/* We are in atomic context and can't take RTNL mutex,
2820 		 * so use RCU variant to walk the device chain.
2821 		 */
2822 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2823 		if (!mlxsw_sp_port)
2824 			return NOTIFY_DONE;
2825 
2826 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2827 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2828 		mlxsw_sp->router->neighs_update.interval = interval;
2829 
2830 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2831 		break;
2832 	case NETEVENT_NEIGH_UPDATE:
2833 		n = ptr;
2834 
2835 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2836 			return NOTIFY_DONE;
2837 
2838 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2839 		if (!mlxsw_sp_port)
2840 			return NOTIFY_DONE;
2841 
2842 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2843 		if (!net_work) {
2844 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
2845 			return NOTIFY_BAD;
2846 		}
2847 
2848 		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2849 		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2850 		net_work->n = n;
2851 
2852 		/* Take a reference to ensure the neighbour won't be
2853 		 * destructed until we drop the reference in delayed
2854 		 * work.
2855 		 */
2856 		neigh_clone(n);
2857 		mlxsw_core_schedule_work(&net_work->work);
2858 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2859 		break;
2860 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2861 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2862 		return mlxsw_sp_router_schedule_work(ptr, nb,
2863 				mlxsw_sp_router_mp_hash_event_work);
2864 
2865 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2866 		return mlxsw_sp_router_schedule_work(ptr, nb,
2867 				mlxsw_sp_router_update_priority_work);
2868 	}
2869 
2870 	return NOTIFY_DONE;
2871 }
2872 
2873 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2874 {
2875 	int err;
2876 
2877 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2878 			      &mlxsw_sp_neigh_ht_params);
2879 	if (err)
2880 		return err;
2881 
2882 	/* Initialize the polling interval according to the default
2883 	 * table.
2884 	 */
2885 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2886 
2887 	/* Create the delayed works for the activity_update */
2888 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2889 			  mlxsw_sp_router_neighs_update_work);
2890 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2891 			  mlxsw_sp_router_probe_unresolved_nexthops);
2892 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2893 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2894 	return 0;
2895 }
2896 
2897 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2898 {
2899 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2900 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2901 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2902 }
2903 
2904 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2905 					 struct mlxsw_sp_rif *rif)
2906 {
2907 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2908 
2909 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2910 				 rif_list_node) {
2911 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2912 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2913 	}
2914 }
2915 
2916 enum mlxsw_sp_nexthop_type {
2917 	MLXSW_SP_NEXTHOP_TYPE_ETH,
2918 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
2919 };
2920 
2921 enum mlxsw_sp_nexthop_action {
2922 	/* Nexthop forwards packets to an egress RIF */
2923 	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
2924 	/* Nexthop discards packets */
2925 	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
2926 	/* Nexthop traps packets */
2927 	MLXSW_SP_NEXTHOP_ACTION_TRAP,
2928 };
2929 
2930 struct mlxsw_sp_nexthop_key {
2931 	struct fib_nh *fib_nh;
2932 };
2933 
2934 struct mlxsw_sp_nexthop {
2935 	struct list_head neigh_list_node; /* member of neigh entry list */
2936 	struct list_head rif_list_node;
2937 	struct list_head router_list_node;
2938 	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
2939 						   * this nexthop belongs to
2940 						   */
2941 	struct rhash_head ht_node;
2942 	struct neigh_table *neigh_tbl;
2943 	struct mlxsw_sp_nexthop_key key;
2944 	unsigned char gw_addr[sizeof(struct in6_addr)];
2945 	int ifindex;
2946 	int nh_weight;
2947 	int norm_nh_weight;
2948 	int num_adj_entries;
2949 	struct mlxsw_sp_rif *rif;
2950 	u8 should_offload:1, /* set indicates this nexthop should be written
2951 			      * to the adjacency table.
2952 			      */
2953 	   offloaded:1, /* set indicates this nexthop was written to the
2954 			 * adjacency table.
2955 			 */
2956 	   update:1; /* set indicates this nexthop should be updated in the
2957 		      * adjacency table (f.e., its MAC changed).
2958 		      */
2959 	enum mlxsw_sp_nexthop_action action;
2960 	enum mlxsw_sp_nexthop_type type;
2961 	union {
2962 		struct mlxsw_sp_neigh_entry *neigh_entry;
2963 		struct mlxsw_sp_ipip_entry *ipip_entry;
2964 	};
2965 	unsigned int counter_index;
2966 	bool counter_valid;
2967 };
2968 
2969 enum mlxsw_sp_nexthop_group_type {
2970 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
2971 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
2972 	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
2973 };
2974 
2975 struct mlxsw_sp_nexthop_group_info {
2976 	struct mlxsw_sp_nexthop_group *nh_grp;
2977 	u32 adj_index;
2978 	u16 ecmp_size;
2979 	u16 count;
2980 	int sum_norm_weight;
2981 	u8 adj_index_valid:1,
2982 	   gateway:1, /* routes using the group use a gateway */
2983 	   is_resilient:1;
2984 	struct list_head list; /* member in nh_res_grp_list */
2985 	struct mlxsw_sp_nexthop nexthops[0];
2986 #define nh_rif	nexthops[0].rif
2987 };
2988 
2989 struct mlxsw_sp_nexthop_group_vr_key {
2990 	u16 vr_id;
2991 	enum mlxsw_sp_l3proto proto;
2992 };
2993 
2994 struct mlxsw_sp_nexthop_group_vr_entry {
2995 	struct list_head list; /* member in vr_list */
2996 	struct rhash_head ht_node; /* member in vr_ht */
2997 	refcount_t ref_count;
2998 	struct mlxsw_sp_nexthop_group_vr_key key;
2999 };
3000 
3001 struct mlxsw_sp_nexthop_group {
3002 	struct rhash_head ht_node;
3003 	struct list_head fib_list; /* list of fib entries that use this group */
3004 	union {
3005 		struct {
3006 			struct fib_info *fi;
3007 		} ipv4;
3008 		struct {
3009 			u32 id;
3010 		} obj;
3011 	};
3012 	struct mlxsw_sp_nexthop_group_info *nhgi;
3013 	struct list_head vr_list;
3014 	struct rhashtable vr_ht;
3015 	enum mlxsw_sp_nexthop_group_type type;
3016 	bool can_destroy;
3017 };
3018 
3019 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
3020 				    struct mlxsw_sp_nexthop *nh)
3021 {
3022 	struct devlink *devlink;
3023 
3024 	devlink = priv_to_devlink(mlxsw_sp->core);
3025 	if (!devlink_dpipe_table_counter_enabled(devlink,
3026 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
3027 		return;
3028 
3029 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3030 		return;
3031 
3032 	nh->counter_valid = true;
3033 }
3034 
3035 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3036 				   struct mlxsw_sp_nexthop *nh)
3037 {
3038 	if (!nh->counter_valid)
3039 		return;
3040 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3041 	nh->counter_valid = false;
3042 }
3043 
3044 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3045 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3046 {
3047 	if (!nh->counter_valid)
3048 		return -EINVAL;
3049 
3050 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3051 					 p_counter, NULL);
3052 }
3053 
3054 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3055 					       struct mlxsw_sp_nexthop *nh)
3056 {
3057 	if (!nh) {
3058 		if (list_empty(&router->nexthop_list))
3059 			return NULL;
3060 		else
3061 			return list_first_entry(&router->nexthop_list,
3062 						typeof(*nh), router_list_node);
3063 	}
3064 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3065 		return NULL;
3066 	return list_next_entry(nh, router_list_node);
3067 }
3068 
3069 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3070 {
3071 	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3072 }
3073 
3074 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3075 {
3076 	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3077 	    !mlxsw_sp_nexthop_is_forward(nh))
3078 		return NULL;
3079 	return nh->neigh_entry->ha;
3080 }
3081 
3082 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3083 			     u32 *p_adj_size, u32 *p_adj_hash_index)
3084 {
3085 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3086 	u32 adj_hash_index = 0;
3087 	int i;
3088 
3089 	if (!nh->offloaded || !nhgi->adj_index_valid)
3090 		return -EINVAL;
3091 
3092 	*p_adj_index = nhgi->adj_index;
3093 	*p_adj_size = nhgi->ecmp_size;
3094 
3095 	for (i = 0; i < nhgi->count; i++) {
3096 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3097 
3098 		if (nh_iter == nh)
3099 			break;
3100 		if (nh_iter->offloaded)
3101 			adj_hash_index += nh_iter->num_adj_entries;
3102 	}
3103 
3104 	*p_adj_hash_index = adj_hash_index;
3105 	return 0;
3106 }
3107 
3108 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3109 {
3110 	return nh->rif;
3111 }
3112 
3113 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3114 {
3115 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3116 	int i;
3117 
3118 	for (i = 0; i < nhgi->count; i++) {
3119 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3120 
3121 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3122 			return true;
3123 	}
3124 	return false;
3125 }
3126 
3127 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3128 	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3129 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3130 	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3131 	.automatic_shrinking = true,
3132 };
3133 
3134 static struct mlxsw_sp_nexthop_group_vr_entry *
3135 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3136 				       const struct mlxsw_sp_fib *fib)
3137 {
3138 	struct mlxsw_sp_nexthop_group_vr_key key;
3139 
3140 	memset(&key, 0, sizeof(key));
3141 	key.vr_id = fib->vr->id;
3142 	key.proto = fib->proto;
3143 	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3144 				      mlxsw_sp_nexthop_group_vr_ht_params);
3145 }
3146 
3147 static int
3148 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3149 				       const struct mlxsw_sp_fib *fib)
3150 {
3151 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3152 	int err;
3153 
3154 	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3155 	if (!vr_entry)
3156 		return -ENOMEM;
3157 
3158 	vr_entry->key.vr_id = fib->vr->id;
3159 	vr_entry->key.proto = fib->proto;
3160 	refcount_set(&vr_entry->ref_count, 1);
3161 
3162 	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3163 				     mlxsw_sp_nexthop_group_vr_ht_params);
3164 	if (err)
3165 		goto err_hashtable_insert;
3166 
3167 	list_add(&vr_entry->list, &nh_grp->vr_list);
3168 
3169 	return 0;
3170 
3171 err_hashtable_insert:
3172 	kfree(vr_entry);
3173 	return err;
3174 }
3175 
3176 static void
3177 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3178 					struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3179 {
3180 	list_del(&vr_entry->list);
3181 	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3182 			       mlxsw_sp_nexthop_group_vr_ht_params);
3183 	kfree(vr_entry);
3184 }
3185 
3186 static int
3187 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3188 			       const struct mlxsw_sp_fib *fib)
3189 {
3190 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3191 
3192 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3193 	if (vr_entry) {
3194 		refcount_inc(&vr_entry->ref_count);
3195 		return 0;
3196 	}
3197 
3198 	return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3199 }
3200 
3201 static void
3202 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3203 				 const struct mlxsw_sp_fib *fib)
3204 {
3205 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3206 
3207 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3208 	if (WARN_ON_ONCE(!vr_entry))
3209 		return;
3210 
3211 	if (!refcount_dec_and_test(&vr_entry->ref_count))
3212 		return;
3213 
3214 	mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3215 }
3216 
3217 struct mlxsw_sp_nexthop_group_cmp_arg {
3218 	enum mlxsw_sp_nexthop_group_type type;
3219 	union {
3220 		struct fib_info *fi;
3221 		struct mlxsw_sp_fib6_entry *fib6_entry;
3222 		u32 id;
3223 	};
3224 };
3225 
3226 static bool
3227 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3228 				    const struct in6_addr *gw, int ifindex,
3229 				    int weight)
3230 {
3231 	int i;
3232 
3233 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3234 		const struct mlxsw_sp_nexthop *nh;
3235 
3236 		nh = &nh_grp->nhgi->nexthops[i];
3237 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3238 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3239 			return true;
3240 	}
3241 
3242 	return false;
3243 }
3244 
3245 static bool
3246 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3247 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
3248 {
3249 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3250 
3251 	if (nh_grp->nhgi->count != fib6_entry->nrt6)
3252 		return false;
3253 
3254 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3255 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3256 		struct in6_addr *gw;
3257 		int ifindex, weight;
3258 
3259 		ifindex = fib6_nh->fib_nh_dev->ifindex;
3260 		weight = fib6_nh->fib_nh_weight;
3261 		gw = &fib6_nh->fib_nh_gw6;
3262 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3263 							 weight))
3264 			return false;
3265 	}
3266 
3267 	return true;
3268 }
3269 
3270 static int
3271 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3272 {
3273 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3274 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3275 
3276 	if (nh_grp->type != cmp_arg->type)
3277 		return 1;
3278 
3279 	switch (cmp_arg->type) {
3280 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3281 		return cmp_arg->fi != nh_grp->ipv4.fi;
3282 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3283 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3284 						    cmp_arg->fib6_entry);
3285 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3286 		return cmp_arg->id != nh_grp->obj.id;
3287 	default:
3288 		WARN_ON(1);
3289 		return 1;
3290 	}
3291 }
3292 
3293 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3294 {
3295 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
3296 	const struct mlxsw_sp_nexthop *nh;
3297 	struct fib_info *fi;
3298 	unsigned int val;
3299 	int i;
3300 
3301 	switch (nh_grp->type) {
3302 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3303 		fi = nh_grp->ipv4.fi;
3304 		return jhash(&fi, sizeof(fi), seed);
3305 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3306 		val = nh_grp->nhgi->count;
3307 		for (i = 0; i < nh_grp->nhgi->count; i++) {
3308 			nh = &nh_grp->nhgi->nexthops[i];
3309 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3310 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3311 		}
3312 		return jhash(&val, sizeof(val), seed);
3313 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3314 		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3315 	default:
3316 		WARN_ON(1);
3317 		return 0;
3318 	}
3319 }
3320 
3321 static u32
3322 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3323 {
3324 	unsigned int val = fib6_entry->nrt6;
3325 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3326 
3327 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3328 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3329 		struct net_device *dev = fib6_nh->fib_nh_dev;
3330 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3331 
3332 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3333 		val ^= jhash(gw, sizeof(*gw), seed);
3334 	}
3335 
3336 	return jhash(&val, sizeof(val), seed);
3337 }
3338 
3339 static u32
3340 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3341 {
3342 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3343 
3344 	switch (cmp_arg->type) {
3345 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3346 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3347 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3348 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3349 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3350 		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3351 	default:
3352 		WARN_ON(1);
3353 		return 0;
3354 	}
3355 }
3356 
3357 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3358 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3359 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3360 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3361 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3362 };
3363 
3364 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3365 					 struct mlxsw_sp_nexthop_group *nh_grp)
3366 {
3367 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3368 	    !nh_grp->nhgi->gateway)
3369 		return 0;
3370 
3371 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3372 				      &nh_grp->ht_node,
3373 				      mlxsw_sp_nexthop_group_ht_params);
3374 }
3375 
3376 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3377 					  struct mlxsw_sp_nexthop_group *nh_grp)
3378 {
3379 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3380 	    !nh_grp->nhgi->gateway)
3381 		return;
3382 
3383 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3384 			       &nh_grp->ht_node,
3385 			       mlxsw_sp_nexthop_group_ht_params);
3386 }
3387 
3388 static struct mlxsw_sp_nexthop_group *
3389 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3390 			       struct fib_info *fi)
3391 {
3392 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3393 
3394 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3395 	cmp_arg.fi = fi;
3396 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3397 				      &cmp_arg,
3398 				      mlxsw_sp_nexthop_group_ht_params);
3399 }
3400 
3401 static struct mlxsw_sp_nexthop_group *
3402 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3403 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3404 {
3405 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3406 
3407 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3408 	cmp_arg.fib6_entry = fib6_entry;
3409 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3410 				      &cmp_arg,
3411 				      mlxsw_sp_nexthop_group_ht_params);
3412 }
3413 
3414 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3415 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3416 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3417 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3418 };
3419 
3420 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3421 				   struct mlxsw_sp_nexthop *nh)
3422 {
3423 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3424 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3425 }
3426 
3427 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3428 				    struct mlxsw_sp_nexthop *nh)
3429 {
3430 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3431 			       mlxsw_sp_nexthop_ht_params);
3432 }
3433 
3434 static struct mlxsw_sp_nexthop *
3435 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3436 			struct mlxsw_sp_nexthop_key key)
3437 {
3438 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3439 				      mlxsw_sp_nexthop_ht_params);
3440 }
3441 
3442 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3443 					     enum mlxsw_sp_l3proto proto,
3444 					     u16 vr_id,
3445 					     u32 adj_index, u16 ecmp_size,
3446 					     u32 new_adj_index,
3447 					     u16 new_ecmp_size)
3448 {
3449 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3450 
3451 	mlxsw_reg_raleu_pack(raleu_pl,
3452 			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3453 			     adj_index, ecmp_size, new_adj_index,
3454 			     new_ecmp_size);
3455 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3456 }
3457 
3458 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3459 					  struct mlxsw_sp_nexthop_group *nh_grp,
3460 					  u32 old_adj_index, u16 old_ecmp_size)
3461 {
3462 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3463 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3464 	int err;
3465 
3466 	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3467 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3468 							vr_entry->key.proto,
3469 							vr_entry->key.vr_id,
3470 							old_adj_index,
3471 							old_ecmp_size,
3472 							nhgi->adj_index,
3473 							nhgi->ecmp_size);
3474 		if (err)
3475 			goto err_mass_update_vr;
3476 	}
3477 	return 0;
3478 
3479 err_mass_update_vr:
3480 	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3481 		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3482 						  vr_entry->key.vr_id,
3483 						  nhgi->adj_index,
3484 						  nhgi->ecmp_size,
3485 						  old_adj_index, old_ecmp_size);
3486 	return err;
3487 }
3488 
3489 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3490 					 u32 adj_index,
3491 					 struct mlxsw_sp_nexthop *nh,
3492 					 bool force, char *ratr_pl)
3493 {
3494 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3495 	enum mlxsw_reg_ratr_op op;
3496 	u16 rif_index;
3497 
3498 	rif_index = nh->rif ? nh->rif->rif_index :
3499 			      mlxsw_sp->router->lb_rif_index;
3500 	op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3501 		     MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3502 	mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3503 			    adj_index, rif_index);
3504 	switch (nh->action) {
3505 	case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3506 		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3507 		break;
3508 	case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3509 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3510 					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3511 		break;
3512 	case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3513 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3514 					       MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3515 		mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3516 		break;
3517 	default:
3518 		WARN_ON_ONCE(1);
3519 		return -EINVAL;
3520 	}
3521 	if (nh->counter_valid)
3522 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3523 	else
3524 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3525 
3526 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3527 }
3528 
3529 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3530 				struct mlxsw_sp_nexthop *nh, bool force,
3531 				char *ratr_pl)
3532 {
3533 	int i;
3534 
3535 	for (i = 0; i < nh->num_adj_entries; i++) {
3536 		int err;
3537 
3538 		err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3539 						    nh, force, ratr_pl);
3540 		if (err)
3541 			return err;
3542 	}
3543 
3544 	return 0;
3545 }
3546 
3547 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3548 					  u32 adj_index,
3549 					  struct mlxsw_sp_nexthop *nh,
3550 					  bool force, char *ratr_pl)
3551 {
3552 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3553 
3554 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3555 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3556 					force, ratr_pl);
3557 }
3558 
3559 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3560 					u32 adj_index,
3561 					struct mlxsw_sp_nexthop *nh, bool force,
3562 					char *ratr_pl)
3563 {
3564 	int i;
3565 
3566 	for (i = 0; i < nh->num_adj_entries; i++) {
3567 		int err;
3568 
3569 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3570 						     nh, force, ratr_pl);
3571 		if (err)
3572 			return err;
3573 	}
3574 
3575 	return 0;
3576 }
3577 
3578 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3579 				   struct mlxsw_sp_nexthop *nh, bool force,
3580 				   char *ratr_pl)
3581 {
3582 	/* When action is discard or trap, the nexthop must be
3583 	 * programmed as an Ethernet nexthop.
3584 	 */
3585 	if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3586 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3587 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3588 		return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3589 						   force, ratr_pl);
3590 	else
3591 		return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3592 						    force, ratr_pl);
3593 }
3594 
3595 static int
3596 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3597 			      struct mlxsw_sp_nexthop_group_info *nhgi,
3598 			      bool reallocate)
3599 {
3600 	char ratr_pl[MLXSW_REG_RATR_LEN];
3601 	u32 adj_index = nhgi->adj_index; /* base */
3602 	struct mlxsw_sp_nexthop *nh;
3603 	int i;
3604 
3605 	for (i = 0; i < nhgi->count; i++) {
3606 		nh = &nhgi->nexthops[i];
3607 
3608 		if (!nh->should_offload) {
3609 			nh->offloaded = 0;
3610 			continue;
3611 		}
3612 
3613 		if (nh->update || reallocate) {
3614 			int err = 0;
3615 
3616 			err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3617 						      true, ratr_pl);
3618 			if (err)
3619 				return err;
3620 			nh->update = 0;
3621 			nh->offloaded = 1;
3622 		}
3623 		adj_index += nh->num_adj_entries;
3624 	}
3625 	return 0;
3626 }
3627 
3628 static int
3629 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3630 				    struct mlxsw_sp_nexthop_group *nh_grp)
3631 {
3632 	struct mlxsw_sp_fib_entry *fib_entry;
3633 	int err;
3634 
3635 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3636 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3637 		if (err)
3638 			return err;
3639 	}
3640 	return 0;
3641 }
3642 
3643 struct mlxsw_sp_adj_grp_size_range {
3644 	u16 start; /* Inclusive */
3645 	u16 end; /* Inclusive */
3646 };
3647 
3648 /* Ordered by range start value */
3649 static const struct mlxsw_sp_adj_grp_size_range
3650 mlxsw_sp1_adj_grp_size_ranges[] = {
3651 	{ .start = 1, .end = 64 },
3652 	{ .start = 512, .end = 512 },
3653 	{ .start = 1024, .end = 1024 },
3654 	{ .start = 2048, .end = 2048 },
3655 	{ .start = 4096, .end = 4096 },
3656 };
3657 
3658 /* Ordered by range start value */
3659 static const struct mlxsw_sp_adj_grp_size_range
3660 mlxsw_sp2_adj_grp_size_ranges[] = {
3661 	{ .start = 1, .end = 128 },
3662 	{ .start = 256, .end = 256 },
3663 	{ .start = 512, .end = 512 },
3664 	{ .start = 1024, .end = 1024 },
3665 	{ .start = 2048, .end = 2048 },
3666 	{ .start = 4096, .end = 4096 },
3667 };
3668 
3669 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3670 					   u16 *p_adj_grp_size)
3671 {
3672 	int i;
3673 
3674 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3675 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3676 
3677 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3678 
3679 		if (*p_adj_grp_size >= size_range->start &&
3680 		    *p_adj_grp_size <= size_range->end)
3681 			return;
3682 
3683 		if (*p_adj_grp_size <= size_range->end) {
3684 			*p_adj_grp_size = size_range->end;
3685 			return;
3686 		}
3687 	}
3688 }
3689 
3690 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3691 					     u16 *p_adj_grp_size,
3692 					     unsigned int alloc_size)
3693 {
3694 	int i;
3695 
3696 	for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3697 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3698 
3699 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3700 
3701 		if (alloc_size >= size_range->end) {
3702 			*p_adj_grp_size = size_range->end;
3703 			return;
3704 		}
3705 	}
3706 }
3707 
3708 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3709 				     u16 *p_adj_grp_size)
3710 {
3711 	unsigned int alloc_size;
3712 	int err;
3713 
3714 	/* Round up the requested group size to the next size supported
3715 	 * by the device and make sure the request can be satisfied.
3716 	 */
3717 	mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3718 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3719 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3720 					      *p_adj_grp_size, &alloc_size);
3721 	if (err)
3722 		return err;
3723 	/* It is possible the allocation results in more allocated
3724 	 * entries than requested. Try to use as much of them as
3725 	 * possible.
3726 	 */
3727 	mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3728 
3729 	return 0;
3730 }
3731 
3732 static void
3733 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3734 {
3735 	int i, g = 0, sum_norm_weight = 0;
3736 	struct mlxsw_sp_nexthop *nh;
3737 
3738 	for (i = 0; i < nhgi->count; i++) {
3739 		nh = &nhgi->nexthops[i];
3740 
3741 		if (!nh->should_offload)
3742 			continue;
3743 		if (g > 0)
3744 			g = gcd(nh->nh_weight, g);
3745 		else
3746 			g = nh->nh_weight;
3747 	}
3748 
3749 	for (i = 0; i < nhgi->count; i++) {
3750 		nh = &nhgi->nexthops[i];
3751 
3752 		if (!nh->should_offload)
3753 			continue;
3754 		nh->norm_nh_weight = nh->nh_weight / g;
3755 		sum_norm_weight += nh->norm_nh_weight;
3756 	}
3757 
3758 	nhgi->sum_norm_weight = sum_norm_weight;
3759 }
3760 
3761 static void
3762 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3763 {
3764 	int i, weight = 0, lower_bound = 0;
3765 	int total = nhgi->sum_norm_weight;
3766 	u16 ecmp_size = nhgi->ecmp_size;
3767 
3768 	for (i = 0; i < nhgi->count; i++) {
3769 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3770 		int upper_bound;
3771 
3772 		if (!nh->should_offload)
3773 			continue;
3774 		weight += nh->norm_nh_weight;
3775 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3776 		nh->num_adj_entries = upper_bound - lower_bound;
3777 		lower_bound = upper_bound;
3778 	}
3779 }
3780 
3781 static struct mlxsw_sp_nexthop *
3782 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3783 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3784 
3785 static void
3786 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3787 					struct mlxsw_sp_nexthop_group *nh_grp)
3788 {
3789 	int i;
3790 
3791 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3792 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3793 
3794 		if (nh->offloaded)
3795 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3796 		else
3797 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3798 	}
3799 }
3800 
3801 static void
3802 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3803 					  struct mlxsw_sp_fib6_entry *fib6_entry)
3804 {
3805 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3806 
3807 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3808 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3809 		struct mlxsw_sp_nexthop *nh;
3810 
3811 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3812 		if (nh && nh->offloaded)
3813 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3814 		else
3815 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3816 	}
3817 }
3818 
3819 static void
3820 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3821 					struct mlxsw_sp_nexthop_group *nh_grp)
3822 {
3823 	struct mlxsw_sp_fib6_entry *fib6_entry;
3824 
3825 	/* Unfortunately, in IPv6 the route and the nexthop are described by
3826 	 * the same struct, so we need to iterate over all the routes using the
3827 	 * nexthop group and set / clear the offload indication for them.
3828 	 */
3829 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3830 			    common.nexthop_group_node)
3831 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3832 }
3833 
3834 static void
3835 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3836 					const struct mlxsw_sp_nexthop *nh,
3837 					u16 bucket_index)
3838 {
3839 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3840 	bool offload = false, trap = false;
3841 
3842 	if (nh->offloaded) {
3843 		if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3844 			trap = true;
3845 		else
3846 			offload = true;
3847 	}
3848 	nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3849 				    bucket_index, offload, trap);
3850 }
3851 
3852 static void
3853 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3854 					   struct mlxsw_sp_nexthop_group *nh_grp)
3855 {
3856 	int i;
3857 
3858 	/* Do not update the flags if the nexthop group is being destroyed
3859 	 * since:
3860 	 * 1. The nexthop objects is being deleted, in which case the flags are
3861 	 * irrelevant.
3862 	 * 2. The nexthop group was replaced by a newer group, in which case
3863 	 * the flags of the nexthop object were already updated based on the
3864 	 * new group.
3865 	 */
3866 	if (nh_grp->can_destroy)
3867 		return;
3868 
3869 	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3870 			     nh_grp->nhgi->adj_index_valid, false);
3871 
3872 	/* Update flags of individual nexthop buckets in case of a resilient
3873 	 * nexthop group.
3874 	 */
3875 	if (!nh_grp->nhgi->is_resilient)
3876 		return;
3877 
3878 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3879 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3880 
3881 		mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3882 	}
3883 }
3884 
3885 static void
3886 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3887 				       struct mlxsw_sp_nexthop_group *nh_grp)
3888 {
3889 	switch (nh_grp->type) {
3890 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3891 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3892 		break;
3893 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3894 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3895 		break;
3896 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3897 		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3898 		break;
3899 	}
3900 }
3901 
3902 static int
3903 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3904 			       struct mlxsw_sp_nexthop_group *nh_grp)
3905 {
3906 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3907 	u16 ecmp_size, old_ecmp_size;
3908 	struct mlxsw_sp_nexthop *nh;
3909 	bool offload_change = false;
3910 	u32 adj_index;
3911 	bool old_adj_index_valid;
3912 	u32 old_adj_index;
3913 	int i, err2, err;
3914 
3915 	if (!nhgi->gateway)
3916 		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3917 
3918 	for (i = 0; i < nhgi->count; i++) {
3919 		nh = &nhgi->nexthops[i];
3920 
3921 		if (nh->should_offload != nh->offloaded) {
3922 			offload_change = true;
3923 			if (nh->should_offload)
3924 				nh->update = 1;
3925 		}
3926 	}
3927 	if (!offload_change) {
3928 		/* Nothing was added or removed, so no need to reallocate. Just
3929 		 * update MAC on existing adjacency indexes.
3930 		 */
3931 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3932 		if (err) {
3933 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3934 			goto set_trap;
3935 		}
3936 		/* Flags of individual nexthop buckets might need to be
3937 		 * updated.
3938 		 */
3939 		mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3940 		return 0;
3941 	}
3942 	mlxsw_sp_nexthop_group_normalize(nhgi);
3943 	if (!nhgi->sum_norm_weight) {
3944 		/* No neigh of this group is connected so we just set
3945 		 * the trap and let everthing flow through kernel.
3946 		 */
3947 		err = 0;
3948 		goto set_trap;
3949 	}
3950 
3951 	ecmp_size = nhgi->sum_norm_weight;
3952 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3953 	if (err)
3954 		/* No valid allocation size available. */
3955 		goto set_trap;
3956 
3957 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3958 				  ecmp_size, &adj_index);
3959 	if (err) {
3960 		/* We ran out of KVD linear space, just set the
3961 		 * trap and let everything flow through kernel.
3962 		 */
3963 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3964 		goto set_trap;
3965 	}
3966 	old_adj_index_valid = nhgi->adj_index_valid;
3967 	old_adj_index = nhgi->adj_index;
3968 	old_ecmp_size = nhgi->ecmp_size;
3969 	nhgi->adj_index_valid = 1;
3970 	nhgi->adj_index = adj_index;
3971 	nhgi->ecmp_size = ecmp_size;
3972 	mlxsw_sp_nexthop_group_rebalance(nhgi);
3973 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
3974 	if (err) {
3975 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3976 		goto set_trap;
3977 	}
3978 
3979 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3980 
3981 	if (!old_adj_index_valid) {
3982 		/* The trap was set for fib entries, so we have to call
3983 		 * fib entry update to unset it and use adjacency index.
3984 		 */
3985 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3986 		if (err) {
3987 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3988 			goto set_trap;
3989 		}
3990 		return 0;
3991 	}
3992 
3993 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3994 					     old_adj_index, old_ecmp_size);
3995 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3996 			   old_ecmp_size, old_adj_index);
3997 	if (err) {
3998 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3999 		goto set_trap;
4000 	}
4001 
4002 	return 0;
4003 
4004 set_trap:
4005 	old_adj_index_valid = nhgi->adj_index_valid;
4006 	nhgi->adj_index_valid = 0;
4007 	for (i = 0; i < nhgi->count; i++) {
4008 		nh = &nhgi->nexthops[i];
4009 		nh->offloaded = 0;
4010 	}
4011 	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4012 	if (err2)
4013 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4014 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4015 	if (old_adj_index_valid)
4016 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4017 				   nhgi->ecmp_size, nhgi->adj_index);
4018 	return err;
4019 }
4020 
4021 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4022 					    bool removing)
4023 {
4024 	if (!removing) {
4025 		nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4026 		nh->should_offload = 1;
4027 	} else if (nh->nhgi->is_resilient) {
4028 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4029 		nh->should_offload = 1;
4030 	} else {
4031 		nh->should_offload = 0;
4032 	}
4033 	nh->update = 1;
4034 }
4035 
4036 static int
4037 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4038 				    struct mlxsw_sp_neigh_entry *neigh_entry)
4039 {
4040 	struct neighbour *n, *old_n = neigh_entry->key.n;
4041 	struct mlxsw_sp_nexthop *nh;
4042 	bool entry_connected;
4043 	u8 nud_state, dead;
4044 	int err;
4045 
4046 	nh = list_first_entry(&neigh_entry->nexthop_list,
4047 			      struct mlxsw_sp_nexthop, neigh_list_node);
4048 
4049 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4050 	if (!n) {
4051 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4052 		if (IS_ERR(n))
4053 			return PTR_ERR(n);
4054 		neigh_event_send(n, NULL);
4055 	}
4056 
4057 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4058 	neigh_entry->key.n = n;
4059 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4060 	if (err)
4061 		goto err_neigh_entry_insert;
4062 
4063 	read_lock_bh(&n->lock);
4064 	nud_state = n->nud_state;
4065 	dead = n->dead;
4066 	read_unlock_bh(&n->lock);
4067 	entry_connected = nud_state & NUD_VALID && !dead;
4068 
4069 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4070 			    neigh_list_node) {
4071 		neigh_release(old_n);
4072 		neigh_clone(n);
4073 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4074 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4075 	}
4076 
4077 	neigh_release(n);
4078 
4079 	return 0;
4080 
4081 err_neigh_entry_insert:
4082 	neigh_entry->key.n = old_n;
4083 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4084 	neigh_release(n);
4085 	return err;
4086 }
4087 
4088 static void
4089 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4090 			      struct mlxsw_sp_neigh_entry *neigh_entry,
4091 			      bool removing, bool dead)
4092 {
4093 	struct mlxsw_sp_nexthop *nh;
4094 
4095 	if (list_empty(&neigh_entry->nexthop_list))
4096 		return;
4097 
4098 	if (dead) {
4099 		int err;
4100 
4101 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4102 							  neigh_entry);
4103 		if (err)
4104 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4105 		return;
4106 	}
4107 
4108 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4109 			    neigh_list_node) {
4110 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4111 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4112 	}
4113 }
4114 
4115 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
4116 				      struct mlxsw_sp_rif *rif)
4117 {
4118 	if (nh->rif)
4119 		return;
4120 
4121 	nh->rif = rif;
4122 	list_add(&nh->rif_list_node, &rif->nexthop_list);
4123 }
4124 
4125 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
4126 {
4127 	if (!nh->rif)
4128 		return;
4129 
4130 	list_del(&nh->rif_list_node);
4131 	nh->rif = NULL;
4132 }
4133 
4134 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4135 				       struct mlxsw_sp_nexthop *nh)
4136 {
4137 	struct mlxsw_sp_neigh_entry *neigh_entry;
4138 	struct neighbour *n;
4139 	u8 nud_state, dead;
4140 	int err;
4141 
4142 	if (!nh->nhgi->gateway || nh->neigh_entry)
4143 		return 0;
4144 
4145 	/* Take a reference of neigh here ensuring that neigh would
4146 	 * not be destructed before the nexthop entry is finished.
4147 	 * The reference is taken either in neigh_lookup() or
4148 	 * in neigh_create() in case n is not found.
4149 	 */
4150 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4151 	if (!n) {
4152 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4153 		if (IS_ERR(n))
4154 			return PTR_ERR(n);
4155 		neigh_event_send(n, NULL);
4156 	}
4157 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4158 	if (!neigh_entry) {
4159 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4160 		if (IS_ERR(neigh_entry)) {
4161 			err = -EINVAL;
4162 			goto err_neigh_entry_create;
4163 		}
4164 	}
4165 
4166 	/* If that is the first nexthop connected to that neigh, add to
4167 	 * nexthop_neighs_list
4168 	 */
4169 	if (list_empty(&neigh_entry->nexthop_list))
4170 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4171 			      &mlxsw_sp->router->nexthop_neighs_list);
4172 
4173 	nh->neigh_entry = neigh_entry;
4174 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4175 	read_lock_bh(&n->lock);
4176 	nud_state = n->nud_state;
4177 	dead = n->dead;
4178 	read_unlock_bh(&n->lock);
4179 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4180 
4181 	return 0;
4182 
4183 err_neigh_entry_create:
4184 	neigh_release(n);
4185 	return err;
4186 }
4187 
4188 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4189 					struct mlxsw_sp_nexthop *nh)
4190 {
4191 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4192 	struct neighbour *n;
4193 
4194 	if (!neigh_entry)
4195 		return;
4196 	n = neigh_entry->key.n;
4197 
4198 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4199 	list_del(&nh->neigh_list_node);
4200 	nh->neigh_entry = NULL;
4201 
4202 	/* If that is the last nexthop connected to that neigh, remove from
4203 	 * nexthop_neighs_list
4204 	 */
4205 	if (list_empty(&neigh_entry->nexthop_list))
4206 		list_del(&neigh_entry->nexthop_neighs_list_node);
4207 
4208 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4209 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4210 
4211 	neigh_release(n);
4212 }
4213 
4214 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4215 {
4216 	struct net_device *ul_dev;
4217 	bool is_up;
4218 
4219 	rcu_read_lock();
4220 	ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4221 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4222 	rcu_read_unlock();
4223 
4224 	return is_up;
4225 }
4226 
4227 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4228 				       struct mlxsw_sp_nexthop *nh,
4229 				       struct mlxsw_sp_ipip_entry *ipip_entry)
4230 {
4231 	bool removing;
4232 
4233 	if (!nh->nhgi->gateway || nh->ipip_entry)
4234 		return;
4235 
4236 	nh->ipip_entry = ipip_entry;
4237 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4238 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
4239 	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4240 }
4241 
4242 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4243 				       struct mlxsw_sp_nexthop *nh)
4244 {
4245 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4246 
4247 	if (!ipip_entry)
4248 		return;
4249 
4250 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4251 	nh->ipip_entry = NULL;
4252 }
4253 
4254 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4255 					const struct fib_nh *fib_nh,
4256 					enum mlxsw_sp_ipip_type *p_ipipt)
4257 {
4258 	struct net_device *dev = fib_nh->fib_nh_dev;
4259 
4260 	return dev &&
4261 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4262 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4263 }
4264 
4265 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4266 				      struct mlxsw_sp_nexthop *nh,
4267 				      const struct net_device *dev)
4268 {
4269 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4270 	struct mlxsw_sp_ipip_entry *ipip_entry;
4271 	struct mlxsw_sp_rif *rif;
4272 	int err;
4273 
4274 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4275 	if (ipip_entry) {
4276 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4277 		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4278 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4279 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4280 			return 0;
4281 		}
4282 	}
4283 
4284 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4285 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4286 	if (!rif)
4287 		return 0;
4288 
4289 	mlxsw_sp_nexthop_rif_init(nh, rif);
4290 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4291 	if (err)
4292 		goto err_neigh_init;
4293 
4294 	return 0;
4295 
4296 err_neigh_init:
4297 	mlxsw_sp_nexthop_rif_fini(nh);
4298 	return err;
4299 }
4300 
4301 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4302 				       struct mlxsw_sp_nexthop *nh)
4303 {
4304 	switch (nh->type) {
4305 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4306 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4307 		mlxsw_sp_nexthop_rif_fini(nh);
4308 		break;
4309 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4310 		mlxsw_sp_nexthop_rif_fini(nh);
4311 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4312 		break;
4313 	}
4314 }
4315 
4316 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4317 				  struct mlxsw_sp_nexthop_group *nh_grp,
4318 				  struct mlxsw_sp_nexthop *nh,
4319 				  struct fib_nh *fib_nh)
4320 {
4321 	struct net_device *dev = fib_nh->fib_nh_dev;
4322 	struct in_device *in_dev;
4323 	int err;
4324 
4325 	nh->nhgi = nh_grp->nhgi;
4326 	nh->key.fib_nh = fib_nh;
4327 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4328 	nh->nh_weight = fib_nh->fib_nh_weight;
4329 #else
4330 	nh->nh_weight = 1;
4331 #endif
4332 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4333 	nh->neigh_tbl = &arp_tbl;
4334 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4335 	if (err)
4336 		return err;
4337 
4338 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4339 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4340 
4341 	if (!dev)
4342 		return 0;
4343 	nh->ifindex = dev->ifindex;
4344 
4345 	rcu_read_lock();
4346 	in_dev = __in_dev_get_rcu(dev);
4347 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4348 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4349 		rcu_read_unlock();
4350 		return 0;
4351 	}
4352 	rcu_read_unlock();
4353 
4354 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4355 	if (err)
4356 		goto err_nexthop_neigh_init;
4357 
4358 	return 0;
4359 
4360 err_nexthop_neigh_init:
4361 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4362 	return err;
4363 }
4364 
4365 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4366 				   struct mlxsw_sp_nexthop *nh)
4367 {
4368 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4369 	list_del(&nh->router_list_node);
4370 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4371 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4372 }
4373 
4374 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4375 				    unsigned long event, struct fib_nh *fib_nh)
4376 {
4377 	struct mlxsw_sp_nexthop_key key;
4378 	struct mlxsw_sp_nexthop *nh;
4379 
4380 	key.fib_nh = fib_nh;
4381 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4382 	if (!nh)
4383 		return;
4384 
4385 	switch (event) {
4386 	case FIB_EVENT_NH_ADD:
4387 		mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4388 		break;
4389 	case FIB_EVENT_NH_DEL:
4390 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4391 		break;
4392 	}
4393 
4394 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4395 }
4396 
4397 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4398 					struct mlxsw_sp_rif *rif)
4399 {
4400 	struct mlxsw_sp_nexthop *nh;
4401 	bool removing;
4402 
4403 	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4404 		switch (nh->type) {
4405 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
4406 			removing = false;
4407 			break;
4408 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4409 			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4410 			break;
4411 		default:
4412 			WARN_ON(1);
4413 			continue;
4414 		}
4415 
4416 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4417 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4418 	}
4419 }
4420 
4421 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4422 					 struct mlxsw_sp_rif *old_rif,
4423 					 struct mlxsw_sp_rif *new_rif)
4424 {
4425 	struct mlxsw_sp_nexthop *nh;
4426 
4427 	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4428 	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4429 		nh->rif = new_rif;
4430 	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4431 }
4432 
4433 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4434 					   struct mlxsw_sp_rif *rif)
4435 {
4436 	struct mlxsw_sp_nexthop *nh, *tmp;
4437 
4438 	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4439 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4440 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4441 	}
4442 }
4443 
4444 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4445 {
4446 	enum mlxsw_reg_ratr_trap_action trap_action;
4447 	char ratr_pl[MLXSW_REG_RATR_LEN];
4448 	int err;
4449 
4450 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4451 				  &mlxsw_sp->router->adj_trap_index);
4452 	if (err)
4453 		return err;
4454 
4455 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4456 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4457 			    MLXSW_REG_RATR_TYPE_ETHERNET,
4458 			    mlxsw_sp->router->adj_trap_index,
4459 			    mlxsw_sp->router->lb_rif_index);
4460 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4461 	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4462 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4463 	if (err)
4464 		goto err_ratr_write;
4465 
4466 	return 0;
4467 
4468 err_ratr_write:
4469 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4470 			   mlxsw_sp->router->adj_trap_index);
4471 	return err;
4472 }
4473 
4474 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4475 {
4476 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4477 			   mlxsw_sp->router->adj_trap_index);
4478 }
4479 
4480 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4481 {
4482 	int err;
4483 
4484 	if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4485 		return 0;
4486 
4487 	err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4488 	if (err)
4489 		return err;
4490 
4491 	refcount_set(&mlxsw_sp->router->num_groups, 1);
4492 
4493 	return 0;
4494 }
4495 
4496 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4497 {
4498 	if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4499 		return;
4500 
4501 	mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4502 }
4503 
4504 static void
4505 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4506 			     const struct mlxsw_sp_nexthop_group *nh_grp,
4507 			     unsigned long *activity)
4508 {
4509 	char *ratrad_pl;
4510 	int i, err;
4511 
4512 	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4513 	if (!ratrad_pl)
4514 		return;
4515 
4516 	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4517 			      nh_grp->nhgi->count);
4518 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4519 	if (err)
4520 		goto out;
4521 
4522 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4523 		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4524 			continue;
4525 		bitmap_set(activity, i, 1);
4526 	}
4527 
4528 out:
4529 	kfree(ratrad_pl);
4530 }
4531 
4532 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4533 
4534 static void
4535 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4536 				const struct mlxsw_sp_nexthop_group *nh_grp)
4537 {
4538 	unsigned long *activity;
4539 
4540 	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4541 	if (!activity)
4542 		return;
4543 
4544 	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4545 	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4546 					nh_grp->nhgi->count, activity);
4547 
4548 	bitmap_free(activity);
4549 }
4550 
4551 static void
4552 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4553 {
4554 	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4555 
4556 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4557 			       msecs_to_jiffies(interval));
4558 }
4559 
4560 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4561 {
4562 	struct mlxsw_sp_nexthop_group_info *nhgi;
4563 	struct mlxsw_sp_router *router;
4564 	bool reschedule = false;
4565 
4566 	router = container_of(work, struct mlxsw_sp_router,
4567 			      nh_grp_activity_dw.work);
4568 
4569 	mutex_lock(&router->lock);
4570 
4571 	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4572 		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4573 		reschedule = true;
4574 	}
4575 
4576 	mutex_unlock(&router->lock);
4577 
4578 	if (!reschedule)
4579 		return;
4580 	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4581 }
4582 
4583 static int
4584 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4585 				     const struct nh_notifier_single_info *nh,
4586 				     struct netlink_ext_ack *extack)
4587 {
4588 	int err = -EINVAL;
4589 
4590 	if (nh->is_fdb)
4591 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4592 	else if (nh->has_encap)
4593 		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4594 	else
4595 		err = 0;
4596 
4597 	return err;
4598 }
4599 
4600 static int
4601 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4602 					  const struct nh_notifier_single_info *nh,
4603 					  struct netlink_ext_ack *extack)
4604 {
4605 	int err;
4606 
4607 	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4608 	if (err)
4609 		return err;
4610 
4611 	/* Device only nexthops with an IPIP device are programmed as
4612 	 * encapsulating adjacency entries.
4613 	 */
4614 	if (!nh->gw_family && !nh->is_reject &&
4615 	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4616 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4617 		return -EINVAL;
4618 	}
4619 
4620 	return 0;
4621 }
4622 
4623 static int
4624 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4625 				    const struct nh_notifier_grp_info *nh_grp,
4626 				    struct netlink_ext_ack *extack)
4627 {
4628 	int i;
4629 
4630 	if (nh_grp->is_fdb) {
4631 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4632 		return -EINVAL;
4633 	}
4634 
4635 	for (i = 0; i < nh_grp->num_nh; i++) {
4636 		const struct nh_notifier_single_info *nh;
4637 		int err;
4638 
4639 		nh = &nh_grp->nh_entries[i].nh;
4640 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4641 								extack);
4642 		if (err)
4643 			return err;
4644 	}
4645 
4646 	return 0;
4647 }
4648 
4649 static int
4650 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4651 					     const struct nh_notifier_res_table_info *nh_res_table,
4652 					     struct netlink_ext_ack *extack)
4653 {
4654 	unsigned int alloc_size;
4655 	bool valid_size = false;
4656 	int err, i;
4657 
4658 	if (nh_res_table->num_nh_buckets < 32) {
4659 		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4660 		return -EINVAL;
4661 	}
4662 
4663 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4664 		const struct mlxsw_sp_adj_grp_size_range *size_range;
4665 
4666 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4667 
4668 		if (nh_res_table->num_nh_buckets >= size_range->start &&
4669 		    nh_res_table->num_nh_buckets <= size_range->end) {
4670 			valid_size = true;
4671 			break;
4672 		}
4673 	}
4674 
4675 	if (!valid_size) {
4676 		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4677 		return -EINVAL;
4678 	}
4679 
4680 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4681 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4682 					      nh_res_table->num_nh_buckets,
4683 					      &alloc_size);
4684 	if (err || nh_res_table->num_nh_buckets != alloc_size) {
4685 		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4686 		return -EINVAL;
4687 	}
4688 
4689 	return 0;
4690 }
4691 
4692 static int
4693 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4694 					const struct nh_notifier_res_table_info *nh_res_table,
4695 					struct netlink_ext_ack *extack)
4696 {
4697 	int err;
4698 	u16 i;
4699 
4700 	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4701 							   nh_res_table,
4702 							   extack);
4703 	if (err)
4704 		return err;
4705 
4706 	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4707 		const struct nh_notifier_single_info *nh;
4708 		int err;
4709 
4710 		nh = &nh_res_table->nhs[i];
4711 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4712 								extack);
4713 		if (err)
4714 			return err;
4715 	}
4716 
4717 	return 0;
4718 }
4719 
4720 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4721 					 unsigned long event,
4722 					 struct nh_notifier_info *info)
4723 {
4724 	struct nh_notifier_single_info *nh;
4725 
4726 	if (event != NEXTHOP_EVENT_REPLACE &&
4727 	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4728 	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
4729 		return 0;
4730 
4731 	switch (info->type) {
4732 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4733 		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4734 							    info->extack);
4735 	case NH_NOTIFIER_INFO_TYPE_GRP:
4736 		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4737 							   info->nh_grp,
4738 							   info->extack);
4739 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4740 		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4741 							       info->nh_res_table,
4742 							       info->extack);
4743 	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4744 		nh = &info->nh_res_bucket->new_nh;
4745 		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4746 								 info->extack);
4747 	default:
4748 		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4749 		return -EOPNOTSUPP;
4750 	}
4751 }
4752 
4753 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4754 					    const struct nh_notifier_info *info)
4755 {
4756 	const struct net_device *dev;
4757 
4758 	switch (info->type) {
4759 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4760 		dev = info->nh->dev;
4761 		return info->nh->gw_family || info->nh->is_reject ||
4762 		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4763 	case NH_NOTIFIER_INFO_TYPE_GRP:
4764 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4765 		/* Already validated earlier. */
4766 		return true;
4767 	default:
4768 		return false;
4769 	}
4770 }
4771 
4772 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4773 						struct mlxsw_sp_nexthop *nh)
4774 {
4775 	u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
4776 
4777 	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4778 	nh->should_offload = 1;
4779 	/* While nexthops that discard packets do not forward packets
4780 	 * via an egress RIF, they still need to be programmed using a
4781 	 * valid RIF, so use the loopback RIF created during init.
4782 	 */
4783 	nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
4784 }
4785 
4786 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4787 						struct mlxsw_sp_nexthop *nh)
4788 {
4789 	nh->rif = NULL;
4790 	nh->should_offload = 0;
4791 }
4792 
4793 static int
4794 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4795 			  struct mlxsw_sp_nexthop_group *nh_grp,
4796 			  struct mlxsw_sp_nexthop *nh,
4797 			  struct nh_notifier_single_info *nh_obj, int weight)
4798 {
4799 	struct net_device *dev = nh_obj->dev;
4800 	int err;
4801 
4802 	nh->nhgi = nh_grp->nhgi;
4803 	nh->nh_weight = weight;
4804 
4805 	switch (nh_obj->gw_family) {
4806 	case AF_INET:
4807 		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4808 		nh->neigh_tbl = &arp_tbl;
4809 		break;
4810 	case AF_INET6:
4811 		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
4812 #if IS_ENABLED(CONFIG_IPV6)
4813 		nh->neigh_tbl = &nd_tbl;
4814 #endif
4815 		break;
4816 	}
4817 
4818 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4819 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4820 	nh->ifindex = dev->ifindex;
4821 
4822 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4823 	if (err)
4824 		goto err_type_init;
4825 
4826 	if (nh_obj->is_reject)
4827 		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
4828 
4829 	/* In a resilient nexthop group, all the nexthops must be written to
4830 	 * the adjacency table. Even if they do not have a valid neighbour or
4831 	 * RIF.
4832 	 */
4833 	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
4834 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4835 		nh->should_offload = 1;
4836 	}
4837 
4838 	return 0;
4839 
4840 err_type_init:
4841 	list_del(&nh->router_list_node);
4842 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4843 	return err;
4844 }
4845 
4846 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
4847 				      struct mlxsw_sp_nexthop *nh)
4848 {
4849 	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
4850 		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
4851 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4852 	list_del(&nh->router_list_node);
4853 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4854 	nh->should_offload = 0;
4855 }
4856 
4857 static int
4858 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
4859 				     struct mlxsw_sp_nexthop_group *nh_grp,
4860 				     struct nh_notifier_info *info)
4861 {
4862 	struct mlxsw_sp_nexthop_group_info *nhgi;
4863 	struct mlxsw_sp_nexthop *nh;
4864 	bool is_resilient = false;
4865 	unsigned int nhs;
4866 	int err, i;
4867 
4868 	switch (info->type) {
4869 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4870 		nhs = 1;
4871 		break;
4872 	case NH_NOTIFIER_INFO_TYPE_GRP:
4873 		nhs = info->nh_grp->num_nh;
4874 		break;
4875 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4876 		nhs = info->nh_res_table->num_nh_buckets;
4877 		is_resilient = true;
4878 		break;
4879 	default:
4880 		return -EINVAL;
4881 	}
4882 
4883 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4884 	if (!nhgi)
4885 		return -ENOMEM;
4886 	nh_grp->nhgi = nhgi;
4887 	nhgi->nh_grp = nh_grp;
4888 	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
4889 	nhgi->is_resilient = is_resilient;
4890 	nhgi->count = nhs;
4891 	for (i = 0; i < nhgi->count; i++) {
4892 		struct nh_notifier_single_info *nh_obj;
4893 		int weight;
4894 
4895 		nh = &nhgi->nexthops[i];
4896 		switch (info->type) {
4897 		case NH_NOTIFIER_INFO_TYPE_SINGLE:
4898 			nh_obj = info->nh;
4899 			weight = 1;
4900 			break;
4901 		case NH_NOTIFIER_INFO_TYPE_GRP:
4902 			nh_obj = &info->nh_grp->nh_entries[i].nh;
4903 			weight = info->nh_grp->nh_entries[i].weight;
4904 			break;
4905 		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4906 			nh_obj = &info->nh_res_table->nhs[i];
4907 			weight = 1;
4908 			break;
4909 		default:
4910 			err = -EINVAL;
4911 			goto err_nexthop_obj_init;
4912 		}
4913 		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
4914 						weight);
4915 		if (err)
4916 			goto err_nexthop_obj_init;
4917 	}
4918 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
4919 	if (err)
4920 		goto err_group_inc;
4921 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4922 	if (err) {
4923 		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
4924 		goto err_group_refresh;
4925 	}
4926 
4927 	/* Add resilient nexthop groups to a list so that the activity of their
4928 	 * nexthop buckets will be periodically queried and cleared.
4929 	 */
4930 	if (nhgi->is_resilient) {
4931 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4932 			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
4933 		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
4934 	}
4935 
4936 	return 0;
4937 
4938 err_group_refresh:
4939 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
4940 err_group_inc:
4941 	i = nhgi->count;
4942 err_nexthop_obj_init:
4943 	for (i--; i >= 0; i--) {
4944 		nh = &nhgi->nexthops[i];
4945 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4946 	}
4947 	kfree(nhgi);
4948 	return err;
4949 }
4950 
4951 static void
4952 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
4953 				     struct mlxsw_sp_nexthop_group *nh_grp)
4954 {
4955 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4956 	struct mlxsw_sp_router *router = mlxsw_sp->router;
4957 	int i;
4958 
4959 	if (nhgi->is_resilient) {
4960 		list_del(&nhgi->list);
4961 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4962 			cancel_delayed_work(&router->nh_grp_activity_dw);
4963 	}
4964 
4965 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
4966 	for (i = nhgi->count - 1; i >= 0; i--) {
4967 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4968 
4969 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4970 	}
4971 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4972 	WARN_ON_ONCE(nhgi->adj_index_valid);
4973 	kfree(nhgi);
4974 }
4975 
4976 static struct mlxsw_sp_nexthop_group *
4977 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
4978 				  struct nh_notifier_info *info)
4979 {
4980 	struct mlxsw_sp_nexthop_group *nh_grp;
4981 	int err;
4982 
4983 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
4984 	if (!nh_grp)
4985 		return ERR_PTR(-ENOMEM);
4986 	INIT_LIST_HEAD(&nh_grp->vr_list);
4987 	err = rhashtable_init(&nh_grp->vr_ht,
4988 			      &mlxsw_sp_nexthop_group_vr_ht_params);
4989 	if (err)
4990 		goto err_nexthop_group_vr_ht_init;
4991 	INIT_LIST_HEAD(&nh_grp->fib_list);
4992 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
4993 	nh_grp->obj.id = info->id;
4994 
4995 	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
4996 	if (err)
4997 		goto err_nexthop_group_info_init;
4998 
4999 	nh_grp->can_destroy = false;
5000 
5001 	return nh_grp;
5002 
5003 err_nexthop_group_info_init:
5004 	rhashtable_destroy(&nh_grp->vr_ht);
5005 err_nexthop_group_vr_ht_init:
5006 	kfree(nh_grp);
5007 	return ERR_PTR(err);
5008 }
5009 
5010 static void
5011 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5012 				   struct mlxsw_sp_nexthop_group *nh_grp)
5013 {
5014 	if (!nh_grp->can_destroy)
5015 		return;
5016 	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5017 	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5018 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5019 	rhashtable_destroy(&nh_grp->vr_ht);
5020 	kfree(nh_grp);
5021 }
5022 
5023 static struct mlxsw_sp_nexthop_group *
5024 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5025 {
5026 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5027 
5028 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5029 	cmp_arg.id = id;
5030 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5031 				      &cmp_arg,
5032 				      mlxsw_sp_nexthop_group_ht_params);
5033 }
5034 
5035 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5036 					  struct mlxsw_sp_nexthop_group *nh_grp)
5037 {
5038 	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5039 }
5040 
5041 static int
5042 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5043 				   struct mlxsw_sp_nexthop_group *nh_grp,
5044 				   struct mlxsw_sp_nexthop_group *old_nh_grp,
5045 				   struct netlink_ext_ack *extack)
5046 {
5047 	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5048 	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5049 	int err;
5050 
5051 	old_nh_grp->nhgi = new_nhgi;
5052 	new_nhgi->nh_grp = old_nh_grp;
5053 	nh_grp->nhgi = old_nhgi;
5054 	old_nhgi->nh_grp = nh_grp;
5055 
5056 	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5057 		/* Both the old adjacency index and the new one are valid.
5058 		 * Routes are currently using the old one. Tell the device to
5059 		 * replace the old adjacency index with the new one.
5060 		 */
5061 		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5062 						     old_nhgi->adj_index,
5063 						     old_nhgi->ecmp_size);
5064 		if (err) {
5065 			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5066 			goto err_out;
5067 		}
5068 	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5069 		/* The old adjacency index is valid, while the new one is not.
5070 		 * Iterate over all the routes using the group and change them
5071 		 * to trap packets to the CPU.
5072 		 */
5073 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5074 		if (err) {
5075 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5076 			goto err_out;
5077 		}
5078 	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5079 		/* The old adjacency index is invalid, while the new one is.
5080 		 * Iterate over all the routes using the group and change them
5081 		 * to forward packets using the new valid index.
5082 		 */
5083 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5084 		if (err) {
5085 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5086 			goto err_out;
5087 		}
5088 	}
5089 
5090 	/* Make sure the flags are set / cleared based on the new nexthop group
5091 	 * information.
5092 	 */
5093 	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5094 
5095 	/* At this point 'nh_grp' is just a shell that is not used by anyone
5096 	 * and its nexthop group info is the old info that was just replaced
5097 	 * with the new one. Remove it.
5098 	 */
5099 	nh_grp->can_destroy = true;
5100 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5101 
5102 	return 0;
5103 
5104 err_out:
5105 	old_nhgi->nh_grp = old_nh_grp;
5106 	nh_grp->nhgi = new_nhgi;
5107 	new_nhgi->nh_grp = nh_grp;
5108 	old_nh_grp->nhgi = old_nhgi;
5109 	return err;
5110 }
5111 
5112 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5113 				    struct nh_notifier_info *info)
5114 {
5115 	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5116 	struct netlink_ext_ack *extack = info->extack;
5117 	int err;
5118 
5119 	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5120 	if (IS_ERR(nh_grp))
5121 		return PTR_ERR(nh_grp);
5122 
5123 	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5124 	if (!old_nh_grp)
5125 		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5126 	else
5127 		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5128 							 old_nh_grp, extack);
5129 
5130 	if (err) {
5131 		nh_grp->can_destroy = true;
5132 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5133 	}
5134 
5135 	return err;
5136 }
5137 
5138 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5139 				     struct nh_notifier_info *info)
5140 {
5141 	struct mlxsw_sp_nexthop_group *nh_grp;
5142 
5143 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5144 	if (!nh_grp)
5145 		return;
5146 
5147 	nh_grp->can_destroy = true;
5148 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5149 
5150 	/* If the group still has routes using it, then defer the delete
5151 	 * operation until the last route using it is deleted.
5152 	 */
5153 	if (!list_empty(&nh_grp->fib_list))
5154 		return;
5155 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5156 }
5157 
5158 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5159 					     u32 adj_index, char *ratr_pl)
5160 {
5161 	MLXSW_REG_ZERO(ratr, ratr_pl);
5162 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5163 	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5164 	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5165 
5166 	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5167 }
5168 
5169 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5170 {
5171 	/* Clear the opcode and activity on both the old and new payload as
5172 	 * they are irrelevant for the comparison.
5173 	 */
5174 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5175 	mlxsw_reg_ratr_a_set(ratr_pl, 0);
5176 	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5177 	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5178 
5179 	/* If the contents of the adjacency entry are consistent with the
5180 	 * replacement request, then replacement was successful.
5181 	 */
5182 	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5183 		return 0;
5184 
5185 	return -EINVAL;
5186 }
5187 
5188 static int
5189 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5190 				       struct mlxsw_sp_nexthop *nh,
5191 				       struct nh_notifier_info *info)
5192 {
5193 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5194 	struct netlink_ext_ack *extack = info->extack;
5195 	bool force = info->nh_res_bucket->force;
5196 	char ratr_pl_new[MLXSW_REG_RATR_LEN];
5197 	char ratr_pl[MLXSW_REG_RATR_LEN];
5198 	u32 adj_index;
5199 	int err;
5200 
5201 	/* No point in trying an atomic replacement if the idle timer interval
5202 	 * is smaller than the interval in which we query and clear activity.
5203 	 */
5204 	if (!force && info->nh_res_bucket->idle_timer_ms <
5205 	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5206 		force = true;
5207 
5208 	adj_index = nh->nhgi->adj_index + bucket_index;
5209 	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5210 	if (err) {
5211 		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5212 		return err;
5213 	}
5214 
5215 	if (!force) {
5216 		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5217 							ratr_pl_new);
5218 		if (err) {
5219 			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5220 			return err;
5221 		}
5222 
5223 		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5224 		if (err) {
5225 			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5226 			return err;
5227 		}
5228 	}
5229 
5230 	nh->update = 0;
5231 	nh->offloaded = 1;
5232 	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5233 
5234 	return 0;
5235 }
5236 
5237 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5238 					       struct nh_notifier_info *info)
5239 {
5240 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5241 	struct netlink_ext_ack *extack = info->extack;
5242 	struct mlxsw_sp_nexthop_group_info *nhgi;
5243 	struct nh_notifier_single_info *nh_obj;
5244 	struct mlxsw_sp_nexthop_group *nh_grp;
5245 	struct mlxsw_sp_nexthop *nh;
5246 	int err;
5247 
5248 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5249 	if (!nh_grp) {
5250 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5251 		return -EINVAL;
5252 	}
5253 
5254 	nhgi = nh_grp->nhgi;
5255 
5256 	if (bucket_index >= nhgi->count) {
5257 		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5258 		return -EINVAL;
5259 	}
5260 
5261 	nh = &nhgi->nexthops[bucket_index];
5262 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5263 
5264 	nh_obj = &info->nh_res_bucket->new_nh;
5265 	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5266 	if (err) {
5267 		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5268 		goto err_nexthop_obj_init;
5269 	}
5270 
5271 	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5272 	if (err)
5273 		goto err_nexthop_obj_bucket_adj_update;
5274 
5275 	return 0;
5276 
5277 err_nexthop_obj_bucket_adj_update:
5278 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5279 err_nexthop_obj_init:
5280 	nh_obj = &info->nh_res_bucket->old_nh;
5281 	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5282 	/* The old adjacency entry was not overwritten */
5283 	nh->update = 0;
5284 	nh->offloaded = 1;
5285 	return err;
5286 }
5287 
5288 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5289 				      unsigned long event, void *ptr)
5290 {
5291 	struct nh_notifier_info *info = ptr;
5292 	struct mlxsw_sp_router *router;
5293 	int err = 0;
5294 
5295 	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5296 	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5297 	if (err)
5298 		goto out;
5299 
5300 	mutex_lock(&router->lock);
5301 
5302 	switch (event) {
5303 	case NEXTHOP_EVENT_REPLACE:
5304 		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5305 		break;
5306 	case NEXTHOP_EVENT_DEL:
5307 		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5308 		break;
5309 	case NEXTHOP_EVENT_BUCKET_REPLACE:
5310 		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5311 							  info);
5312 		break;
5313 	default:
5314 		break;
5315 	}
5316 
5317 	mutex_unlock(&router->lock);
5318 
5319 out:
5320 	return notifier_from_errno(err);
5321 }
5322 
5323 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5324 				   struct fib_info *fi)
5325 {
5326 	const struct fib_nh *nh = fib_info_nh(fi, 0);
5327 
5328 	return nh->fib_nh_scope == RT_SCOPE_LINK ||
5329 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5330 }
5331 
5332 static int
5333 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5334 				  struct mlxsw_sp_nexthop_group *nh_grp)
5335 {
5336 	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5337 	struct mlxsw_sp_nexthop_group_info *nhgi;
5338 	struct mlxsw_sp_nexthop *nh;
5339 	int err, i;
5340 
5341 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5342 	if (!nhgi)
5343 		return -ENOMEM;
5344 	nh_grp->nhgi = nhgi;
5345 	nhgi->nh_grp = nh_grp;
5346 	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5347 	nhgi->count = nhs;
5348 	for (i = 0; i < nhgi->count; i++) {
5349 		struct fib_nh *fib_nh;
5350 
5351 		nh = &nhgi->nexthops[i];
5352 		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5353 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5354 		if (err)
5355 			goto err_nexthop4_init;
5356 	}
5357 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5358 	if (err)
5359 		goto err_group_inc;
5360 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5361 	if (err)
5362 		goto err_group_refresh;
5363 
5364 	return 0;
5365 
5366 err_group_refresh:
5367 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5368 err_group_inc:
5369 	i = nhgi->count;
5370 err_nexthop4_init:
5371 	for (i--; i >= 0; i--) {
5372 		nh = &nhgi->nexthops[i];
5373 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5374 	}
5375 	kfree(nhgi);
5376 	return err;
5377 }
5378 
5379 static void
5380 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5381 				  struct mlxsw_sp_nexthop_group *nh_grp)
5382 {
5383 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5384 	int i;
5385 
5386 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5387 	for (i = nhgi->count - 1; i >= 0; i--) {
5388 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5389 
5390 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5391 	}
5392 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5393 	WARN_ON_ONCE(nhgi->adj_index_valid);
5394 	kfree(nhgi);
5395 }
5396 
5397 static struct mlxsw_sp_nexthop_group *
5398 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5399 {
5400 	struct mlxsw_sp_nexthop_group *nh_grp;
5401 	int err;
5402 
5403 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5404 	if (!nh_grp)
5405 		return ERR_PTR(-ENOMEM);
5406 	INIT_LIST_HEAD(&nh_grp->vr_list);
5407 	err = rhashtable_init(&nh_grp->vr_ht,
5408 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5409 	if (err)
5410 		goto err_nexthop_group_vr_ht_init;
5411 	INIT_LIST_HEAD(&nh_grp->fib_list);
5412 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5413 	nh_grp->ipv4.fi = fi;
5414 	fib_info_hold(fi);
5415 
5416 	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5417 	if (err)
5418 		goto err_nexthop_group_info_init;
5419 
5420 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5421 	if (err)
5422 		goto err_nexthop_group_insert;
5423 
5424 	nh_grp->can_destroy = true;
5425 
5426 	return nh_grp;
5427 
5428 err_nexthop_group_insert:
5429 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5430 err_nexthop_group_info_init:
5431 	fib_info_put(fi);
5432 	rhashtable_destroy(&nh_grp->vr_ht);
5433 err_nexthop_group_vr_ht_init:
5434 	kfree(nh_grp);
5435 	return ERR_PTR(err);
5436 }
5437 
5438 static void
5439 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5440 				struct mlxsw_sp_nexthop_group *nh_grp)
5441 {
5442 	if (!nh_grp->can_destroy)
5443 		return;
5444 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5445 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5446 	fib_info_put(nh_grp->ipv4.fi);
5447 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5448 	rhashtable_destroy(&nh_grp->vr_ht);
5449 	kfree(nh_grp);
5450 }
5451 
5452 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5453 				       struct mlxsw_sp_fib_entry *fib_entry,
5454 				       struct fib_info *fi)
5455 {
5456 	struct mlxsw_sp_nexthop_group *nh_grp;
5457 
5458 	if (fi->nh) {
5459 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5460 							   fi->nh->id);
5461 		if (WARN_ON_ONCE(!nh_grp))
5462 			return -EINVAL;
5463 		goto out;
5464 	}
5465 
5466 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5467 	if (!nh_grp) {
5468 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5469 		if (IS_ERR(nh_grp))
5470 			return PTR_ERR(nh_grp);
5471 	}
5472 out:
5473 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5474 	fib_entry->nh_group = nh_grp;
5475 	return 0;
5476 }
5477 
5478 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5479 					struct mlxsw_sp_fib_entry *fib_entry)
5480 {
5481 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5482 
5483 	list_del(&fib_entry->nexthop_group_node);
5484 	if (!list_empty(&nh_grp->fib_list))
5485 		return;
5486 
5487 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5488 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5489 		return;
5490 	}
5491 
5492 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5493 }
5494 
5495 static bool
5496 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5497 {
5498 	struct mlxsw_sp_fib4_entry *fib4_entry;
5499 
5500 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5501 				  common);
5502 	return !fib4_entry->tos;
5503 }
5504 
5505 static bool
5506 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5507 {
5508 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5509 
5510 	switch (fib_entry->fib_node->fib->proto) {
5511 	case MLXSW_SP_L3_PROTO_IPV4:
5512 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5513 			return false;
5514 		break;
5515 	case MLXSW_SP_L3_PROTO_IPV6:
5516 		break;
5517 	}
5518 
5519 	switch (fib_entry->type) {
5520 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5521 		return !!nh_group->nhgi->adj_index_valid;
5522 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5523 		return !!nh_group->nhgi->nh_rif;
5524 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5525 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5526 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5527 		return true;
5528 	default:
5529 		return false;
5530 	}
5531 }
5532 
5533 static struct mlxsw_sp_nexthop *
5534 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5535 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5536 {
5537 	int i;
5538 
5539 	for (i = 0; i < nh_grp->nhgi->count; i++) {
5540 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5541 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5542 
5543 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
5544 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5545 				    &rt->fib6_nh->fib_nh_gw6))
5546 			return nh;
5547 	}
5548 
5549 	return NULL;
5550 }
5551 
5552 static void
5553 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5554 				      struct fib_entry_notifier_info *fen_info)
5555 {
5556 	u32 *p_dst = (u32 *) &fen_info->dst;
5557 	struct fib_rt_info fri;
5558 
5559 	fri.fi = fen_info->fi;
5560 	fri.tb_id = fen_info->tb_id;
5561 	fri.dst = cpu_to_be32(*p_dst);
5562 	fri.dst_len = fen_info->dst_len;
5563 	fri.tos = fen_info->tos;
5564 	fri.type = fen_info->type;
5565 	fri.offload = false;
5566 	fri.trap = false;
5567 	fri.offload_failed = true;
5568 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5569 }
5570 
5571 static void
5572 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5573 				 struct mlxsw_sp_fib_entry *fib_entry)
5574 {
5575 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5576 	int dst_len = fib_entry->fib_node->key.prefix_len;
5577 	struct mlxsw_sp_fib4_entry *fib4_entry;
5578 	struct fib_rt_info fri;
5579 	bool should_offload;
5580 
5581 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5582 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5583 				  common);
5584 	fri.fi = fib4_entry->fi;
5585 	fri.tb_id = fib4_entry->tb_id;
5586 	fri.dst = cpu_to_be32(*p_dst);
5587 	fri.dst_len = dst_len;
5588 	fri.tos = fib4_entry->tos;
5589 	fri.type = fib4_entry->type;
5590 	fri.offload = should_offload;
5591 	fri.trap = !should_offload;
5592 	fri.offload_failed = false;
5593 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5594 }
5595 
5596 static void
5597 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5598 				   struct mlxsw_sp_fib_entry *fib_entry)
5599 {
5600 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5601 	int dst_len = fib_entry->fib_node->key.prefix_len;
5602 	struct mlxsw_sp_fib4_entry *fib4_entry;
5603 	struct fib_rt_info fri;
5604 
5605 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5606 				  common);
5607 	fri.fi = fib4_entry->fi;
5608 	fri.tb_id = fib4_entry->tb_id;
5609 	fri.dst = cpu_to_be32(*p_dst);
5610 	fri.dst_len = dst_len;
5611 	fri.tos = fib4_entry->tos;
5612 	fri.type = fib4_entry->type;
5613 	fri.offload = false;
5614 	fri.trap = false;
5615 	fri.offload_failed = false;
5616 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5617 }
5618 
5619 #if IS_ENABLED(CONFIG_IPV6)
5620 static void
5621 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5622 				      struct fib6_info **rt_arr,
5623 				      unsigned int nrt6)
5624 {
5625 	int i;
5626 
5627 	/* In IPv6 a multipath route is represented using multiple routes, so
5628 	 * we need to set the flags on all of them.
5629 	 */
5630 	for (i = 0; i < nrt6; i++)
5631 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5632 				       false, false, true);
5633 }
5634 #else
5635 static void
5636 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5637 				      struct fib6_info **rt_arr,
5638 				      unsigned int nrt6)
5639 {
5640 }
5641 #endif
5642 
5643 #if IS_ENABLED(CONFIG_IPV6)
5644 static void
5645 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5646 				 struct mlxsw_sp_fib_entry *fib_entry)
5647 {
5648 	struct mlxsw_sp_fib6_entry *fib6_entry;
5649 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5650 	bool should_offload;
5651 
5652 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5653 
5654 	/* In IPv6 a multipath route is represented using multiple routes, so
5655 	 * we need to set the flags on all of them.
5656 	 */
5657 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5658 				  common);
5659 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5660 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5661 				       should_offload, !should_offload, false);
5662 }
5663 #else
5664 static void
5665 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5666 				 struct mlxsw_sp_fib_entry *fib_entry)
5667 {
5668 }
5669 #endif
5670 
5671 #if IS_ENABLED(CONFIG_IPV6)
5672 static void
5673 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5674 				   struct mlxsw_sp_fib_entry *fib_entry)
5675 {
5676 	struct mlxsw_sp_fib6_entry *fib6_entry;
5677 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5678 
5679 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5680 				  common);
5681 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5682 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5683 				       false, false, false);
5684 }
5685 #else
5686 static void
5687 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5688 				   struct mlxsw_sp_fib_entry *fib_entry)
5689 {
5690 }
5691 #endif
5692 
5693 static void
5694 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5695 				struct mlxsw_sp_fib_entry *fib_entry)
5696 {
5697 	switch (fib_entry->fib_node->fib->proto) {
5698 	case MLXSW_SP_L3_PROTO_IPV4:
5699 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5700 		break;
5701 	case MLXSW_SP_L3_PROTO_IPV6:
5702 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5703 		break;
5704 	}
5705 }
5706 
5707 static void
5708 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5709 				  struct mlxsw_sp_fib_entry *fib_entry)
5710 {
5711 	switch (fib_entry->fib_node->fib->proto) {
5712 	case MLXSW_SP_L3_PROTO_IPV4:
5713 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5714 		break;
5715 	case MLXSW_SP_L3_PROTO_IPV6:
5716 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5717 		break;
5718 	}
5719 }
5720 
5721 static void
5722 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5723 				    struct mlxsw_sp_fib_entry *fib_entry,
5724 				    enum mlxsw_sp_fib_entry_op op)
5725 {
5726 	switch (op) {
5727 	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5728 	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5729 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5730 		break;
5731 	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5732 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5733 		break;
5734 	default:
5735 		break;
5736 	}
5737 }
5738 
5739 struct mlxsw_sp_fib_entry_op_ctx_basic {
5740 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5741 };
5742 
5743 static void
5744 mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5745 					enum mlxsw_sp_l3proto proto,
5746 					enum mlxsw_sp_fib_entry_op op,
5747 					u16 virtual_router, u8 prefix_len,
5748 					unsigned char *addr,
5749 					struct mlxsw_sp_fib_entry_priv *priv)
5750 {
5751 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5752 	enum mlxsw_reg_ralxx_protocol ralxx_proto;
5753 	char *ralue_pl = op_ctx_basic->ralue_pl;
5754 	enum mlxsw_reg_ralue_op ralue_op;
5755 
5756 	ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
5757 
5758 	switch (op) {
5759 	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5760 	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5761 		ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
5762 		break;
5763 	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5764 		ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
5765 		break;
5766 	default:
5767 		WARN_ON_ONCE(1);
5768 		return;
5769 	}
5770 
5771 	switch (proto) {
5772 	case MLXSW_SP_L3_PROTO_IPV4:
5773 		mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
5774 				      virtual_router, prefix_len, (u32 *) addr);
5775 		break;
5776 	case MLXSW_SP_L3_PROTO_IPV6:
5777 		mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
5778 				      virtual_router, prefix_len, addr);
5779 		break;
5780 	}
5781 }
5782 
5783 static void
5784 mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5785 						   enum mlxsw_reg_ralue_trap_action trap_action,
5786 						   u16 trap_id, u32 adjacency_index, u16 ecmp_size)
5787 {
5788 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5789 
5790 	mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
5791 					trap_id, adjacency_index, ecmp_size);
5792 }
5793 
5794 static void
5795 mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5796 						  enum mlxsw_reg_ralue_trap_action trap_action,
5797 						  u16 trap_id, u16 local_erif)
5798 {
5799 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5800 
5801 	mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
5802 				       trap_id, local_erif);
5803 }
5804 
5805 static void
5806 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
5807 {
5808 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5809 
5810 	mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
5811 }
5812 
5813 static void
5814 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5815 						      u32 tunnel_ptr)
5816 {
5817 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5818 
5819 	mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
5820 }
5821 
5822 static int
5823 mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5824 					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5825 					  bool *postponed_for_bulk)
5826 {
5827 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5828 
5829 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5830 			       op_ctx_basic->ralue_pl);
5831 }
5832 
5833 static bool
5834 mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
5835 {
5836 	return true;
5837 }
5838 
5839 static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5840 				    struct mlxsw_sp_fib_entry *fib_entry,
5841 				    enum mlxsw_sp_fib_entry_op op)
5842 {
5843 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5844 
5845 	mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
5846 	fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
5847 				    fib_entry->fib_node->key.prefix_len,
5848 				    fib_entry->fib_node->key.addr,
5849 				    fib_entry->priv);
5850 }
5851 
5852 static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5853 				     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5854 				     const struct mlxsw_sp_router_ll_ops *ll_ops)
5855 {
5856 	bool postponed_for_bulk = false;
5857 	int err;
5858 
5859 	err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
5860 	if (!postponed_for_bulk)
5861 		mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
5862 	return err;
5863 }
5864 
5865 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5866 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5867 					struct mlxsw_sp_fib_entry *fib_entry,
5868 					enum mlxsw_sp_fib_entry_op op)
5869 {
5870 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5871 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5872 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5873 	enum mlxsw_reg_ralue_trap_action trap_action;
5874 	u16 trap_id = 0;
5875 	u32 adjacency_index = 0;
5876 	u16 ecmp_size = 0;
5877 
5878 	/* In case the nexthop group adjacency index is valid, use it
5879 	 * with provided ECMP size. Otherwise, setup trap and pass
5880 	 * traffic to kernel.
5881 	 */
5882 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5883 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5884 		adjacency_index = nhgi->adj_index;
5885 		ecmp_size = nhgi->ecmp_size;
5886 	} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
5887 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5888 		adjacency_index = mlxsw_sp->router->adj_trap_index;
5889 		ecmp_size = 1;
5890 	} else {
5891 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5892 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5893 	}
5894 
5895 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5896 	ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
5897 					  adjacency_index, ecmp_size);
5898 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5899 }
5900 
5901 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5902 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5903 				       struct mlxsw_sp_fib_entry *fib_entry,
5904 				       enum mlxsw_sp_fib_entry_op op)
5905 {
5906 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5907 	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
5908 	enum mlxsw_reg_ralue_trap_action trap_action;
5909 	u16 trap_id = 0;
5910 	u16 rif_index = 0;
5911 
5912 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5913 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5914 		rif_index = rif->rif_index;
5915 	} else {
5916 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5917 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5918 	}
5919 
5920 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5921 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
5922 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5923 }
5924 
5925 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
5926 				      struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5927 				      struct mlxsw_sp_fib_entry *fib_entry,
5928 				      enum mlxsw_sp_fib_entry_op op)
5929 {
5930 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5931 
5932 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5933 	ll_ops->fib_entry_act_ip2me_pack(op_ctx);
5934 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5935 }
5936 
5937 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
5938 					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5939 					   struct mlxsw_sp_fib_entry *fib_entry,
5940 					   enum mlxsw_sp_fib_entry_op op)
5941 {
5942 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5943 	enum mlxsw_reg_ralue_trap_action trap_action;
5944 
5945 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
5946 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5947 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
5948 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5949 }
5950 
5951 static int
5952 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
5953 				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5954 				  struct mlxsw_sp_fib_entry *fib_entry,
5955 				  enum mlxsw_sp_fib_entry_op op)
5956 {
5957 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5958 	enum mlxsw_reg_ralue_trap_action trap_action;
5959 	u16 trap_id;
5960 
5961 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5962 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
5963 
5964 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5965 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
5966 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5967 }
5968 
5969 static int
5970 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
5971 				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5972 				 struct mlxsw_sp_fib_entry *fib_entry,
5973 				 enum mlxsw_sp_fib_entry_op op)
5974 {
5975 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5976 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
5977 	const struct mlxsw_sp_ipip_ops *ipip_ops;
5978 	int err;
5979 
5980 	if (WARN_ON(!ipip_entry))
5981 		return -EINVAL;
5982 
5983 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5984 	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
5985 				     fib_entry->decap.tunnel_index);
5986 	if (err)
5987 		return err;
5988 
5989 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5990 	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
5991 					     fib_entry->decap.tunnel_index);
5992 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5993 }
5994 
5995 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
5996 					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5997 					   struct mlxsw_sp_fib_entry *fib_entry,
5998 					   enum mlxsw_sp_fib_entry_op op)
5999 {
6000 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6001 
6002 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6003 	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
6004 					     fib_entry->decap.tunnel_index);
6005 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6006 }
6007 
6008 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6009 				   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6010 				   struct mlxsw_sp_fib_entry *fib_entry,
6011 				   enum mlxsw_sp_fib_entry_op op)
6012 {
6013 	switch (fib_entry->type) {
6014 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6015 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
6016 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6017 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
6018 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6019 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
6020 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6021 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
6022 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6023 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
6024 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6025 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
6026 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6027 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
6028 	}
6029 	return -EINVAL;
6030 }
6031 
6032 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6033 				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6034 				 struct mlxsw_sp_fib_entry *fib_entry,
6035 				 enum mlxsw_sp_fib_entry_op op)
6036 {
6037 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
6038 
6039 	if (err)
6040 		return err;
6041 
6042 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6043 
6044 	return err;
6045 }
6046 
6047 static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6048 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6049 				       struct mlxsw_sp_fib_entry *fib_entry,
6050 				       bool is_new)
6051 {
6052 	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6053 				     is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
6054 					      MLXSW_SP_FIB_ENTRY_OP_UPDATE);
6055 }
6056 
6057 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6058 				     struct mlxsw_sp_fib_entry *fib_entry)
6059 {
6060 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6061 
6062 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6063 	return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
6064 }
6065 
6066 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6067 				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6068 				  struct mlxsw_sp_fib_entry *fib_entry)
6069 {
6070 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6071 
6072 	if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
6073 		return 0;
6074 	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6075 				     MLXSW_SP_FIB_ENTRY_OP_DELETE);
6076 }
6077 
6078 static int
6079 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6080 			     const struct fib_entry_notifier_info *fen_info,
6081 			     struct mlxsw_sp_fib_entry *fib_entry)
6082 {
6083 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6084 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6085 	struct mlxsw_sp_router *router = mlxsw_sp->router;
6086 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6087 	int ifindex = nhgi->nexthops[0].ifindex;
6088 	struct mlxsw_sp_ipip_entry *ipip_entry;
6089 
6090 	switch (fen_info->type) {
6091 	case RTN_LOCAL:
6092 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6093 							       MLXSW_SP_L3_PROTO_IPV4, dip);
6094 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6095 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6096 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6097 							     fib_entry,
6098 							     ipip_entry);
6099 		}
6100 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6101 						 MLXSW_SP_L3_PROTO_IPV4,
6102 						 &dip)) {
6103 			u32 tunnel_index;
6104 
6105 			tunnel_index = router->nve_decap_config.tunnel_index;
6106 			fib_entry->decap.tunnel_index = tunnel_index;
6107 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6108 			return 0;
6109 		}
6110 		fallthrough;
6111 	case RTN_BROADCAST:
6112 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6113 		return 0;
6114 	case RTN_BLACKHOLE:
6115 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6116 		return 0;
6117 	case RTN_UNREACHABLE:
6118 	case RTN_PROHIBIT:
6119 		/* Packets hitting these routes need to be trapped, but
6120 		 * can do so with a lower priority than packets directed
6121 		 * at the host, so use action type local instead of trap.
6122 		 */
6123 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6124 		return 0;
6125 	case RTN_UNICAST:
6126 		if (nhgi->gateway)
6127 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6128 		else
6129 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6130 		return 0;
6131 	default:
6132 		return -EINVAL;
6133 	}
6134 }
6135 
6136 static void
6137 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6138 			      struct mlxsw_sp_fib_entry *fib_entry)
6139 {
6140 	switch (fib_entry->type) {
6141 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6142 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6143 		break;
6144 	default:
6145 		break;
6146 	}
6147 }
6148 
6149 static void
6150 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6151 			       struct mlxsw_sp_fib4_entry *fib4_entry)
6152 {
6153 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6154 }
6155 
6156 static struct mlxsw_sp_fib4_entry *
6157 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6158 			   struct mlxsw_sp_fib_node *fib_node,
6159 			   const struct fib_entry_notifier_info *fen_info)
6160 {
6161 	struct mlxsw_sp_fib4_entry *fib4_entry;
6162 	struct mlxsw_sp_fib_entry *fib_entry;
6163 	int err;
6164 
6165 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6166 	if (!fib4_entry)
6167 		return ERR_PTR(-ENOMEM);
6168 	fib_entry = &fib4_entry->common;
6169 
6170 	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
6171 	if (IS_ERR(fib_entry->priv)) {
6172 		err = PTR_ERR(fib_entry->priv);
6173 		goto err_fib_entry_priv_create;
6174 	}
6175 
6176 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6177 	if (err)
6178 		goto err_nexthop4_group_get;
6179 
6180 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6181 					     fib_node->fib);
6182 	if (err)
6183 		goto err_nexthop_group_vr_link;
6184 
6185 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6186 	if (err)
6187 		goto err_fib4_entry_type_set;
6188 
6189 	fib4_entry->fi = fen_info->fi;
6190 	fib_info_hold(fib4_entry->fi);
6191 	fib4_entry->tb_id = fen_info->tb_id;
6192 	fib4_entry->type = fen_info->type;
6193 	fib4_entry->tos = fen_info->tos;
6194 
6195 	fib_entry->fib_node = fib_node;
6196 
6197 	return fib4_entry;
6198 
6199 err_fib4_entry_type_set:
6200 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6201 err_nexthop_group_vr_link:
6202 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6203 err_nexthop4_group_get:
6204 	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
6205 err_fib_entry_priv_create:
6206 	kfree(fib4_entry);
6207 	return ERR_PTR(err);
6208 }
6209 
6210 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6211 					struct mlxsw_sp_fib4_entry *fib4_entry)
6212 {
6213 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6214 
6215 	fib_info_put(fib4_entry->fi);
6216 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6217 	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6218 					 fib_node->fib);
6219 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6220 	mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
6221 	kfree(fib4_entry);
6222 }
6223 
6224 static struct mlxsw_sp_fib4_entry *
6225 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6226 			   const struct fib_entry_notifier_info *fen_info)
6227 {
6228 	struct mlxsw_sp_fib4_entry *fib4_entry;
6229 	struct mlxsw_sp_fib_node *fib_node;
6230 	struct mlxsw_sp_fib *fib;
6231 	struct mlxsw_sp_vr *vr;
6232 
6233 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6234 	if (!vr)
6235 		return NULL;
6236 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6237 
6238 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6239 					    sizeof(fen_info->dst),
6240 					    fen_info->dst_len);
6241 	if (!fib_node)
6242 		return NULL;
6243 
6244 	fib4_entry = container_of(fib_node->fib_entry,
6245 				  struct mlxsw_sp_fib4_entry, common);
6246 	if (fib4_entry->tb_id == fen_info->tb_id &&
6247 	    fib4_entry->tos == fen_info->tos &&
6248 	    fib4_entry->type == fen_info->type &&
6249 	    fib4_entry->fi == fen_info->fi)
6250 		return fib4_entry;
6251 
6252 	return NULL;
6253 }
6254 
6255 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6256 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6257 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6258 	.key_len = sizeof(struct mlxsw_sp_fib_key),
6259 	.automatic_shrinking = true,
6260 };
6261 
6262 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6263 				    struct mlxsw_sp_fib_node *fib_node)
6264 {
6265 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6266 				      mlxsw_sp_fib_ht_params);
6267 }
6268 
6269 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6270 				     struct mlxsw_sp_fib_node *fib_node)
6271 {
6272 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6273 			       mlxsw_sp_fib_ht_params);
6274 }
6275 
6276 static struct mlxsw_sp_fib_node *
6277 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6278 			 size_t addr_len, unsigned char prefix_len)
6279 {
6280 	struct mlxsw_sp_fib_key key;
6281 
6282 	memset(&key, 0, sizeof(key));
6283 	memcpy(key.addr, addr, addr_len);
6284 	key.prefix_len = prefix_len;
6285 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6286 }
6287 
6288 static struct mlxsw_sp_fib_node *
6289 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6290 			 size_t addr_len, unsigned char prefix_len)
6291 {
6292 	struct mlxsw_sp_fib_node *fib_node;
6293 
6294 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6295 	if (!fib_node)
6296 		return NULL;
6297 
6298 	list_add(&fib_node->list, &fib->node_list);
6299 	memcpy(fib_node->key.addr, addr, addr_len);
6300 	fib_node->key.prefix_len = prefix_len;
6301 
6302 	return fib_node;
6303 }
6304 
6305 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6306 {
6307 	list_del(&fib_node->list);
6308 	kfree(fib_node);
6309 }
6310 
6311 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6312 				      struct mlxsw_sp_fib_node *fib_node)
6313 {
6314 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6315 	struct mlxsw_sp_fib *fib = fib_node->fib;
6316 	struct mlxsw_sp_lpm_tree *lpm_tree;
6317 	int err;
6318 
6319 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6320 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6321 		goto out;
6322 
6323 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6324 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6325 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6326 					 fib->proto);
6327 	if (IS_ERR(lpm_tree))
6328 		return PTR_ERR(lpm_tree);
6329 
6330 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6331 	if (err)
6332 		goto err_lpm_tree_replace;
6333 
6334 out:
6335 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6336 	return 0;
6337 
6338 err_lpm_tree_replace:
6339 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6340 	return err;
6341 }
6342 
6343 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6344 					 struct mlxsw_sp_fib_node *fib_node)
6345 {
6346 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6347 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6348 	struct mlxsw_sp_fib *fib = fib_node->fib;
6349 	int err;
6350 
6351 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6352 		return;
6353 	/* Try to construct a new LPM tree from the current prefix usage
6354 	 * minus the unused one. If we fail, continue using the old one.
6355 	 */
6356 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6357 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6358 				    fib_node->key.prefix_len);
6359 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6360 					 fib->proto);
6361 	if (IS_ERR(lpm_tree))
6362 		return;
6363 
6364 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6365 	if (err)
6366 		goto err_lpm_tree_replace;
6367 
6368 	return;
6369 
6370 err_lpm_tree_replace:
6371 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6372 }
6373 
6374 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6375 				  struct mlxsw_sp_fib_node *fib_node,
6376 				  struct mlxsw_sp_fib *fib)
6377 {
6378 	int err;
6379 
6380 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
6381 	if (err)
6382 		return err;
6383 	fib_node->fib = fib;
6384 
6385 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6386 	if (err)
6387 		goto err_fib_lpm_tree_link;
6388 
6389 	return 0;
6390 
6391 err_fib_lpm_tree_link:
6392 	fib_node->fib = NULL;
6393 	mlxsw_sp_fib_node_remove(fib, fib_node);
6394 	return err;
6395 }
6396 
6397 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6398 				   struct mlxsw_sp_fib_node *fib_node)
6399 {
6400 	struct mlxsw_sp_fib *fib = fib_node->fib;
6401 
6402 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6403 	fib_node->fib = NULL;
6404 	mlxsw_sp_fib_node_remove(fib, fib_node);
6405 }
6406 
6407 static struct mlxsw_sp_fib_node *
6408 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6409 		      size_t addr_len, unsigned char prefix_len,
6410 		      enum mlxsw_sp_l3proto proto)
6411 {
6412 	struct mlxsw_sp_fib_node *fib_node;
6413 	struct mlxsw_sp_fib *fib;
6414 	struct mlxsw_sp_vr *vr;
6415 	int err;
6416 
6417 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6418 	if (IS_ERR(vr))
6419 		return ERR_CAST(vr);
6420 	fib = mlxsw_sp_vr_fib(vr, proto);
6421 
6422 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6423 	if (fib_node)
6424 		return fib_node;
6425 
6426 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6427 	if (!fib_node) {
6428 		err = -ENOMEM;
6429 		goto err_fib_node_create;
6430 	}
6431 
6432 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6433 	if (err)
6434 		goto err_fib_node_init;
6435 
6436 	return fib_node;
6437 
6438 err_fib_node_init:
6439 	mlxsw_sp_fib_node_destroy(fib_node);
6440 err_fib_node_create:
6441 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6442 	return ERR_PTR(err);
6443 }
6444 
6445 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6446 				  struct mlxsw_sp_fib_node *fib_node)
6447 {
6448 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6449 
6450 	if (fib_node->fib_entry)
6451 		return;
6452 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6453 	mlxsw_sp_fib_node_destroy(fib_node);
6454 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6455 }
6456 
6457 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6458 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6459 					struct mlxsw_sp_fib_entry *fib_entry)
6460 {
6461 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6462 	bool is_new = !fib_node->fib_entry;
6463 	int err;
6464 
6465 	fib_node->fib_entry = fib_entry;
6466 
6467 	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
6468 	if (err)
6469 		goto err_fib_entry_update;
6470 
6471 	return 0;
6472 
6473 err_fib_entry_update:
6474 	fib_node->fib_entry = NULL;
6475 	return err;
6476 }
6477 
6478 static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6479 					    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6480 					    struct mlxsw_sp_fib_entry *fib_entry)
6481 {
6482 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6483 	int err;
6484 
6485 	err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
6486 	fib_node->fib_entry = NULL;
6487 	return err;
6488 }
6489 
6490 static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6491 					   struct mlxsw_sp_fib_entry *fib_entry)
6492 {
6493 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6494 
6495 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6496 	__mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
6497 }
6498 
6499 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6500 {
6501 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6502 	struct mlxsw_sp_fib4_entry *fib4_replaced;
6503 
6504 	if (!fib_node->fib_entry)
6505 		return true;
6506 
6507 	fib4_replaced = container_of(fib_node->fib_entry,
6508 				     struct mlxsw_sp_fib4_entry, common);
6509 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6510 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
6511 		return false;
6512 
6513 	return true;
6514 }
6515 
6516 static int
6517 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6518 			     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6519 			     const struct fib_entry_notifier_info *fen_info)
6520 {
6521 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6522 	struct mlxsw_sp_fib_entry *replaced;
6523 	struct mlxsw_sp_fib_node *fib_node;
6524 	int err;
6525 
6526 	if (fen_info->fi->nh &&
6527 	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6528 		return 0;
6529 
6530 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6531 					 &fen_info->dst, sizeof(fen_info->dst),
6532 					 fen_info->dst_len,
6533 					 MLXSW_SP_L3_PROTO_IPV4);
6534 	if (IS_ERR(fib_node)) {
6535 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6536 		return PTR_ERR(fib_node);
6537 	}
6538 
6539 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6540 	if (IS_ERR(fib4_entry)) {
6541 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6542 		err = PTR_ERR(fib4_entry);
6543 		goto err_fib4_entry_create;
6544 	}
6545 
6546 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6547 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6548 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6549 		return 0;
6550 	}
6551 
6552 	replaced = fib_node->fib_entry;
6553 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
6554 	if (err) {
6555 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6556 		goto err_fib_node_entry_link;
6557 	}
6558 
6559 	/* Nothing to replace */
6560 	if (!replaced)
6561 		return 0;
6562 
6563 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6564 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6565 				     common);
6566 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6567 
6568 	return 0;
6569 
6570 err_fib_node_entry_link:
6571 	fib_node->fib_entry = replaced;
6572 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6573 err_fib4_entry_create:
6574 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6575 	return err;
6576 }
6577 
6578 static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6579 				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6580 				    struct fib_entry_notifier_info *fen_info)
6581 {
6582 	struct mlxsw_sp_fib4_entry *fib4_entry;
6583 	struct mlxsw_sp_fib_node *fib_node;
6584 	int err;
6585 
6586 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6587 	if (!fib4_entry)
6588 		return 0;
6589 	fib_node = fib4_entry->common.fib_node;
6590 
6591 	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
6592 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6593 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6594 	return err;
6595 }
6596 
6597 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6598 {
6599 	/* Multicast routes aren't supported, so ignore them. Neighbour
6600 	 * Discovery packets are specifically trapped.
6601 	 */
6602 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6603 		return true;
6604 
6605 	/* Cloned routes are irrelevant in the forwarding path. */
6606 	if (rt->fib6_flags & RTF_CACHE)
6607 		return true;
6608 
6609 	return false;
6610 }
6611 
6612 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6613 {
6614 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6615 
6616 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6617 	if (!mlxsw_sp_rt6)
6618 		return ERR_PTR(-ENOMEM);
6619 
6620 	/* In case of route replace, replaced route is deleted with
6621 	 * no notification. Take reference to prevent accessing freed
6622 	 * memory.
6623 	 */
6624 	mlxsw_sp_rt6->rt = rt;
6625 	fib6_info_hold(rt);
6626 
6627 	return mlxsw_sp_rt6;
6628 }
6629 
6630 #if IS_ENABLED(CONFIG_IPV6)
6631 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6632 {
6633 	fib6_info_release(rt);
6634 }
6635 #else
6636 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6637 {
6638 }
6639 #endif
6640 
6641 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6642 {
6643 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6644 
6645 	if (!mlxsw_sp_rt6->rt->nh)
6646 		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6647 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6648 	kfree(mlxsw_sp_rt6);
6649 }
6650 
6651 static struct fib6_info *
6652 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6653 {
6654 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6655 				list)->rt;
6656 }
6657 
6658 static struct mlxsw_sp_rt6 *
6659 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6660 			    const struct fib6_info *rt)
6661 {
6662 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6663 
6664 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6665 		if (mlxsw_sp_rt6->rt == rt)
6666 			return mlxsw_sp_rt6;
6667 	}
6668 
6669 	return NULL;
6670 }
6671 
6672 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6673 					const struct fib6_info *rt,
6674 					enum mlxsw_sp_ipip_type *ret)
6675 {
6676 	return rt->fib6_nh->fib_nh_dev &&
6677 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6678 }
6679 
6680 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6681 				  struct mlxsw_sp_nexthop_group *nh_grp,
6682 				  struct mlxsw_sp_nexthop *nh,
6683 				  const struct fib6_info *rt)
6684 {
6685 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6686 
6687 	nh->nhgi = nh_grp->nhgi;
6688 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6689 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6690 #if IS_ENABLED(CONFIG_IPV6)
6691 	nh->neigh_tbl = &nd_tbl;
6692 #endif
6693 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6694 
6695 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6696 
6697 	if (!dev)
6698 		return 0;
6699 	nh->ifindex = dev->ifindex;
6700 
6701 	return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6702 }
6703 
6704 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6705 				   struct mlxsw_sp_nexthop *nh)
6706 {
6707 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6708 	list_del(&nh->router_list_node);
6709 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6710 }
6711 
6712 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6713 				    const struct fib6_info *rt)
6714 {
6715 	return rt->fib6_nh->fib_nh_gw_family ||
6716 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6717 }
6718 
6719 static int
6720 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6721 				  struct mlxsw_sp_nexthop_group *nh_grp,
6722 				  struct mlxsw_sp_fib6_entry *fib6_entry)
6723 {
6724 	struct mlxsw_sp_nexthop_group_info *nhgi;
6725 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6726 	struct mlxsw_sp_nexthop *nh;
6727 	int err, i;
6728 
6729 	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6730 		       GFP_KERNEL);
6731 	if (!nhgi)
6732 		return -ENOMEM;
6733 	nh_grp->nhgi = nhgi;
6734 	nhgi->nh_grp = nh_grp;
6735 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6736 					struct mlxsw_sp_rt6, list);
6737 	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6738 	nhgi->count = fib6_entry->nrt6;
6739 	for (i = 0; i < nhgi->count; i++) {
6740 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
6741 
6742 		nh = &nhgi->nexthops[i];
6743 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6744 		if (err)
6745 			goto err_nexthop6_init;
6746 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6747 	}
6748 	nh_grp->nhgi = nhgi;
6749 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
6750 	if (err)
6751 		goto err_group_inc;
6752 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6753 	if (err)
6754 		goto err_group_refresh;
6755 
6756 	return 0;
6757 
6758 err_group_refresh:
6759 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6760 err_group_inc:
6761 	i = nhgi->count;
6762 err_nexthop6_init:
6763 	for (i--; i >= 0; i--) {
6764 		nh = &nhgi->nexthops[i];
6765 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6766 	}
6767 	kfree(nhgi);
6768 	return err;
6769 }
6770 
6771 static void
6772 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6773 				  struct mlxsw_sp_nexthop_group *nh_grp)
6774 {
6775 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6776 	int i;
6777 
6778 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6779 	for (i = nhgi->count - 1; i >= 0; i--) {
6780 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6781 
6782 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6783 	}
6784 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6785 	WARN_ON_ONCE(nhgi->adj_index_valid);
6786 	kfree(nhgi);
6787 }
6788 
6789 static struct mlxsw_sp_nexthop_group *
6790 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6791 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6792 {
6793 	struct mlxsw_sp_nexthop_group *nh_grp;
6794 	int err;
6795 
6796 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6797 	if (!nh_grp)
6798 		return ERR_PTR(-ENOMEM);
6799 	INIT_LIST_HEAD(&nh_grp->vr_list);
6800 	err = rhashtable_init(&nh_grp->vr_ht,
6801 			      &mlxsw_sp_nexthop_group_vr_ht_params);
6802 	if (err)
6803 		goto err_nexthop_group_vr_ht_init;
6804 	INIT_LIST_HEAD(&nh_grp->fib_list);
6805 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6806 
6807 	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6808 	if (err)
6809 		goto err_nexthop_group_info_init;
6810 
6811 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6812 	if (err)
6813 		goto err_nexthop_group_insert;
6814 
6815 	nh_grp->can_destroy = true;
6816 
6817 	return nh_grp;
6818 
6819 err_nexthop_group_insert:
6820 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6821 err_nexthop_group_info_init:
6822 	rhashtable_destroy(&nh_grp->vr_ht);
6823 err_nexthop_group_vr_ht_init:
6824 	kfree(nh_grp);
6825 	return ERR_PTR(err);
6826 }
6827 
6828 static void
6829 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6830 				struct mlxsw_sp_nexthop_group *nh_grp)
6831 {
6832 	if (!nh_grp->can_destroy)
6833 		return;
6834 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6835 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6836 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6837 	rhashtable_destroy(&nh_grp->vr_ht);
6838 	kfree(nh_grp);
6839 }
6840 
6841 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6842 				       struct mlxsw_sp_fib6_entry *fib6_entry)
6843 {
6844 	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6845 	struct mlxsw_sp_nexthop_group *nh_grp;
6846 
6847 	if (rt->nh) {
6848 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6849 							   rt->nh->id);
6850 		if (WARN_ON_ONCE(!nh_grp))
6851 			return -EINVAL;
6852 		goto out;
6853 	}
6854 
6855 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6856 	if (!nh_grp) {
6857 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6858 		if (IS_ERR(nh_grp))
6859 			return PTR_ERR(nh_grp);
6860 	}
6861 
6862 	/* The route and the nexthop are described by the same struct, so we
6863 	 * need to the update the nexthop offload indication for the new route.
6864 	 */
6865 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6866 
6867 out:
6868 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6869 		      &nh_grp->fib_list);
6870 	fib6_entry->common.nh_group = nh_grp;
6871 
6872 	return 0;
6873 }
6874 
6875 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6876 					struct mlxsw_sp_fib_entry *fib_entry)
6877 {
6878 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6879 
6880 	list_del(&fib_entry->nexthop_group_node);
6881 	if (!list_empty(&nh_grp->fib_list))
6882 		return;
6883 
6884 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6885 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6886 		return;
6887 	}
6888 
6889 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
6890 }
6891 
6892 static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6893 					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6894 					  struct mlxsw_sp_fib6_entry *fib6_entry)
6895 {
6896 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6897 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6898 	int err;
6899 
6900 	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6901 	fib6_entry->common.nh_group = NULL;
6902 	list_del(&fib6_entry->common.nexthop_group_node);
6903 
6904 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6905 	if (err)
6906 		goto err_nexthop6_group_get;
6907 
6908 	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6909 					     fib_node->fib);
6910 	if (err)
6911 		goto err_nexthop_group_vr_link;
6912 
6913 	/* In case this entry is offloaded, then the adjacency index
6914 	 * currently associated with it in the device's table is that
6915 	 * of the old group. Start using the new one instead.
6916 	 */
6917 	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
6918 					  &fib6_entry->common, false);
6919 	if (err)
6920 		goto err_fib_entry_update;
6921 
6922 	if (list_empty(&old_nh_grp->fib_list))
6923 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6924 
6925 	return 0;
6926 
6927 err_fib_entry_update:
6928 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6929 					 fib_node->fib);
6930 err_nexthop_group_vr_link:
6931 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6932 err_nexthop6_group_get:
6933 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6934 		      &old_nh_grp->fib_list);
6935 	fib6_entry->common.nh_group = old_nh_grp;
6936 	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6937 	return err;
6938 }
6939 
6940 static int
6941 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6942 				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6943 				struct mlxsw_sp_fib6_entry *fib6_entry,
6944 				struct fib6_info **rt_arr, unsigned int nrt6)
6945 {
6946 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6947 	int err, i;
6948 
6949 	for (i = 0; i < nrt6; i++) {
6950 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6951 		if (IS_ERR(mlxsw_sp_rt6)) {
6952 			err = PTR_ERR(mlxsw_sp_rt6);
6953 			goto err_rt6_create;
6954 		}
6955 
6956 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6957 		fib6_entry->nrt6++;
6958 	}
6959 
6960 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
6961 	if (err)
6962 		goto err_nexthop6_group_update;
6963 
6964 	return 0;
6965 
6966 err_nexthop6_group_update:
6967 	i = nrt6;
6968 err_rt6_create:
6969 	for (i--; i >= 0; i--) {
6970 		fib6_entry->nrt6--;
6971 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6972 					       struct mlxsw_sp_rt6, list);
6973 		list_del(&mlxsw_sp_rt6->list);
6974 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6975 	}
6976 	return err;
6977 }
6978 
6979 static void
6980 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
6981 				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6982 				struct mlxsw_sp_fib6_entry *fib6_entry,
6983 				struct fib6_info **rt_arr, unsigned int nrt6)
6984 {
6985 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6986 	int i;
6987 
6988 	for (i = 0; i < nrt6; i++) {
6989 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
6990 							   rt_arr[i]);
6991 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
6992 			continue;
6993 
6994 		fib6_entry->nrt6--;
6995 		list_del(&mlxsw_sp_rt6->list);
6996 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6997 	}
6998 
6999 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
7000 }
7001 
7002 static int
7003 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
7004 				   struct mlxsw_sp_fib_entry *fib_entry,
7005 				   const struct fib6_info *rt)
7006 {
7007 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
7008 	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
7009 	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
7010 	struct mlxsw_sp_router *router = mlxsw_sp->router;
7011 	int ifindex = nhgi->nexthops[0].ifindex;
7012 	struct mlxsw_sp_ipip_entry *ipip_entry;
7013 
7014 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7015 	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
7016 						       MLXSW_SP_L3_PROTO_IPV6,
7017 						       dip);
7018 
7019 	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
7020 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
7021 		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
7022 						     ipip_entry);
7023 	}
7024 	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
7025 					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
7026 		u32 tunnel_index;
7027 
7028 		tunnel_index = router->nve_decap_config.tunnel_index;
7029 		fib_entry->decap.tunnel_index = tunnel_index;
7030 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
7031 	}
7032 
7033 	return 0;
7034 }
7035 
7036 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
7037 					struct mlxsw_sp_fib_entry *fib_entry,
7038 					const struct fib6_info *rt)
7039 {
7040 	if (rt->fib6_flags & RTF_LOCAL)
7041 		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
7042 							  rt);
7043 	if (rt->fib6_flags & RTF_ANYCAST)
7044 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7045 	else if (rt->fib6_type == RTN_BLACKHOLE)
7046 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
7047 	else if (rt->fib6_flags & RTF_REJECT)
7048 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
7049 	else if (fib_entry->nh_group->nhgi->gateway)
7050 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
7051 	else
7052 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
7053 
7054 	return 0;
7055 }
7056 
7057 static void
7058 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
7059 {
7060 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
7061 
7062 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
7063 				 list) {
7064 		fib6_entry->nrt6--;
7065 		list_del(&mlxsw_sp_rt6->list);
7066 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7067 	}
7068 }
7069 
7070 static struct mlxsw_sp_fib6_entry *
7071 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
7072 			   struct mlxsw_sp_fib_node *fib_node,
7073 			   struct fib6_info **rt_arr, unsigned int nrt6)
7074 {
7075 	struct mlxsw_sp_fib6_entry *fib6_entry;
7076 	struct mlxsw_sp_fib_entry *fib_entry;
7077 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7078 	int err, i;
7079 
7080 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
7081 	if (!fib6_entry)
7082 		return ERR_PTR(-ENOMEM);
7083 	fib_entry = &fib6_entry->common;
7084 
7085 	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
7086 	if (IS_ERR(fib_entry->priv)) {
7087 		err = PTR_ERR(fib_entry->priv);
7088 		goto err_fib_entry_priv_create;
7089 	}
7090 
7091 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
7092 
7093 	for (i = 0; i < nrt6; i++) {
7094 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7095 		if (IS_ERR(mlxsw_sp_rt6)) {
7096 			err = PTR_ERR(mlxsw_sp_rt6);
7097 			goto err_rt6_create;
7098 		}
7099 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7100 		fib6_entry->nrt6++;
7101 	}
7102 
7103 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7104 	if (err)
7105 		goto err_nexthop6_group_get;
7106 
7107 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
7108 					     fib_node->fib);
7109 	if (err)
7110 		goto err_nexthop_group_vr_link;
7111 
7112 	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
7113 	if (err)
7114 		goto err_fib6_entry_type_set;
7115 
7116 	fib_entry->fib_node = fib_node;
7117 
7118 	return fib6_entry;
7119 
7120 err_fib6_entry_type_set:
7121 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
7122 err_nexthop_group_vr_link:
7123 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
7124 err_nexthop6_group_get:
7125 	i = nrt6;
7126 err_rt6_create:
7127 	for (i--; i >= 0; i--) {
7128 		fib6_entry->nrt6--;
7129 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7130 					       struct mlxsw_sp_rt6, list);
7131 		list_del(&mlxsw_sp_rt6->list);
7132 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7133 	}
7134 	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
7135 err_fib_entry_priv_create:
7136 	kfree(fib6_entry);
7137 	return ERR_PTR(err);
7138 }
7139 
7140 static void
7141 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
7142 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7143 {
7144 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
7145 }
7146 
7147 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
7148 					struct mlxsw_sp_fib6_entry *fib6_entry)
7149 {
7150 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7151 
7152 	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
7153 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7154 					 fib_node->fib);
7155 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7156 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7157 	WARN_ON(fib6_entry->nrt6);
7158 	mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
7159 	kfree(fib6_entry);
7160 }
7161 
7162 static struct mlxsw_sp_fib6_entry *
7163 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7164 			   const struct fib6_info *rt)
7165 {
7166 	struct mlxsw_sp_fib6_entry *fib6_entry;
7167 	struct mlxsw_sp_fib_node *fib_node;
7168 	struct mlxsw_sp_fib *fib;
7169 	struct fib6_info *cmp_rt;
7170 	struct mlxsw_sp_vr *vr;
7171 
7172 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7173 	if (!vr)
7174 		return NULL;
7175 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7176 
7177 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7178 					    sizeof(rt->fib6_dst.addr),
7179 					    rt->fib6_dst.plen);
7180 	if (!fib_node)
7181 		return NULL;
7182 
7183 	fib6_entry = container_of(fib_node->fib_entry,
7184 				  struct mlxsw_sp_fib6_entry, common);
7185 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7186 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7187 	    rt->fib6_metric == cmp_rt->fib6_metric &&
7188 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7189 		return fib6_entry;
7190 
7191 	return NULL;
7192 }
7193 
7194 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7195 {
7196 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7197 	struct mlxsw_sp_fib6_entry *fib6_replaced;
7198 	struct fib6_info *rt, *rt_replaced;
7199 
7200 	if (!fib_node->fib_entry)
7201 		return true;
7202 
7203 	fib6_replaced = container_of(fib_node->fib_entry,
7204 				     struct mlxsw_sp_fib6_entry,
7205 				     common);
7206 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7207 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7208 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7209 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7210 		return false;
7211 
7212 	return true;
7213 }
7214 
7215 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7216 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7217 					struct fib6_info **rt_arr, unsigned int nrt6)
7218 {
7219 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7220 	struct mlxsw_sp_fib_entry *replaced;
7221 	struct mlxsw_sp_fib_node *fib_node;
7222 	struct fib6_info *rt = rt_arr[0];
7223 	int err;
7224 
7225 	if (rt->fib6_src.plen)
7226 		return -EINVAL;
7227 
7228 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7229 		return 0;
7230 
7231 	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7232 		return 0;
7233 
7234 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7235 					 &rt->fib6_dst.addr,
7236 					 sizeof(rt->fib6_dst.addr),
7237 					 rt->fib6_dst.plen,
7238 					 MLXSW_SP_L3_PROTO_IPV6);
7239 	if (IS_ERR(fib_node))
7240 		return PTR_ERR(fib_node);
7241 
7242 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7243 						nrt6);
7244 	if (IS_ERR(fib6_entry)) {
7245 		err = PTR_ERR(fib6_entry);
7246 		goto err_fib6_entry_create;
7247 	}
7248 
7249 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7250 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7251 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7252 		return 0;
7253 	}
7254 
7255 	replaced = fib_node->fib_entry;
7256 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
7257 	if (err)
7258 		goto err_fib_node_entry_link;
7259 
7260 	/* Nothing to replace */
7261 	if (!replaced)
7262 		return 0;
7263 
7264 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7265 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7266 				     common);
7267 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7268 
7269 	return 0;
7270 
7271 err_fib_node_entry_link:
7272 	fib_node->fib_entry = replaced;
7273 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7274 err_fib6_entry_create:
7275 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7276 	return err;
7277 }
7278 
7279 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7280 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7281 				       struct fib6_info **rt_arr, unsigned int nrt6)
7282 {
7283 	struct mlxsw_sp_fib6_entry *fib6_entry;
7284 	struct mlxsw_sp_fib_node *fib_node;
7285 	struct fib6_info *rt = rt_arr[0];
7286 	int err;
7287 
7288 	if (rt->fib6_src.plen)
7289 		return -EINVAL;
7290 
7291 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7292 		return 0;
7293 
7294 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7295 					 &rt->fib6_dst.addr,
7296 					 sizeof(rt->fib6_dst.addr),
7297 					 rt->fib6_dst.plen,
7298 					 MLXSW_SP_L3_PROTO_IPV6);
7299 	if (IS_ERR(fib_node))
7300 		return PTR_ERR(fib_node);
7301 
7302 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7303 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7304 		return -EINVAL;
7305 	}
7306 
7307 	fib6_entry = container_of(fib_node->fib_entry,
7308 				  struct mlxsw_sp_fib6_entry, common);
7309 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
7310 	if (err)
7311 		goto err_fib6_entry_nexthop_add;
7312 
7313 	return 0;
7314 
7315 err_fib6_entry_nexthop_add:
7316 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7317 	return err;
7318 }
7319 
7320 static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7321 				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7322 				    struct fib6_info **rt_arr, unsigned int nrt6)
7323 {
7324 	struct mlxsw_sp_fib6_entry *fib6_entry;
7325 	struct mlxsw_sp_fib_node *fib_node;
7326 	struct fib6_info *rt = rt_arr[0];
7327 	int err;
7328 
7329 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7330 		return 0;
7331 
7332 	/* Multipath routes are first added to the FIB trie and only then
7333 	 * notified. If we vetoed the addition, we will get a delete
7334 	 * notification for a route we do not have. Therefore, do not warn if
7335 	 * route was not found.
7336 	 */
7337 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7338 	if (!fib6_entry)
7339 		return 0;
7340 
7341 	/* If not all the nexthops are deleted, then only reduce the nexthop
7342 	 * group.
7343 	 */
7344 	if (nrt6 != fib6_entry->nrt6) {
7345 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
7346 		return 0;
7347 	}
7348 
7349 	fib_node = fib6_entry->common.fib_node;
7350 
7351 	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
7352 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7353 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7354 	return err;
7355 }
7356 
7357 static struct mlxsw_sp_mr_table *
7358 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7359 {
7360 	if (family == RTNL_FAMILY_IPMR)
7361 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7362 	else
7363 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7364 }
7365 
7366 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7367 				     struct mfc_entry_notifier_info *men_info,
7368 				     bool replace)
7369 {
7370 	struct mlxsw_sp_mr_table *mrt;
7371 	struct mlxsw_sp_vr *vr;
7372 
7373 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7374 	if (IS_ERR(vr))
7375 		return PTR_ERR(vr);
7376 
7377 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7378 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7379 }
7380 
7381 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7382 				      struct mfc_entry_notifier_info *men_info)
7383 {
7384 	struct mlxsw_sp_mr_table *mrt;
7385 	struct mlxsw_sp_vr *vr;
7386 
7387 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7388 	if (WARN_ON(!vr))
7389 		return;
7390 
7391 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7392 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7393 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7394 }
7395 
7396 static int
7397 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7398 			      struct vif_entry_notifier_info *ven_info)
7399 {
7400 	struct mlxsw_sp_mr_table *mrt;
7401 	struct mlxsw_sp_rif *rif;
7402 	struct mlxsw_sp_vr *vr;
7403 
7404 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7405 	if (IS_ERR(vr))
7406 		return PTR_ERR(vr);
7407 
7408 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7409 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7410 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7411 				   ven_info->vif_index,
7412 				   ven_info->vif_flags, rif);
7413 }
7414 
7415 static void
7416 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7417 			      struct vif_entry_notifier_info *ven_info)
7418 {
7419 	struct mlxsw_sp_mr_table *mrt;
7420 	struct mlxsw_sp_vr *vr;
7421 
7422 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7423 	if (WARN_ON(!vr))
7424 		return;
7425 
7426 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7427 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7428 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7429 }
7430 
7431 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7432 				     struct mlxsw_sp_fib_node *fib_node)
7433 {
7434 	struct mlxsw_sp_fib4_entry *fib4_entry;
7435 
7436 	fib4_entry = container_of(fib_node->fib_entry,
7437 				  struct mlxsw_sp_fib4_entry, common);
7438 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7439 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7440 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7441 }
7442 
7443 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7444 				     struct mlxsw_sp_fib_node *fib_node)
7445 {
7446 	struct mlxsw_sp_fib6_entry *fib6_entry;
7447 
7448 	fib6_entry = container_of(fib_node->fib_entry,
7449 				  struct mlxsw_sp_fib6_entry, common);
7450 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7451 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7452 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7453 }
7454 
7455 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7456 				    struct mlxsw_sp_fib_node *fib_node)
7457 {
7458 	switch (fib_node->fib->proto) {
7459 	case MLXSW_SP_L3_PROTO_IPV4:
7460 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7461 		break;
7462 	case MLXSW_SP_L3_PROTO_IPV6:
7463 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7464 		break;
7465 	}
7466 }
7467 
7468 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7469 				  struct mlxsw_sp_vr *vr,
7470 				  enum mlxsw_sp_l3proto proto)
7471 {
7472 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7473 	struct mlxsw_sp_fib_node *fib_node, *tmp;
7474 
7475 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7476 		bool do_break = &tmp->list == &fib->node_list;
7477 
7478 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7479 		if (do_break)
7480 			break;
7481 	}
7482 }
7483 
7484 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7485 {
7486 	int i, j;
7487 
7488 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
7489 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7490 
7491 		if (!mlxsw_sp_vr_is_used(vr))
7492 			continue;
7493 
7494 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7495 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7496 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7497 
7498 		/* If virtual router was only used for IPv4, then it's no
7499 		 * longer used.
7500 		 */
7501 		if (!mlxsw_sp_vr_is_used(vr))
7502 			continue;
7503 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7504 	}
7505 }
7506 
7507 struct mlxsw_sp_fib6_event {
7508 	struct fib6_info **rt_arr;
7509 	unsigned int nrt6;
7510 };
7511 
7512 struct mlxsw_sp_fib_event {
7513 	struct list_head list; /* node in fib queue */
7514 	union {
7515 		struct mlxsw_sp_fib6_event fib6_event;
7516 		struct fib_entry_notifier_info fen_info;
7517 		struct fib_rule_notifier_info fr_info;
7518 		struct fib_nh_notifier_info fnh_info;
7519 		struct mfc_entry_notifier_info men_info;
7520 		struct vif_entry_notifier_info ven_info;
7521 	};
7522 	struct mlxsw_sp *mlxsw_sp;
7523 	unsigned long event;
7524 	int family;
7525 };
7526 
7527 static int
7528 mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
7529 				struct fib6_entry_notifier_info *fen6_info)
7530 {
7531 	struct fib6_info *rt = fen6_info->rt;
7532 	struct fib6_info **rt_arr;
7533 	struct fib6_info *iter;
7534 	unsigned int nrt6;
7535 	int i = 0;
7536 
7537 	nrt6 = fen6_info->nsiblings + 1;
7538 
7539 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7540 	if (!rt_arr)
7541 		return -ENOMEM;
7542 
7543 	fib6_event->rt_arr = rt_arr;
7544 	fib6_event->nrt6 = nrt6;
7545 
7546 	rt_arr[0] = rt;
7547 	fib6_info_hold(rt);
7548 
7549 	if (!fen6_info->nsiblings)
7550 		return 0;
7551 
7552 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7553 		if (i == fen6_info->nsiblings)
7554 			break;
7555 
7556 		rt_arr[i + 1] = iter;
7557 		fib6_info_hold(iter);
7558 		i++;
7559 	}
7560 	WARN_ON_ONCE(i != fen6_info->nsiblings);
7561 
7562 	return 0;
7563 }
7564 
7565 static void
7566 mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
7567 {
7568 	int i;
7569 
7570 	for (i = 0; i < fib6_event->nrt6; i++)
7571 		mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
7572 	kfree(fib6_event->rt_arr);
7573 }
7574 
7575 static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
7576 					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7577 					       struct mlxsw_sp_fib_event *fib_event)
7578 {
7579 	int err;
7580 
7581 	mlxsw_sp_span_respin(mlxsw_sp);
7582 
7583 	switch (fib_event->event) {
7584 	case FIB_EVENT_ENTRY_REPLACE:
7585 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
7586 		if (err) {
7587 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7588 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7589 			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7590 							      &fib_event->fen_info);
7591 		}
7592 		fib_info_put(fib_event->fen_info.fi);
7593 		break;
7594 	case FIB_EVENT_ENTRY_DEL:
7595 		err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
7596 		if (err)
7597 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7598 		fib_info_put(fib_event->fen_info.fi);
7599 		break;
7600 	case FIB_EVENT_NH_ADD:
7601 	case FIB_EVENT_NH_DEL:
7602 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
7603 		fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
7604 		break;
7605 	}
7606 }
7607 
7608 static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
7609 					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7610 					       struct mlxsw_sp_fib_event *fib_event)
7611 {
7612 	struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
7613 	int err;
7614 
7615 	mlxsw_sp_span_respin(mlxsw_sp);
7616 
7617 	switch (fib_event->event) {
7618 	case FIB_EVENT_ENTRY_REPLACE:
7619 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7620 						   fib_event->fib6_event.nrt6);
7621 		if (err) {
7622 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7623 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7624 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7625 							      fib6_event->rt_arr,
7626 							      fib6_event->nrt6);
7627 		}
7628 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7629 		break;
7630 	case FIB_EVENT_ENTRY_APPEND:
7631 		err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7632 						  fib_event->fib6_event.nrt6);
7633 		if (err) {
7634 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7635 			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7636 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7637 							      fib6_event->rt_arr,
7638 							      fib6_event->nrt6);
7639 		}
7640 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7641 		break;
7642 	case FIB_EVENT_ENTRY_DEL:
7643 		err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7644 					       fib_event->fib6_event.nrt6);
7645 		if (err)
7646 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7647 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7648 		break;
7649 	}
7650 }
7651 
7652 static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
7653 						struct mlxsw_sp_fib_event *fib_event)
7654 {
7655 	bool replace;
7656 	int err;
7657 
7658 	rtnl_lock();
7659 	mutex_lock(&mlxsw_sp->router->lock);
7660 	switch (fib_event->event) {
7661 	case FIB_EVENT_ENTRY_REPLACE:
7662 	case FIB_EVENT_ENTRY_ADD:
7663 		replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
7664 
7665 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
7666 		if (err)
7667 			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7668 		mr_cache_put(fib_event->men_info.mfc);
7669 		break;
7670 	case FIB_EVENT_ENTRY_DEL:
7671 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
7672 		mr_cache_put(fib_event->men_info.mfc);
7673 		break;
7674 	case FIB_EVENT_VIF_ADD:
7675 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7676 						    &fib_event->ven_info);
7677 		if (err)
7678 			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7679 		dev_put(fib_event->ven_info.dev);
7680 		break;
7681 	case FIB_EVENT_VIF_DEL:
7682 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
7683 		dev_put(fib_event->ven_info.dev);
7684 		break;
7685 	}
7686 	mutex_unlock(&mlxsw_sp->router->lock);
7687 	rtnl_unlock();
7688 }
7689 
7690 static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
7691 {
7692 	struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
7693 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
7694 	struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
7695 	struct mlxsw_sp_fib_event *next_fib_event;
7696 	struct mlxsw_sp_fib_event *fib_event;
7697 	int last_family = AF_UNSPEC;
7698 	LIST_HEAD(fib_event_queue);
7699 
7700 	spin_lock_bh(&router->fib_event_queue_lock);
7701 	list_splice_init(&router->fib_event_queue, &fib_event_queue);
7702 	spin_unlock_bh(&router->fib_event_queue_lock);
7703 
7704 	/* Router lock is held here to make sure per-instance
7705 	 * operation context is not used in between FIB4/6 events
7706 	 * processing.
7707 	 */
7708 	mutex_lock(&router->lock);
7709 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
7710 	list_for_each_entry_safe(fib_event, next_fib_event,
7711 				 &fib_event_queue, list) {
7712 		/* Check if the next entry in the queue exists and it is
7713 		 * of the same type (family and event) as the currect one.
7714 		 * In that case it is permitted to do the bulking
7715 		 * of multiple FIB entries to a single register write.
7716 		 */
7717 		op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
7718 				  fib_event->family == next_fib_event->family &&
7719 				  fib_event->event == next_fib_event->event;
7720 		op_ctx->event = fib_event->event;
7721 
7722 		/* In case family of this and the previous entry are different, context
7723 		 * reinitialization is going to be needed now, indicate that.
7724 		 * Note that since last_family is initialized to AF_UNSPEC, this is always
7725 		 * going to happen for the first entry processed in the work.
7726 		 */
7727 		if (fib_event->family != last_family)
7728 			op_ctx->initialized = false;
7729 
7730 		switch (fib_event->family) {
7731 		case AF_INET:
7732 			mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
7733 							   fib_event);
7734 			break;
7735 		case AF_INET6:
7736 			mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
7737 							   fib_event);
7738 			break;
7739 		case RTNL_FAMILY_IP6MR:
7740 		case RTNL_FAMILY_IPMR:
7741 			/* Unlock here as inside FIBMR the lock is taken again
7742 			 * under RTNL. The per-instance operation context
7743 			 * is not used by FIBMR.
7744 			 */
7745 			mutex_unlock(&router->lock);
7746 			mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
7747 							    fib_event);
7748 			mutex_lock(&router->lock);
7749 			break;
7750 		default:
7751 			WARN_ON_ONCE(1);
7752 		}
7753 		last_family = fib_event->family;
7754 		kfree(fib_event);
7755 		cond_resched();
7756 	}
7757 	WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
7758 	mutex_unlock(&router->lock);
7759 }
7760 
7761 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
7762 				       struct fib_notifier_info *info)
7763 {
7764 	struct fib_entry_notifier_info *fen_info;
7765 	struct fib_nh_notifier_info *fnh_info;
7766 
7767 	switch (fib_event->event) {
7768 	case FIB_EVENT_ENTRY_REPLACE:
7769 	case FIB_EVENT_ENTRY_DEL:
7770 		fen_info = container_of(info, struct fib_entry_notifier_info,
7771 					info);
7772 		fib_event->fen_info = *fen_info;
7773 		/* Take reference on fib_info to prevent it from being
7774 		 * freed while event is queued. Release it afterwards.
7775 		 */
7776 		fib_info_hold(fib_event->fen_info.fi);
7777 		break;
7778 	case FIB_EVENT_NH_ADD:
7779 	case FIB_EVENT_NH_DEL:
7780 		fnh_info = container_of(info, struct fib_nh_notifier_info,
7781 					info);
7782 		fib_event->fnh_info = *fnh_info;
7783 		fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
7784 		break;
7785 	}
7786 }
7787 
7788 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
7789 				      struct fib_notifier_info *info)
7790 {
7791 	struct fib6_entry_notifier_info *fen6_info;
7792 	int err;
7793 
7794 	switch (fib_event->event) {
7795 	case FIB_EVENT_ENTRY_REPLACE:
7796 	case FIB_EVENT_ENTRY_APPEND:
7797 	case FIB_EVENT_ENTRY_DEL:
7798 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
7799 					 info);
7800 		err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
7801 						      fen6_info);
7802 		if (err)
7803 			return err;
7804 		break;
7805 	}
7806 
7807 	return 0;
7808 }
7809 
7810 static void
7811 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
7812 			    struct fib_notifier_info *info)
7813 {
7814 	switch (fib_event->event) {
7815 	case FIB_EVENT_ENTRY_REPLACE:
7816 	case FIB_EVENT_ENTRY_ADD:
7817 	case FIB_EVENT_ENTRY_DEL:
7818 		memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
7819 		mr_cache_hold(fib_event->men_info.mfc);
7820 		break;
7821 	case FIB_EVENT_VIF_ADD:
7822 	case FIB_EVENT_VIF_DEL:
7823 		memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
7824 		dev_hold(fib_event->ven_info.dev);
7825 		break;
7826 	}
7827 }
7828 
7829 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7830 					  struct fib_notifier_info *info,
7831 					  struct mlxsw_sp *mlxsw_sp)
7832 {
7833 	struct netlink_ext_ack *extack = info->extack;
7834 	struct fib_rule_notifier_info *fr_info;
7835 	struct fib_rule *rule;
7836 	int err = 0;
7837 
7838 	/* nothing to do at the moment */
7839 	if (event == FIB_EVENT_RULE_DEL)
7840 		return 0;
7841 
7842 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
7843 	rule = fr_info->rule;
7844 
7845 	/* Rule only affects locally generated traffic */
7846 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7847 		return 0;
7848 
7849 	switch (info->family) {
7850 	case AF_INET:
7851 		if (!fib4_rule_default(rule) && !rule->l3mdev)
7852 			err = -EOPNOTSUPP;
7853 		break;
7854 	case AF_INET6:
7855 		if (!fib6_rule_default(rule) && !rule->l3mdev)
7856 			err = -EOPNOTSUPP;
7857 		break;
7858 	case RTNL_FAMILY_IPMR:
7859 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
7860 			err = -EOPNOTSUPP;
7861 		break;
7862 	case RTNL_FAMILY_IP6MR:
7863 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7864 			err = -EOPNOTSUPP;
7865 		break;
7866 	}
7867 
7868 	if (err < 0)
7869 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7870 
7871 	return err;
7872 }
7873 
7874 /* Called with rcu_read_lock() */
7875 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7876 				     unsigned long event, void *ptr)
7877 {
7878 	struct mlxsw_sp_fib_event *fib_event;
7879 	struct fib_notifier_info *info = ptr;
7880 	struct mlxsw_sp_router *router;
7881 	int err;
7882 
7883 	if ((info->family != AF_INET && info->family != AF_INET6 &&
7884 	     info->family != RTNL_FAMILY_IPMR &&
7885 	     info->family != RTNL_FAMILY_IP6MR))
7886 		return NOTIFY_DONE;
7887 
7888 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7889 
7890 	switch (event) {
7891 	case FIB_EVENT_RULE_ADD:
7892 	case FIB_EVENT_RULE_DEL:
7893 		err = mlxsw_sp_router_fib_rule_event(event, info,
7894 						     router->mlxsw_sp);
7895 		return notifier_from_errno(err);
7896 	case FIB_EVENT_ENTRY_ADD:
7897 	case FIB_EVENT_ENTRY_REPLACE:
7898 	case FIB_EVENT_ENTRY_APPEND:
7899 		if (info->family == AF_INET) {
7900 			struct fib_entry_notifier_info *fen_info = ptr;
7901 
7902 			if (fen_info->fi->fib_nh_is_v6) {
7903 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7904 				return notifier_from_errno(-EINVAL);
7905 			}
7906 		}
7907 		break;
7908 	}
7909 
7910 	fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
7911 	if (!fib_event)
7912 		return NOTIFY_BAD;
7913 
7914 	fib_event->mlxsw_sp = router->mlxsw_sp;
7915 	fib_event->event = event;
7916 	fib_event->family = info->family;
7917 
7918 	switch (info->family) {
7919 	case AF_INET:
7920 		mlxsw_sp_router_fib4_event(fib_event, info);
7921 		break;
7922 	case AF_INET6:
7923 		err = mlxsw_sp_router_fib6_event(fib_event, info);
7924 		if (err)
7925 			goto err_fib_event;
7926 		break;
7927 	case RTNL_FAMILY_IP6MR:
7928 	case RTNL_FAMILY_IPMR:
7929 		mlxsw_sp_router_fibmr_event(fib_event, info);
7930 		break;
7931 	}
7932 
7933 	/* Enqueue the event and trigger the work */
7934 	spin_lock_bh(&router->fib_event_queue_lock);
7935 	list_add_tail(&fib_event->list, &router->fib_event_queue);
7936 	spin_unlock_bh(&router->fib_event_queue_lock);
7937 	mlxsw_core_schedule_work(&router->fib_event_work);
7938 
7939 	return NOTIFY_DONE;
7940 
7941 err_fib_event:
7942 	kfree(fib_event);
7943 	return NOTIFY_BAD;
7944 }
7945 
7946 static struct mlxsw_sp_rif *
7947 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7948 			 const struct net_device *dev)
7949 {
7950 	int i;
7951 
7952 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7953 		if (mlxsw_sp->router->rifs[i] &&
7954 		    mlxsw_sp->router->rifs[i]->dev == dev)
7955 			return mlxsw_sp->router->rifs[i];
7956 
7957 	return NULL;
7958 }
7959 
7960 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7961 			 const struct net_device *dev)
7962 {
7963 	struct mlxsw_sp_rif *rif;
7964 
7965 	mutex_lock(&mlxsw_sp->router->lock);
7966 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7967 	mutex_unlock(&mlxsw_sp->router->lock);
7968 
7969 	return rif;
7970 }
7971 
7972 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7973 {
7974 	struct mlxsw_sp_rif *rif;
7975 	u16 vid = 0;
7976 
7977 	mutex_lock(&mlxsw_sp->router->lock);
7978 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7979 	if (!rif)
7980 		goto out;
7981 
7982 	/* We only return the VID for VLAN RIFs. Otherwise we return an
7983 	 * invalid value (0).
7984 	 */
7985 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7986 		goto out;
7987 
7988 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7989 
7990 out:
7991 	mutex_unlock(&mlxsw_sp->router->lock);
7992 	return vid;
7993 }
7994 
7995 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7996 {
7997 	char ritr_pl[MLXSW_REG_RITR_LEN];
7998 	int err;
7999 
8000 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
8001 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8002 	if (err)
8003 		return err;
8004 
8005 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
8006 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8007 }
8008 
8009 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
8010 					  struct mlxsw_sp_rif *rif)
8011 {
8012 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
8013 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
8014 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8015 }
8016 
8017 static bool
8018 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
8019 			   unsigned long event)
8020 {
8021 	struct inet6_dev *inet6_dev;
8022 	bool addr_list_empty = true;
8023 	struct in_device *idev;
8024 
8025 	switch (event) {
8026 	case NETDEV_UP:
8027 		return rif == NULL;
8028 	case NETDEV_DOWN:
8029 		rcu_read_lock();
8030 		idev = __in_dev_get_rcu(dev);
8031 		if (idev && idev->ifa_list)
8032 			addr_list_empty = false;
8033 
8034 		inet6_dev = __in6_dev_get(dev);
8035 		if (addr_list_empty && inet6_dev &&
8036 		    !list_empty(&inet6_dev->addr_list))
8037 			addr_list_empty = false;
8038 		rcu_read_unlock();
8039 
8040 		/* macvlans do not have a RIF, but rather piggy back on the
8041 		 * RIF of their lower device.
8042 		 */
8043 		if (netif_is_macvlan(dev) && addr_list_empty)
8044 			return true;
8045 
8046 		if (rif && addr_list_empty &&
8047 		    !netif_is_l3_slave(rif->dev))
8048 			return true;
8049 		/* It is possible we already removed the RIF ourselves
8050 		 * if it was assigned to a netdev that is now a bridge
8051 		 * or LAG slave.
8052 		 */
8053 		return false;
8054 	}
8055 
8056 	return false;
8057 }
8058 
8059 static enum mlxsw_sp_rif_type
8060 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8061 		      const struct net_device *dev)
8062 {
8063 	enum mlxsw_sp_fid_type type;
8064 
8065 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8066 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
8067 
8068 	/* Otherwise RIF type is derived from the type of the underlying FID. */
8069 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8070 		type = MLXSW_SP_FID_TYPE_8021Q;
8071 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8072 		type = MLXSW_SP_FID_TYPE_8021Q;
8073 	else if (netif_is_bridge_master(dev))
8074 		type = MLXSW_SP_FID_TYPE_8021D;
8075 	else
8076 		type = MLXSW_SP_FID_TYPE_RFID;
8077 
8078 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8079 }
8080 
8081 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
8082 {
8083 	int i;
8084 
8085 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
8086 		if (!mlxsw_sp->router->rifs[i]) {
8087 			*p_rif_index = i;
8088 			return 0;
8089 		}
8090 	}
8091 
8092 	return -ENOBUFS;
8093 }
8094 
8095 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8096 					       u16 vr_id,
8097 					       struct net_device *l3_dev)
8098 {
8099 	struct mlxsw_sp_rif *rif;
8100 
8101 	rif = kzalloc(rif_size, GFP_KERNEL);
8102 	if (!rif)
8103 		return NULL;
8104 
8105 	INIT_LIST_HEAD(&rif->nexthop_list);
8106 	INIT_LIST_HEAD(&rif->neigh_list);
8107 	if (l3_dev) {
8108 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
8109 		rif->mtu = l3_dev->mtu;
8110 		rif->dev = l3_dev;
8111 	}
8112 	rif->vr_id = vr_id;
8113 	rif->rif_index = rif_index;
8114 
8115 	return rif;
8116 }
8117 
8118 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8119 					   u16 rif_index)
8120 {
8121 	return mlxsw_sp->router->rifs[rif_index];
8122 }
8123 
8124 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8125 {
8126 	return rif->rif_index;
8127 }
8128 
8129 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8130 {
8131 	return lb_rif->common.rif_index;
8132 }
8133 
8134 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8135 {
8136 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
8137 	struct mlxsw_sp_vr *ul_vr;
8138 
8139 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8140 	if (WARN_ON(IS_ERR(ul_vr)))
8141 		return 0;
8142 
8143 	return ul_vr->id;
8144 }
8145 
8146 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8147 {
8148 	return lb_rif->ul_rif_id;
8149 }
8150 
8151 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8152 {
8153 	return rif->dev->ifindex;
8154 }
8155 
8156 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
8157 {
8158 	return rif->dev;
8159 }
8160 
8161 static struct mlxsw_sp_rif *
8162 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8163 		    const struct mlxsw_sp_rif_params *params,
8164 		    struct netlink_ext_ack *extack)
8165 {
8166 	u32 tb_id = l3mdev_fib_table(params->dev);
8167 	const struct mlxsw_sp_rif_ops *ops;
8168 	struct mlxsw_sp_fid *fid = NULL;
8169 	enum mlxsw_sp_rif_type type;
8170 	struct mlxsw_sp_rif *rif;
8171 	struct mlxsw_sp_vr *vr;
8172 	u16 rif_index;
8173 	int i, err;
8174 
8175 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8176 	ops = mlxsw_sp->router->rif_ops_arr[type];
8177 
8178 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8179 	if (IS_ERR(vr))
8180 		return ERR_CAST(vr);
8181 	vr->rif_count++;
8182 
8183 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
8184 	if (err) {
8185 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8186 		goto err_rif_index_alloc;
8187 	}
8188 
8189 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
8190 	if (!rif) {
8191 		err = -ENOMEM;
8192 		goto err_rif_alloc;
8193 	}
8194 	dev_hold(rif->dev);
8195 	mlxsw_sp->router->rifs[rif_index] = rif;
8196 	rif->mlxsw_sp = mlxsw_sp;
8197 	rif->ops = ops;
8198 
8199 	if (ops->fid_get) {
8200 		fid = ops->fid_get(rif, extack);
8201 		if (IS_ERR(fid)) {
8202 			err = PTR_ERR(fid);
8203 			goto err_fid_get;
8204 		}
8205 		rif->fid = fid;
8206 	}
8207 
8208 	if (ops->setup)
8209 		ops->setup(rif, params);
8210 
8211 	err = ops->configure(rif, extack);
8212 	if (err)
8213 		goto err_configure;
8214 
8215 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8216 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8217 		if (err)
8218 			goto err_mr_rif_add;
8219 	}
8220 
8221 	mlxsw_sp_rif_counters_alloc(rif);
8222 
8223 	return rif;
8224 
8225 err_mr_rif_add:
8226 	for (i--; i >= 0; i--)
8227 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8228 	ops->deconfigure(rif);
8229 err_configure:
8230 	if (fid)
8231 		mlxsw_sp_fid_put(fid);
8232 err_fid_get:
8233 	mlxsw_sp->router->rifs[rif_index] = NULL;
8234 	dev_put(rif->dev);
8235 	kfree(rif);
8236 err_rif_alloc:
8237 err_rif_index_alloc:
8238 	vr->rif_count--;
8239 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8240 	return ERR_PTR(err);
8241 }
8242 
8243 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8244 {
8245 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
8246 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8247 	struct mlxsw_sp_fid *fid = rif->fid;
8248 	struct mlxsw_sp_vr *vr;
8249 	int i;
8250 
8251 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8252 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
8253 
8254 	mlxsw_sp_rif_counters_free(rif);
8255 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8256 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8257 	ops->deconfigure(rif);
8258 	if (fid)
8259 		/* Loopback RIFs are not associated with a FID. */
8260 		mlxsw_sp_fid_put(fid);
8261 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8262 	dev_put(rif->dev);
8263 	kfree(rif);
8264 	vr->rif_count--;
8265 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8266 }
8267 
8268 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8269 				 struct net_device *dev)
8270 {
8271 	struct mlxsw_sp_rif *rif;
8272 
8273 	mutex_lock(&mlxsw_sp->router->lock);
8274 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8275 	if (!rif)
8276 		goto out;
8277 	mlxsw_sp_rif_destroy(rif);
8278 out:
8279 	mutex_unlock(&mlxsw_sp->router->lock);
8280 }
8281 
8282 static void
8283 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8284 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8285 {
8286 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8287 
8288 	params->vid = mlxsw_sp_port_vlan->vid;
8289 	params->lag = mlxsw_sp_port->lagged;
8290 	if (params->lag)
8291 		params->lag_id = mlxsw_sp_port->lag_id;
8292 	else
8293 		params->system_port = mlxsw_sp_port->local_port;
8294 }
8295 
8296 static struct mlxsw_sp_rif_subport *
8297 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8298 {
8299 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
8300 }
8301 
8302 static struct mlxsw_sp_rif *
8303 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8304 			 const struct mlxsw_sp_rif_params *params,
8305 			 struct netlink_ext_ack *extack)
8306 {
8307 	struct mlxsw_sp_rif_subport *rif_subport;
8308 	struct mlxsw_sp_rif *rif;
8309 
8310 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8311 	if (!rif)
8312 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8313 
8314 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8315 	refcount_inc(&rif_subport->ref_count);
8316 	return rif;
8317 }
8318 
8319 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8320 {
8321 	struct mlxsw_sp_rif_subport *rif_subport;
8322 
8323 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8324 	if (!refcount_dec_and_test(&rif_subport->ref_count))
8325 		return;
8326 
8327 	mlxsw_sp_rif_destroy(rif);
8328 }
8329 
8330 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8331 						struct mlxsw_sp_rif_mac_profile *profile,
8332 						struct netlink_ext_ack *extack)
8333 {
8334 	u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8335 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8336 	int id;
8337 
8338 	id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8339 		       max_rif_mac_profiles, GFP_KERNEL);
8340 
8341 	if (id >= 0) {
8342 		profile->id = id;
8343 		return 0;
8344 	}
8345 
8346 	if (id == -ENOSPC)
8347 		NL_SET_ERR_MSG_MOD(extack,
8348 				   "Exceeded number of supported router interface MAC profiles");
8349 
8350 	return id;
8351 }
8352 
8353 static struct mlxsw_sp_rif_mac_profile *
8354 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8355 {
8356 	struct mlxsw_sp_rif_mac_profile *profile;
8357 
8358 	profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8359 			     mac_profile);
8360 	WARN_ON(!profile);
8361 	return profile;
8362 }
8363 
8364 static struct mlxsw_sp_rif_mac_profile *
8365 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8366 {
8367 	struct mlxsw_sp_rif_mac_profile *profile;
8368 
8369 	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8370 	if (!profile)
8371 		return NULL;
8372 
8373 	ether_addr_copy(profile->mac_prefix, mac);
8374 	refcount_set(&profile->ref_count, 1);
8375 	return profile;
8376 }
8377 
8378 static struct mlxsw_sp_rif_mac_profile *
8379 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8380 {
8381 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8382 	struct mlxsw_sp_rif_mac_profile *profile;
8383 	int id;
8384 
8385 	idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8386 		if (ether_addr_equal_masked(profile->mac_prefix, mac,
8387 					    mlxsw_sp->mac_mask))
8388 			return profile;
8389 	}
8390 
8391 	return NULL;
8392 }
8393 
8394 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
8395 {
8396 	const struct mlxsw_sp *mlxsw_sp = priv;
8397 
8398 	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
8399 }
8400 
8401 static struct mlxsw_sp_rif_mac_profile *
8402 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8403 				struct netlink_ext_ack *extack)
8404 {
8405 	struct mlxsw_sp_rif_mac_profile *profile;
8406 	int err;
8407 
8408 	profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8409 	if (!profile)
8410 		return ERR_PTR(-ENOMEM);
8411 
8412 	err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
8413 	if (err)
8414 		goto profile_index_alloc_err;
8415 
8416 	atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
8417 	return profile;
8418 
8419 profile_index_alloc_err:
8420 	kfree(profile);
8421 	return ERR_PTR(err);
8422 }
8423 
8424 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
8425 					     u8 mac_profile)
8426 {
8427 	struct mlxsw_sp_rif_mac_profile *profile;
8428 
8429 	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
8430 	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
8431 	kfree(profile);
8432 }
8433 
8434 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
8435 					const char *mac, u8 *p_mac_profile,
8436 					struct netlink_ext_ack *extack)
8437 {
8438 	struct mlxsw_sp_rif_mac_profile *profile;
8439 
8440 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
8441 	if (profile) {
8442 		refcount_inc(&profile->ref_count);
8443 		goto out;
8444 	}
8445 
8446 	profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
8447 	if (IS_ERR(profile))
8448 		return PTR_ERR(profile);
8449 
8450 out:
8451 	*p_mac_profile = profile->id;
8452 	return 0;
8453 }
8454 
8455 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
8456 					 u8 mac_profile)
8457 {
8458 	struct mlxsw_sp_rif_mac_profile *profile;
8459 
8460 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8461 			   mac_profile);
8462 	if (WARN_ON(!profile))
8463 		return;
8464 
8465 	if (!refcount_dec_and_test(&profile->ref_count))
8466 		return;
8467 
8468 	mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
8469 }
8470 
8471 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
8472 {
8473 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8474 	struct mlxsw_sp_rif_mac_profile *profile;
8475 
8476 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8477 			   rif->mac_profile_id);
8478 	if (WARN_ON(!profile))
8479 		return false;
8480 
8481 	return refcount_read(&profile->ref_count) > 1;
8482 }
8483 
8484 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
8485 					 const char *new_mac)
8486 {
8487 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8488 	struct mlxsw_sp_rif_mac_profile *profile;
8489 
8490 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8491 			   rif->mac_profile_id);
8492 	if (WARN_ON(!profile))
8493 		return -EINVAL;
8494 
8495 	ether_addr_copy(profile->mac_prefix, new_mac);
8496 	return 0;
8497 }
8498 
8499 static int
8500 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
8501 				 struct mlxsw_sp_rif *rif,
8502 				 const char *new_mac,
8503 				 struct netlink_ext_ack *extack)
8504 {
8505 	u8 mac_profile;
8506 	int err;
8507 
8508 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
8509 	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
8510 		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
8511 
8512 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
8513 					   &mac_profile, extack);
8514 	if (err)
8515 		return err;
8516 
8517 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
8518 	rif->mac_profile_id = mac_profile;
8519 	return 0;
8520 }
8521 
8522 static int
8523 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8524 				 struct net_device *l3_dev,
8525 				 struct netlink_ext_ack *extack)
8526 {
8527 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8528 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8529 	struct mlxsw_sp_rif_params params = {
8530 		.dev = l3_dev,
8531 	};
8532 	u16 vid = mlxsw_sp_port_vlan->vid;
8533 	struct mlxsw_sp_rif *rif;
8534 	struct mlxsw_sp_fid *fid;
8535 	int err;
8536 
8537 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8538 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8539 	if (IS_ERR(rif))
8540 		return PTR_ERR(rif);
8541 
8542 	/* FID was already created, just take a reference */
8543 	fid = rif->ops->fid_get(rif, extack);
8544 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8545 	if (err)
8546 		goto err_fid_port_vid_map;
8547 
8548 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8549 	if (err)
8550 		goto err_port_vid_learning_set;
8551 
8552 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8553 					BR_STATE_FORWARDING);
8554 	if (err)
8555 		goto err_port_vid_stp_set;
8556 
8557 	mlxsw_sp_port_vlan->fid = fid;
8558 
8559 	return 0;
8560 
8561 err_port_vid_stp_set:
8562 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8563 err_port_vid_learning_set:
8564 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8565 err_fid_port_vid_map:
8566 	mlxsw_sp_fid_put(fid);
8567 	mlxsw_sp_rif_subport_put(rif);
8568 	return err;
8569 }
8570 
8571 static void
8572 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8573 {
8574 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8575 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8576 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8577 	u16 vid = mlxsw_sp_port_vlan->vid;
8578 
8579 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8580 		return;
8581 
8582 	mlxsw_sp_port_vlan->fid = NULL;
8583 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8584 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8585 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8586 	mlxsw_sp_fid_put(fid);
8587 	mlxsw_sp_rif_subport_put(rif);
8588 }
8589 
8590 int
8591 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8592 			       struct net_device *l3_dev,
8593 			       struct netlink_ext_ack *extack)
8594 {
8595 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8596 	struct mlxsw_sp_rif *rif;
8597 	int err = 0;
8598 
8599 	mutex_lock(&mlxsw_sp->router->lock);
8600 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8601 	if (!rif)
8602 		goto out;
8603 
8604 	err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8605 					       extack);
8606 out:
8607 	mutex_unlock(&mlxsw_sp->router->lock);
8608 	return err;
8609 }
8610 
8611 void
8612 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8613 {
8614 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8615 
8616 	mutex_lock(&mlxsw_sp->router->lock);
8617 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8618 	mutex_unlock(&mlxsw_sp->router->lock);
8619 }
8620 
8621 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8622 					     struct net_device *port_dev,
8623 					     unsigned long event, u16 vid,
8624 					     struct netlink_ext_ack *extack)
8625 {
8626 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8627 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8628 
8629 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8630 	if (WARN_ON(!mlxsw_sp_port_vlan))
8631 		return -EINVAL;
8632 
8633 	switch (event) {
8634 	case NETDEV_UP:
8635 		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
8636 							l3_dev, extack);
8637 	case NETDEV_DOWN:
8638 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8639 		break;
8640 	}
8641 
8642 	return 0;
8643 }
8644 
8645 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
8646 					unsigned long event,
8647 					struct netlink_ext_ack *extack)
8648 {
8649 	if (netif_is_bridge_port(port_dev) ||
8650 	    netif_is_lag_port(port_dev) ||
8651 	    netif_is_ovs_port(port_dev))
8652 		return 0;
8653 
8654 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
8655 						 MLXSW_SP_DEFAULT_VID, extack);
8656 }
8657 
8658 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
8659 					 struct net_device *lag_dev,
8660 					 unsigned long event, u16 vid,
8661 					 struct netlink_ext_ack *extack)
8662 {
8663 	struct net_device *port_dev;
8664 	struct list_head *iter;
8665 	int err;
8666 
8667 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
8668 		if (mlxsw_sp_port_dev_check(port_dev)) {
8669 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
8670 								port_dev,
8671 								event, vid,
8672 								extack);
8673 			if (err)
8674 				return err;
8675 		}
8676 	}
8677 
8678 	return 0;
8679 }
8680 
8681 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
8682 				       unsigned long event,
8683 				       struct netlink_ext_ack *extack)
8684 {
8685 	if (netif_is_bridge_port(lag_dev))
8686 		return 0;
8687 
8688 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
8689 					     MLXSW_SP_DEFAULT_VID, extack);
8690 }
8691 
8692 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8693 					  struct net_device *l3_dev,
8694 					  unsigned long event,
8695 					  struct netlink_ext_ack *extack)
8696 {
8697 	struct mlxsw_sp_rif_params params = {
8698 		.dev = l3_dev,
8699 	};
8700 	struct mlxsw_sp_rif *rif;
8701 
8702 	switch (event) {
8703 	case NETDEV_UP:
8704 		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
8705 			u16 proto;
8706 
8707 			br_vlan_get_proto(l3_dev, &proto);
8708 			if (proto == ETH_P_8021AD) {
8709 				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
8710 				return -EOPNOTSUPP;
8711 			}
8712 		}
8713 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8714 		if (IS_ERR(rif))
8715 			return PTR_ERR(rif);
8716 		break;
8717 	case NETDEV_DOWN:
8718 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8719 		mlxsw_sp_rif_destroy(rif);
8720 		break;
8721 	}
8722 
8723 	return 0;
8724 }
8725 
8726 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8727 					struct net_device *vlan_dev,
8728 					unsigned long event,
8729 					struct netlink_ext_ack *extack)
8730 {
8731 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8732 	u16 vid = vlan_dev_vlan_id(vlan_dev);
8733 
8734 	if (netif_is_bridge_port(vlan_dev))
8735 		return 0;
8736 
8737 	if (mlxsw_sp_port_dev_check(real_dev))
8738 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
8739 							 event, vid, extack);
8740 	else if (netif_is_lag_master(real_dev))
8741 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
8742 						     vid, extack);
8743 	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
8744 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
8745 						      extack);
8746 
8747 	return 0;
8748 }
8749 
8750 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
8751 {
8752 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
8753 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8754 
8755 	return ether_addr_equal_masked(mac, vrrp4, mask);
8756 }
8757 
8758 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
8759 {
8760 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
8761 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8762 
8763 	return ether_addr_equal_masked(mac, vrrp6, mask);
8764 }
8765 
8766 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8767 				const u8 *mac, bool adding)
8768 {
8769 	char ritr_pl[MLXSW_REG_RITR_LEN];
8770 	u8 vrrp_id = adding ? mac[5] : 0;
8771 	int err;
8772 
8773 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
8774 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
8775 		return 0;
8776 
8777 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8778 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8779 	if (err)
8780 		return err;
8781 
8782 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
8783 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
8784 	else
8785 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
8786 
8787 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8788 }
8789 
8790 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
8791 				    const struct net_device *macvlan_dev,
8792 				    struct netlink_ext_ack *extack)
8793 {
8794 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8795 	struct mlxsw_sp_rif *rif;
8796 	int err;
8797 
8798 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8799 	if (!rif) {
8800 		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
8801 		return -EOPNOTSUPP;
8802 	}
8803 
8804 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8805 				  mlxsw_sp_fid_index(rif->fid), true);
8806 	if (err)
8807 		return err;
8808 
8809 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
8810 				   macvlan_dev->dev_addr, true);
8811 	if (err)
8812 		goto err_rif_vrrp_add;
8813 
8814 	/* Make sure the bridge driver does not have this MAC pointing at
8815 	 * some other port.
8816 	 */
8817 	if (rif->ops->fdb_del)
8818 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
8819 
8820 	return 0;
8821 
8822 err_rif_vrrp_add:
8823 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8824 			    mlxsw_sp_fid_index(rif->fid), false);
8825 	return err;
8826 }
8827 
8828 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8829 				       const struct net_device *macvlan_dev)
8830 {
8831 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8832 	struct mlxsw_sp_rif *rif;
8833 
8834 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8835 	/* If we do not have a RIF, then we already took care of
8836 	 * removing the macvlan's MAC during RIF deletion.
8837 	 */
8838 	if (!rif)
8839 		return;
8840 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
8841 			     false);
8842 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8843 			    mlxsw_sp_fid_index(rif->fid), false);
8844 }
8845 
8846 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8847 			      const struct net_device *macvlan_dev)
8848 {
8849 	mutex_lock(&mlxsw_sp->router->lock);
8850 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8851 	mutex_unlock(&mlxsw_sp->router->lock);
8852 }
8853 
8854 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
8855 					   struct net_device *macvlan_dev,
8856 					   unsigned long event,
8857 					   struct netlink_ext_ack *extack)
8858 {
8859 	switch (event) {
8860 	case NETDEV_UP:
8861 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
8862 	case NETDEV_DOWN:
8863 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8864 		break;
8865 	}
8866 
8867 	return 0;
8868 }
8869 
8870 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
8871 				     struct net_device *dev,
8872 				     unsigned long event,
8873 				     struct netlink_ext_ack *extack)
8874 {
8875 	if (mlxsw_sp_port_dev_check(dev))
8876 		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
8877 	else if (netif_is_lag_master(dev))
8878 		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
8879 	else if (netif_is_bridge_master(dev))
8880 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
8881 						      extack);
8882 	else if (is_vlan_dev(dev))
8883 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
8884 						    extack);
8885 	else if (netif_is_macvlan(dev))
8886 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
8887 						       extack);
8888 	else
8889 		return 0;
8890 }
8891 
8892 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
8893 				   unsigned long event, void *ptr)
8894 {
8895 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
8896 	struct net_device *dev = ifa->ifa_dev->dev;
8897 	struct mlxsw_sp_router *router;
8898 	struct mlxsw_sp_rif *rif;
8899 	int err = 0;
8900 
8901 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
8902 	if (event == NETDEV_UP)
8903 		return NOTIFY_DONE;
8904 
8905 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
8906 	mutex_lock(&router->lock);
8907 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
8908 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8909 		goto out;
8910 
8911 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
8912 out:
8913 	mutex_unlock(&router->lock);
8914 	return notifier_from_errno(err);
8915 }
8916 
8917 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
8918 				  unsigned long event, void *ptr)
8919 {
8920 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
8921 	struct net_device *dev = ivi->ivi_dev->dev;
8922 	struct mlxsw_sp *mlxsw_sp;
8923 	struct mlxsw_sp_rif *rif;
8924 	int err = 0;
8925 
8926 	mlxsw_sp = mlxsw_sp_lower_get(dev);
8927 	if (!mlxsw_sp)
8928 		return NOTIFY_DONE;
8929 
8930 	mutex_lock(&mlxsw_sp->router->lock);
8931 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8932 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8933 		goto out;
8934 
8935 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
8936 out:
8937 	mutex_unlock(&mlxsw_sp->router->lock);
8938 	return notifier_from_errno(err);
8939 }
8940 
8941 struct mlxsw_sp_inet6addr_event_work {
8942 	struct work_struct work;
8943 	struct mlxsw_sp *mlxsw_sp;
8944 	struct net_device *dev;
8945 	unsigned long event;
8946 };
8947 
8948 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
8949 {
8950 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
8951 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
8952 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
8953 	struct net_device *dev = inet6addr_work->dev;
8954 	unsigned long event = inet6addr_work->event;
8955 	struct mlxsw_sp_rif *rif;
8956 
8957 	rtnl_lock();
8958 	mutex_lock(&mlxsw_sp->router->lock);
8959 
8960 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8961 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8962 		goto out;
8963 
8964 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
8965 out:
8966 	mutex_unlock(&mlxsw_sp->router->lock);
8967 	rtnl_unlock();
8968 	dev_put(dev);
8969 	kfree(inet6addr_work);
8970 }
8971 
8972 /* Called with rcu_read_lock() */
8973 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
8974 				    unsigned long event, void *ptr)
8975 {
8976 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
8977 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
8978 	struct net_device *dev = if6->idev->dev;
8979 	struct mlxsw_sp_router *router;
8980 
8981 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
8982 	if (event == NETDEV_UP)
8983 		return NOTIFY_DONE;
8984 
8985 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
8986 	if (!inet6addr_work)
8987 		return NOTIFY_BAD;
8988 
8989 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
8990 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
8991 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
8992 	inet6addr_work->dev = dev;
8993 	inet6addr_work->event = event;
8994 	dev_hold(dev);
8995 	mlxsw_core_schedule_work(&inet6addr_work->work);
8996 
8997 	return NOTIFY_DONE;
8998 }
8999 
9000 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
9001 				   unsigned long event, void *ptr)
9002 {
9003 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
9004 	struct net_device *dev = i6vi->i6vi_dev->dev;
9005 	struct mlxsw_sp *mlxsw_sp;
9006 	struct mlxsw_sp_rif *rif;
9007 	int err = 0;
9008 
9009 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9010 	if (!mlxsw_sp)
9011 		return NOTIFY_DONE;
9012 
9013 	mutex_lock(&mlxsw_sp->router->lock);
9014 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9015 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9016 		goto out;
9017 
9018 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
9019 out:
9020 	mutex_unlock(&mlxsw_sp->router->lock);
9021 	return notifier_from_errno(err);
9022 }
9023 
9024 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9025 			     const char *mac, int mtu, u8 mac_profile)
9026 {
9027 	char ritr_pl[MLXSW_REG_RITR_LEN];
9028 	int err;
9029 
9030 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9031 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9032 	if (err)
9033 		return err;
9034 
9035 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
9036 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
9037 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
9038 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
9039 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9040 }
9041 
9042 static int
9043 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9044 				  struct mlxsw_sp_rif *rif,
9045 				  struct netlink_ext_ack *extack)
9046 {
9047 	struct net_device *dev = rif->dev;
9048 	u8 old_mac_profile;
9049 	u16 fid_index;
9050 	int err;
9051 
9052 	fid_index = mlxsw_sp_fid_index(rif->fid);
9053 
9054 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9055 	if (err)
9056 		return err;
9057 
9058 	old_mac_profile = rif->mac_profile_id;
9059 	err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
9060 					       extack);
9061 	if (err)
9062 		goto err_rif_mac_profile_replace;
9063 
9064 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9065 				dev->mtu, rif->mac_profile_id);
9066 	if (err)
9067 		goto err_rif_edit;
9068 
9069 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9070 	if (err)
9071 		goto err_rif_fdb_op;
9072 
9073 	if (rif->mtu != dev->mtu) {
9074 		struct mlxsw_sp_vr *vr;
9075 		int i;
9076 
9077 		/* The RIF is relevant only to its mr_table instance, as unlike
9078 		 * unicast routing, in multicast routing a RIF cannot be shared
9079 		 * between several multicast routing tables.
9080 		 */
9081 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
9082 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9083 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
9084 						   rif, dev->mtu);
9085 	}
9086 
9087 	ether_addr_copy(rif->addr, dev->dev_addr);
9088 	rif->mtu = dev->mtu;
9089 
9090 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9091 
9092 	return 0;
9093 
9094 err_rif_fdb_op:
9095 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
9096 			  old_mac_profile);
9097 err_rif_edit:
9098 	mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9099 err_rif_mac_profile_replace:
9100 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9101 	return err;
9102 }
9103 
9104 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9105 			    struct netdev_notifier_pre_changeaddr_info *info)
9106 {
9107 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9108 	struct mlxsw_sp_rif_mac_profile *profile;
9109 	struct netlink_ext_ack *extack;
9110 	u8 max_rif_mac_profiles;
9111 	u64 occ;
9112 
9113 	extack = netdev_notifier_info_to_extack(&info->info);
9114 
9115 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9116 	if (profile)
9117 		return 0;
9118 
9119 	max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9120 	occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9121 	if (occ < max_rif_mac_profiles)
9122 		return 0;
9123 
9124 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9125 		return 0;
9126 
9127 	NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9128 	return -ENOBUFS;
9129 }
9130 
9131 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
9132 					 unsigned long event, void *ptr)
9133 {
9134 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
9135 	struct mlxsw_sp *mlxsw_sp;
9136 	struct mlxsw_sp_rif *rif;
9137 	int err = 0;
9138 
9139 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9140 	if (!mlxsw_sp)
9141 		return 0;
9142 
9143 	mutex_lock(&mlxsw_sp->router->lock);
9144 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9145 	if (!rif)
9146 		goto out;
9147 
9148 	switch (event) {
9149 	case NETDEV_CHANGEMTU:
9150 	case NETDEV_CHANGEADDR:
9151 		err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
9152 		break;
9153 	case NETDEV_PRE_CHANGEADDR:
9154 		err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
9155 		break;
9156 	}
9157 
9158 out:
9159 	mutex_unlock(&mlxsw_sp->router->lock);
9160 	return err;
9161 }
9162 
9163 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
9164 				  struct net_device *l3_dev,
9165 				  struct netlink_ext_ack *extack)
9166 {
9167 	struct mlxsw_sp_rif *rif;
9168 
9169 	/* If netdev is already associated with a RIF, then we need to
9170 	 * destroy it and create a new one with the new virtual router ID.
9171 	 */
9172 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9173 	if (rif)
9174 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
9175 					  extack);
9176 
9177 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
9178 }
9179 
9180 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9181 				    struct net_device *l3_dev)
9182 {
9183 	struct mlxsw_sp_rif *rif;
9184 
9185 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9186 	if (!rif)
9187 		return;
9188 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
9189 }
9190 
9191 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
9192 				 struct netdev_notifier_changeupper_info *info)
9193 {
9194 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
9195 	int err = 0;
9196 
9197 	/* We do not create a RIF for a macvlan, but only use it to
9198 	 * direct more MAC addresses to the router.
9199 	 */
9200 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
9201 		return 0;
9202 
9203 	mutex_lock(&mlxsw_sp->router->lock);
9204 	switch (event) {
9205 	case NETDEV_PRECHANGEUPPER:
9206 		break;
9207 	case NETDEV_CHANGEUPPER:
9208 		if (info->linking) {
9209 			struct netlink_ext_ack *extack;
9210 
9211 			extack = netdev_notifier_info_to_extack(&info->info);
9212 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
9213 		} else {
9214 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
9215 		}
9216 		break;
9217 	}
9218 	mutex_unlock(&mlxsw_sp->router->lock);
9219 
9220 	return err;
9221 }
9222 
9223 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
9224 					struct netdev_nested_priv *priv)
9225 {
9226 	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
9227 
9228 	if (!netif_is_macvlan(dev))
9229 		return 0;
9230 
9231 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9232 				   mlxsw_sp_fid_index(rif->fid), false);
9233 }
9234 
9235 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
9236 {
9237 	struct netdev_nested_priv priv = {
9238 		.data = (void *)rif,
9239 	};
9240 
9241 	if (!netif_is_macvlan_port(rif->dev))
9242 		return 0;
9243 
9244 	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
9245 	return netdev_walk_all_upper_dev_rcu(rif->dev,
9246 					     __mlxsw_sp_rif_macvlan_flush, &priv);
9247 }
9248 
9249 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
9250 				       const struct mlxsw_sp_rif_params *params)
9251 {
9252 	struct mlxsw_sp_rif_subport *rif_subport;
9253 
9254 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
9255 	refcount_set(&rif_subport->ref_count, 1);
9256 	rif_subport->vid = params->vid;
9257 	rif_subport->lag = params->lag;
9258 	if (params->lag)
9259 		rif_subport->lag_id = params->lag_id;
9260 	else
9261 		rif_subport->system_port = params->system_port;
9262 }
9263 
9264 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
9265 {
9266 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9267 	struct mlxsw_sp_rif_subport *rif_subport;
9268 	char ritr_pl[MLXSW_REG_RITR_LEN];
9269 
9270 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
9271 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
9272 			    rif->rif_index, rif->vr_id, rif->dev->mtu);
9273 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9274 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9275 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
9276 				  rif_subport->lag ? rif_subport->lag_id :
9277 						     rif_subport->system_port,
9278 				  rif_subport->vid);
9279 
9280 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9281 }
9282 
9283 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
9284 					  struct netlink_ext_ack *extack)
9285 {
9286 	u8 mac_profile;
9287 	int err;
9288 
9289 	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
9290 					   &mac_profile, extack);
9291 	if (err)
9292 		return err;
9293 	rif->mac_profile_id = mac_profile;
9294 
9295 	err = mlxsw_sp_rif_subport_op(rif, true);
9296 	if (err)
9297 		goto err_rif_subport_op;
9298 
9299 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9300 				  mlxsw_sp_fid_index(rif->fid), true);
9301 	if (err)
9302 		goto err_rif_fdb_op;
9303 
9304 	mlxsw_sp_fid_rif_set(rif->fid, rif);
9305 	return 0;
9306 
9307 err_rif_fdb_op:
9308 	mlxsw_sp_rif_subport_op(rif, false);
9309 err_rif_subport_op:
9310 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
9311 	return err;
9312 }
9313 
9314 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
9315 {
9316 	struct mlxsw_sp_fid *fid = rif->fid;
9317 
9318 	mlxsw_sp_fid_rif_set(fid, NULL);
9319 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9320 			    mlxsw_sp_fid_index(fid), false);
9321 	mlxsw_sp_rif_macvlan_flush(rif);
9322 	mlxsw_sp_rif_subport_op(rif, false);
9323 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9324 }
9325 
9326 static struct mlxsw_sp_fid *
9327 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
9328 			     struct netlink_ext_ack *extack)
9329 {
9330 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
9331 }
9332 
9333 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
9334 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
9335 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
9336 	.setup			= mlxsw_sp_rif_subport_setup,
9337 	.configure		= mlxsw_sp_rif_subport_configure,
9338 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
9339 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
9340 };
9341 
9342 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
9343 				    enum mlxsw_reg_ritr_if_type type,
9344 				    u16 vid_fid, bool enable)
9345 {
9346 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9347 	char ritr_pl[MLXSW_REG_RITR_LEN];
9348 
9349 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
9350 			    rif->dev->mtu);
9351 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9352 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9353 	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
9354 
9355 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9356 }
9357 
9358 u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
9359 {
9360 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
9361 }
9362 
9363 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
9364 				      struct netlink_ext_ack *extack)
9365 {
9366 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9367 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9368 	u8 mac_profile;
9369 	int err;
9370 
9371 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
9372 					   &mac_profile, extack);
9373 	if (err)
9374 		return err;
9375 	rif->mac_profile_id = mac_profile;
9376 
9377 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
9378 				       true);
9379 	if (err)
9380 		goto err_rif_vlan_fid_op;
9381 
9382 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9383 				     mlxsw_sp_router_port(mlxsw_sp), true);
9384 	if (err)
9385 		goto err_fid_mc_flood_set;
9386 
9387 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9388 				     mlxsw_sp_router_port(mlxsw_sp), true);
9389 	if (err)
9390 		goto err_fid_bc_flood_set;
9391 
9392 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9393 				  mlxsw_sp_fid_index(rif->fid), true);
9394 	if (err)
9395 		goto err_rif_fdb_op;
9396 
9397 	mlxsw_sp_fid_rif_set(rif->fid, rif);
9398 	return 0;
9399 
9400 err_rif_fdb_op:
9401 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9402 			       mlxsw_sp_router_port(mlxsw_sp), false);
9403 err_fid_bc_flood_set:
9404 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9405 			       mlxsw_sp_router_port(mlxsw_sp), false);
9406 err_fid_mc_flood_set:
9407 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
9408 err_rif_vlan_fid_op:
9409 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
9410 	return err;
9411 }
9412 
9413 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
9414 {
9415 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9416 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9417 	struct mlxsw_sp_fid *fid = rif->fid;
9418 
9419 	mlxsw_sp_fid_rif_set(fid, NULL);
9420 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9421 			    mlxsw_sp_fid_index(fid), false);
9422 	mlxsw_sp_rif_macvlan_flush(rif);
9423 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9424 			       mlxsw_sp_router_port(mlxsw_sp), false);
9425 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9426 			       mlxsw_sp_router_port(mlxsw_sp), false);
9427 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
9428 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9429 }
9430 
9431 static struct mlxsw_sp_fid *
9432 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
9433 			 struct netlink_ext_ack *extack)
9434 {
9435 	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
9436 }
9437 
9438 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9439 {
9440 	struct switchdev_notifier_fdb_info info = {};
9441 	struct net_device *dev;
9442 
9443 	dev = br_fdb_find_port(rif->dev, mac, 0);
9444 	if (!dev)
9445 		return;
9446 
9447 	info.addr = mac;
9448 	info.vid = 0;
9449 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9450 				 NULL);
9451 }
9452 
9453 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
9454 	.type			= MLXSW_SP_RIF_TYPE_FID,
9455 	.rif_size		= sizeof(struct mlxsw_sp_rif),
9456 	.configure		= mlxsw_sp_rif_fid_configure,
9457 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
9458 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
9459 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
9460 };
9461 
9462 static struct mlxsw_sp_fid *
9463 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
9464 			  struct netlink_ext_ack *extack)
9465 {
9466 	struct net_device *br_dev;
9467 	u16 vid;
9468 	int err;
9469 
9470 	if (is_vlan_dev(rif->dev)) {
9471 		vid = vlan_dev_vlan_id(rif->dev);
9472 		br_dev = vlan_dev_real_dev(rif->dev);
9473 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
9474 			return ERR_PTR(-EINVAL);
9475 	} else {
9476 		err = br_vlan_get_pvid(rif->dev, &vid);
9477 		if (err < 0 || !vid) {
9478 			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
9479 			return ERR_PTR(-EINVAL);
9480 		}
9481 	}
9482 
9483 	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
9484 }
9485 
9486 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9487 {
9488 	struct switchdev_notifier_fdb_info info = {};
9489 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9490 	struct net_device *br_dev;
9491 	struct net_device *dev;
9492 
9493 	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
9494 	dev = br_fdb_find_port(br_dev, mac, vid);
9495 	if (!dev)
9496 		return;
9497 
9498 	info.addr = mac;
9499 	info.vid = vid;
9500 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9501 				 NULL);
9502 }
9503 
9504 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
9505 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
9506 	.rif_size		= sizeof(struct mlxsw_sp_rif),
9507 	.configure		= mlxsw_sp_rif_fid_configure,
9508 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
9509 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
9510 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
9511 };
9512 
9513 static struct mlxsw_sp_rif_ipip_lb *
9514 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
9515 {
9516 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
9517 }
9518 
9519 static void
9520 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
9521 			   const struct mlxsw_sp_rif_params *params)
9522 {
9523 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
9524 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
9525 
9526 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
9527 				 common);
9528 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
9529 	rif_lb->lb_config = params_lb->lb_config;
9530 }
9531 
9532 static int
9533 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
9534 				struct netlink_ext_ack *extack)
9535 {
9536 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9537 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9538 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9539 	struct mlxsw_sp_vr *ul_vr;
9540 	int err;
9541 
9542 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
9543 	if (IS_ERR(ul_vr))
9544 		return PTR_ERR(ul_vr);
9545 
9546 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
9547 	if (err)
9548 		goto err_loopback_op;
9549 
9550 	lb_rif->ul_vr_id = ul_vr->id;
9551 	lb_rif->ul_rif_id = 0;
9552 	++ul_vr->rif_count;
9553 	return 0;
9554 
9555 err_loopback_op:
9556 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9557 	return err;
9558 }
9559 
9560 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9561 {
9562 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9563 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9564 	struct mlxsw_sp_vr *ul_vr;
9565 
9566 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
9567 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
9568 
9569 	--ul_vr->rif_count;
9570 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9571 }
9572 
9573 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
9574 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
9575 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
9576 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
9577 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
9578 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
9579 };
9580 
9581 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
9582 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
9583 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
9584 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
9585 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
9586 };
9587 
9588 static int
9589 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
9590 {
9591 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9592 	char ritr_pl[MLXSW_REG_RITR_LEN];
9593 
9594 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
9595 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
9596 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
9597 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
9598 
9599 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9600 }
9601 
9602 static struct mlxsw_sp_rif *
9603 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
9604 		       struct netlink_ext_ack *extack)
9605 {
9606 	struct mlxsw_sp_rif *ul_rif;
9607 	u16 rif_index;
9608 	int err;
9609 
9610 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
9611 	if (err) {
9612 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
9613 		return ERR_PTR(err);
9614 	}
9615 
9616 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
9617 	if (!ul_rif)
9618 		return ERR_PTR(-ENOMEM);
9619 
9620 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
9621 	ul_rif->mlxsw_sp = mlxsw_sp;
9622 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
9623 	if (err)
9624 		goto ul_rif_op_err;
9625 
9626 	return ul_rif;
9627 
9628 ul_rif_op_err:
9629 	mlxsw_sp->router->rifs[rif_index] = NULL;
9630 	kfree(ul_rif);
9631 	return ERR_PTR(err);
9632 }
9633 
9634 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
9635 {
9636 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9637 
9638 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
9639 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
9640 	kfree(ul_rif);
9641 }
9642 
9643 static struct mlxsw_sp_rif *
9644 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
9645 		    struct netlink_ext_ack *extack)
9646 {
9647 	struct mlxsw_sp_vr *vr;
9648 	int err;
9649 
9650 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
9651 	if (IS_ERR(vr))
9652 		return ERR_CAST(vr);
9653 
9654 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
9655 		return vr->ul_rif;
9656 
9657 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
9658 	if (IS_ERR(vr->ul_rif)) {
9659 		err = PTR_ERR(vr->ul_rif);
9660 		goto err_ul_rif_create;
9661 	}
9662 
9663 	vr->rif_count++;
9664 	refcount_set(&vr->ul_rif_refcnt, 1);
9665 
9666 	return vr->ul_rif;
9667 
9668 err_ul_rif_create:
9669 	mlxsw_sp_vr_put(mlxsw_sp, vr);
9670 	return ERR_PTR(err);
9671 }
9672 
9673 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
9674 {
9675 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9676 	struct mlxsw_sp_vr *vr;
9677 
9678 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
9679 
9680 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
9681 		return;
9682 
9683 	vr->rif_count--;
9684 	mlxsw_sp_ul_rif_destroy(ul_rif);
9685 	mlxsw_sp_vr_put(mlxsw_sp, vr);
9686 }
9687 
9688 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
9689 			       u16 *ul_rif_index)
9690 {
9691 	struct mlxsw_sp_rif *ul_rif;
9692 	int err = 0;
9693 
9694 	mutex_lock(&mlxsw_sp->router->lock);
9695 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
9696 	if (IS_ERR(ul_rif)) {
9697 		err = PTR_ERR(ul_rif);
9698 		goto out;
9699 	}
9700 	*ul_rif_index = ul_rif->rif_index;
9701 out:
9702 	mutex_unlock(&mlxsw_sp->router->lock);
9703 	return err;
9704 }
9705 
9706 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
9707 {
9708 	struct mlxsw_sp_rif *ul_rif;
9709 
9710 	mutex_lock(&mlxsw_sp->router->lock);
9711 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
9712 	if (WARN_ON(!ul_rif))
9713 		goto out;
9714 
9715 	mlxsw_sp_ul_rif_put(ul_rif);
9716 out:
9717 	mutex_unlock(&mlxsw_sp->router->lock);
9718 }
9719 
9720 static int
9721 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
9722 				struct netlink_ext_ack *extack)
9723 {
9724 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9725 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9726 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9727 	struct mlxsw_sp_rif *ul_rif;
9728 	int err;
9729 
9730 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
9731 	if (IS_ERR(ul_rif))
9732 		return PTR_ERR(ul_rif);
9733 
9734 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
9735 	if (err)
9736 		goto err_loopback_op;
9737 
9738 	lb_rif->ul_vr_id = 0;
9739 	lb_rif->ul_rif_id = ul_rif->rif_index;
9740 
9741 	return 0;
9742 
9743 err_loopback_op:
9744 	mlxsw_sp_ul_rif_put(ul_rif);
9745 	return err;
9746 }
9747 
9748 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9749 {
9750 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9751 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9752 	struct mlxsw_sp_rif *ul_rif;
9753 
9754 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
9755 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
9756 	mlxsw_sp_ul_rif_put(ul_rif);
9757 }
9758 
9759 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
9760 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
9761 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
9762 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
9763 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
9764 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
9765 };
9766 
9767 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
9768 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
9769 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
9770 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
9771 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
9772 };
9773 
9774 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
9775 {
9776 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
9777 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
9778 	struct mlxsw_core *core = mlxsw_sp->core;
9779 
9780 	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
9781 		return -EIO;
9782 	mlxsw_sp->router->max_rif_mac_profile =
9783 		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
9784 
9785 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
9786 					 sizeof(struct mlxsw_sp_rif *),
9787 					 GFP_KERNEL);
9788 	if (!mlxsw_sp->router->rifs)
9789 		return -ENOMEM;
9790 
9791 	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
9792 	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
9793 	devlink_resource_occ_get_register(devlink,
9794 					  MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
9795 					  mlxsw_sp_rif_mac_profiles_occ_get,
9796 					  mlxsw_sp);
9797 
9798 	return 0;
9799 }
9800 
9801 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
9802 {
9803 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
9804 	int i;
9805 
9806 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
9807 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
9808 
9809 	devlink_resource_occ_get_unregister(devlink,
9810 					    MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
9811 	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
9812 	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
9813 	kfree(mlxsw_sp->router->rifs);
9814 }
9815 
9816 static int
9817 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
9818 {
9819 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
9820 
9821 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
9822 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
9823 }
9824 
9825 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
9826 {
9827 	int err;
9828 
9829 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
9830 
9831 	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
9832 	if (err)
9833 		return err;
9834 	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
9835 	if (err)
9836 		return err;
9837 
9838 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
9839 }
9840 
9841 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
9842 {
9843 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
9844 	return mlxsw_sp_ipips_init(mlxsw_sp);
9845 }
9846 
9847 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
9848 {
9849 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
9850 	return mlxsw_sp_ipips_init(mlxsw_sp);
9851 }
9852 
9853 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
9854 {
9855 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
9856 }
9857 
9858 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
9859 {
9860 	struct mlxsw_sp_router *router;
9861 
9862 	/* Flush pending FIB notifications and then flush the device's
9863 	 * table before requesting another dump. The FIB notification
9864 	 * block is unregistered, so no need to take RTNL.
9865 	 */
9866 	mlxsw_core_flush_owq();
9867 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
9868 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
9869 }
9870 
9871 #ifdef CONFIG_IP_ROUTE_MULTIPATH
9872 struct mlxsw_sp_mp_hash_config {
9873 	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
9874 	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
9875 	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
9876 	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
9877 	bool inc_parsing_depth;
9878 };
9879 
9880 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
9881 	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
9882 
9883 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
9884 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
9885 
9886 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
9887 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
9888 
9889 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
9890 {
9891 	unsigned long *inner_headers = config->inner_headers;
9892 	unsigned long *inner_fields = config->inner_fields;
9893 
9894 	/* IPv4 inner */
9895 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
9896 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
9897 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
9898 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
9899 	/* IPv6 inner */
9900 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
9901 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
9902 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
9903 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
9904 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
9905 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
9906 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
9907 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
9908 }
9909 
9910 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
9911 {
9912 	unsigned long *headers = config->headers;
9913 	unsigned long *fields = config->fields;
9914 
9915 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
9916 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
9917 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
9918 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
9919 }
9920 
9921 static void
9922 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
9923 			      u32 hash_fields)
9924 {
9925 	unsigned long *inner_headers = config->inner_headers;
9926 	unsigned long *inner_fields = config->inner_fields;
9927 
9928 	/* IPv4 Inner */
9929 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
9930 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
9931 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
9932 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
9933 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
9934 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
9935 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
9936 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
9937 	/* IPv6 inner */
9938 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
9939 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
9940 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
9941 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
9942 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
9943 	}
9944 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
9945 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
9946 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
9947 	}
9948 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
9949 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
9950 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
9951 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
9952 	/* L4 inner */
9953 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
9954 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
9955 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
9956 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
9957 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
9958 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
9959 }
9960 
9961 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
9962 				   struct mlxsw_sp_mp_hash_config *config)
9963 {
9964 	struct net *net = mlxsw_sp_net(mlxsw_sp);
9965 	unsigned long *headers = config->headers;
9966 	unsigned long *fields = config->fields;
9967 	u32 hash_fields;
9968 
9969 	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
9970 	case 0:
9971 		mlxsw_sp_mp4_hash_outer_addr(config);
9972 		break;
9973 	case 1:
9974 		mlxsw_sp_mp4_hash_outer_addr(config);
9975 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
9976 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
9977 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9978 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9979 		break;
9980 	case 2:
9981 		/* Outer */
9982 		mlxsw_sp_mp4_hash_outer_addr(config);
9983 		/* Inner */
9984 		mlxsw_sp_mp_hash_inner_l3(config);
9985 		break;
9986 	case 3:
9987 		hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
9988 		/* Outer */
9989 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
9990 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
9991 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
9992 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
9993 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
9994 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
9995 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
9996 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
9997 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
9998 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
9999 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10000 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10001 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10002 		/* Inner */
10003 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10004 		break;
10005 	}
10006 }
10007 
10008 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10009 {
10010 	unsigned long *headers = config->headers;
10011 	unsigned long *fields = config->fields;
10012 
10013 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10014 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10015 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10016 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10017 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10018 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10019 }
10020 
10021 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
10022 				   struct mlxsw_sp_mp_hash_config *config)
10023 {
10024 	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
10025 	unsigned long *headers = config->headers;
10026 	unsigned long *fields = config->fields;
10027 
10028 	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
10029 	case 0:
10030 		mlxsw_sp_mp6_hash_outer_addr(config);
10031 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10032 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10033 		break;
10034 	case 1:
10035 		mlxsw_sp_mp6_hash_outer_addr(config);
10036 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10037 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10038 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10039 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10040 		break;
10041 	case 2:
10042 		/* Outer */
10043 		mlxsw_sp_mp6_hash_outer_addr(config);
10044 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10045 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10046 		/* Inner */
10047 		mlxsw_sp_mp_hash_inner_l3(config);
10048 		config->inc_parsing_depth = true;
10049 		break;
10050 	case 3:
10051 		/* Outer */
10052 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10053 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10054 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10055 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
10056 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10057 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10058 		}
10059 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
10060 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10061 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10062 		}
10063 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10064 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10065 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
10066 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10067 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10068 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10069 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10070 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10071 		/* Inner */
10072 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10073 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
10074 			config->inc_parsing_depth = true;
10075 		break;
10076 	}
10077 }
10078 
10079 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
10080 						 bool old_inc_parsing_depth,
10081 						 bool new_inc_parsing_depth)
10082 {
10083 	int err;
10084 
10085 	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
10086 		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
10087 		if (err)
10088 			return err;
10089 		mlxsw_sp->router->inc_parsing_depth = true;
10090 	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
10091 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
10092 		mlxsw_sp->router->inc_parsing_depth = false;
10093 	}
10094 
10095 	return 0;
10096 }
10097 
10098 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10099 {
10100 	bool old_inc_parsing_depth, new_inc_parsing_depth;
10101 	struct mlxsw_sp_mp_hash_config config = {};
10102 	char recr2_pl[MLXSW_REG_RECR2_LEN];
10103 	unsigned long bit;
10104 	u32 seed;
10105 	int err;
10106 
10107 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
10108 	mlxsw_reg_recr2_pack(recr2_pl, seed);
10109 	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
10110 	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
10111 
10112 	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
10113 	new_inc_parsing_depth = config.inc_parsing_depth;
10114 	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
10115 						    old_inc_parsing_depth,
10116 						    new_inc_parsing_depth);
10117 	if (err)
10118 		return err;
10119 
10120 	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
10121 		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
10122 	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
10123 		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
10124 	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
10125 		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
10126 	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
10127 		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
10128 
10129 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
10130 	if (err)
10131 		goto err_reg_write;
10132 
10133 	return 0;
10134 
10135 err_reg_write:
10136 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
10137 					      old_inc_parsing_depth);
10138 	return err;
10139 }
10140 #else
10141 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10142 {
10143 	return 0;
10144 }
10145 #endif
10146 
10147 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
10148 {
10149 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
10150 	unsigned int i;
10151 
10152 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
10153 
10154 	/* HW is determining switch priority based on DSCP-bits, but the
10155 	 * kernel is still doing that based on the ToS. Since there's a
10156 	 * mismatch in bits we need to make sure to translate the right
10157 	 * value ToS would observe, skipping the 2 least-significant ECN bits.
10158 	 */
10159 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
10160 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
10161 
10162 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
10163 }
10164 
10165 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
10166 {
10167 	struct net *net = mlxsw_sp_net(mlxsw_sp);
10168 	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
10169 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
10170 	u64 max_rifs;
10171 
10172 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
10173 		return -EIO;
10174 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10175 
10176 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
10177 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
10178 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
10179 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10180 }
10181 
10182 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10183 {
10184 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
10185 
10186 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
10187 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10188 }
10189 
10190 static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
10191 	.init = mlxsw_sp_router_ll_basic_init,
10192 	.ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
10193 	.ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
10194 	.raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
10195 	.fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
10196 	.fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
10197 	.fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
10198 	.fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
10199 	.fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
10200 	.fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
10201 	.fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
10202 	.fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
10203 };
10204 
10205 static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
10206 {
10207 	size_t max_size = 0;
10208 	int i;
10209 
10210 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
10211 		size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
10212 
10213 		if (size > max_size)
10214 			max_size = size;
10215 	}
10216 	router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
10217 				    GFP_KERNEL);
10218 	if (!router->ll_op_ctx)
10219 		return -ENOMEM;
10220 	INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
10221 	return 0;
10222 }
10223 
10224 static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
10225 {
10226 	WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
10227 	kfree(router->ll_op_ctx);
10228 }
10229 
10230 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
10231 {
10232 	u16 lb_rif_index;
10233 	int err;
10234 
10235 	/* Create a generic loopback RIF associated with the main table
10236 	 * (default VRF). Any table can be used, but the main table exists
10237 	 * anyway, so we do not waste resources.
10238 	 */
10239 	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
10240 					 &lb_rif_index);
10241 	if (err)
10242 		return err;
10243 
10244 	mlxsw_sp->router->lb_rif_index = lb_rif_index;
10245 
10246 	return 0;
10247 }
10248 
10249 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
10250 {
10251 	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
10252 }
10253 
10254 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
10255 {
10256 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
10257 
10258 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
10259 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
10260 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10261 
10262 	return 0;
10263 }
10264 
10265 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
10266 	.init = mlxsw_sp1_router_init,
10267 	.ipips_init = mlxsw_sp1_ipips_init,
10268 };
10269 
10270 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
10271 {
10272 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
10273 
10274 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
10275 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
10276 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10277 
10278 	return 0;
10279 }
10280 
10281 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
10282 	.init = mlxsw_sp2_router_init,
10283 	.ipips_init = mlxsw_sp2_ipips_init,
10284 };
10285 
10286 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
10287 			 struct netlink_ext_ack *extack)
10288 {
10289 	struct mlxsw_sp_router *router;
10290 	int err;
10291 
10292 	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
10293 	if (!router)
10294 		return -ENOMEM;
10295 	mutex_init(&router->lock);
10296 	mlxsw_sp->router = router;
10297 	router->mlxsw_sp = mlxsw_sp;
10298 
10299 	err = mlxsw_sp->router_ops->init(mlxsw_sp);
10300 	if (err)
10301 		goto err_router_ops_init;
10302 
10303 	err = mlxsw_sp_router_xm_init(mlxsw_sp);
10304 	if (err)
10305 		goto err_xm_init;
10306 
10307 	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
10308 						       &mlxsw_sp_router_ll_xm_ops :
10309 						       &mlxsw_sp_router_ll_basic_ops;
10310 	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
10311 
10312 	err = mlxsw_sp_router_ll_op_ctx_init(router);
10313 	if (err)
10314 		goto err_ll_op_ctx_init;
10315 
10316 	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
10317 	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
10318 			  mlxsw_sp_nh_grp_activity_work);
10319 
10320 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
10321 	err = __mlxsw_sp_router_init(mlxsw_sp);
10322 	if (err)
10323 		goto err_router_init;
10324 
10325 	err = mlxsw_sp_rifs_init(mlxsw_sp);
10326 	if (err)
10327 		goto err_rifs_init;
10328 
10329 	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
10330 	if (err)
10331 		goto err_ipips_init;
10332 
10333 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
10334 			      &mlxsw_sp_nexthop_ht_params);
10335 	if (err)
10336 		goto err_nexthop_ht_init;
10337 
10338 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
10339 			      &mlxsw_sp_nexthop_group_ht_params);
10340 	if (err)
10341 		goto err_nexthop_group_ht_init;
10342 
10343 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
10344 	err = mlxsw_sp_lpm_init(mlxsw_sp);
10345 	if (err)
10346 		goto err_lpm_init;
10347 
10348 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
10349 	if (err)
10350 		goto err_mr_init;
10351 
10352 	err = mlxsw_sp_vrs_init(mlxsw_sp);
10353 	if (err)
10354 		goto err_vrs_init;
10355 
10356 	err = mlxsw_sp_lb_rif_init(mlxsw_sp);
10357 	if (err)
10358 		goto err_lb_rif_init;
10359 
10360 	err = mlxsw_sp_neigh_init(mlxsw_sp);
10361 	if (err)
10362 		goto err_neigh_init;
10363 
10364 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
10365 	if (err)
10366 		goto err_mp_hash_init;
10367 
10368 	err = mlxsw_sp_dscp_init(mlxsw_sp);
10369 	if (err)
10370 		goto err_dscp_init;
10371 
10372 	INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
10373 	INIT_LIST_HEAD(&router->fib_event_queue);
10374 	spin_lock_init(&router->fib_event_queue_lock);
10375 
10376 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
10377 	err = register_inetaddr_notifier(&router->inetaddr_nb);
10378 	if (err)
10379 		goto err_register_inetaddr_notifier;
10380 
10381 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
10382 	err = register_inet6addr_notifier(&router->inet6addr_nb);
10383 	if (err)
10384 		goto err_register_inet6addr_notifier;
10385 
10386 	mlxsw_sp->router->netevent_nb.notifier_call =
10387 		mlxsw_sp_router_netevent_event;
10388 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10389 	if (err)
10390 		goto err_register_netevent_notifier;
10391 
10392 	mlxsw_sp->router->nexthop_nb.notifier_call =
10393 		mlxsw_sp_nexthop_obj_event;
10394 	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10395 					&mlxsw_sp->router->nexthop_nb,
10396 					extack);
10397 	if (err)
10398 		goto err_register_nexthop_notifier;
10399 
10400 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
10401 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10402 				    &mlxsw_sp->router->fib_nb,
10403 				    mlxsw_sp_router_fib_dump_flush, extack);
10404 	if (err)
10405 		goto err_register_fib_notifier;
10406 
10407 	return 0;
10408 
10409 err_register_fib_notifier:
10410 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10411 				    &mlxsw_sp->router->nexthop_nb);
10412 err_register_nexthop_notifier:
10413 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10414 err_register_netevent_notifier:
10415 	unregister_inet6addr_notifier(&router->inet6addr_nb);
10416 err_register_inet6addr_notifier:
10417 	unregister_inetaddr_notifier(&router->inetaddr_nb);
10418 err_register_inetaddr_notifier:
10419 	mlxsw_core_flush_owq();
10420 	WARN_ON(!list_empty(&router->fib_event_queue));
10421 err_dscp_init:
10422 err_mp_hash_init:
10423 	mlxsw_sp_neigh_fini(mlxsw_sp);
10424 err_neigh_init:
10425 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
10426 err_lb_rif_init:
10427 	mlxsw_sp_vrs_fini(mlxsw_sp);
10428 err_vrs_init:
10429 	mlxsw_sp_mr_fini(mlxsw_sp);
10430 err_mr_init:
10431 	mlxsw_sp_lpm_fini(mlxsw_sp);
10432 err_lpm_init:
10433 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
10434 err_nexthop_group_ht_init:
10435 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
10436 err_nexthop_ht_init:
10437 	mlxsw_sp_ipips_fini(mlxsw_sp);
10438 err_ipips_init:
10439 	mlxsw_sp_rifs_fini(mlxsw_sp);
10440 err_rifs_init:
10441 	__mlxsw_sp_router_fini(mlxsw_sp);
10442 err_router_init:
10443 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
10444 	mlxsw_sp_router_ll_op_ctx_fini(router);
10445 err_ll_op_ctx_init:
10446 	mlxsw_sp_router_xm_fini(mlxsw_sp);
10447 err_xm_init:
10448 err_router_ops_init:
10449 	mutex_destroy(&mlxsw_sp->router->lock);
10450 	kfree(mlxsw_sp->router);
10451 	return err;
10452 }
10453 
10454 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10455 {
10456 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10457 				&mlxsw_sp->router->fib_nb);
10458 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10459 				    &mlxsw_sp->router->nexthop_nb);
10460 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10461 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
10462 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
10463 	mlxsw_core_flush_owq();
10464 	WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
10465 	mlxsw_sp_neigh_fini(mlxsw_sp);
10466 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
10467 	mlxsw_sp_vrs_fini(mlxsw_sp);
10468 	mlxsw_sp_mr_fini(mlxsw_sp);
10469 	mlxsw_sp_lpm_fini(mlxsw_sp);
10470 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
10471 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
10472 	mlxsw_sp_ipips_fini(mlxsw_sp);
10473 	mlxsw_sp_rifs_fini(mlxsw_sp);
10474 	__mlxsw_sp_router_fini(mlxsw_sp);
10475 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
10476 	mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
10477 	mlxsw_sp_router_xm_fini(mlxsw_sp);
10478 	mutex_destroy(&mlxsw_sp->router->lock);
10479 	kfree(mlxsw_sp->router);
10480 }
10481