1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
8 #include <linux/in6.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <net/netevent.h>
22 #include <net/neighbour.h>
23 #include <net/arp.h>
24 #include <net/ip_fib.h>
25 #include <net/ip6_fib.h>
26 #include <net/nexthop.h>
27 #include <net/fib_rules.h>
28 #include <net/ip_tunnels.h>
29 #include <net/l3mdev.h>
30 #include <net/addrconf.h>
31 #include <net/ndisc.h>
32 #include <net/ipv6.h>
33 #include <net/fib_notifier.h>
34 #include <net/switchdev.h>
35 
36 #include "spectrum.h"
37 #include "core.h"
38 #include "reg.h"
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_ipip.h"
42 #include "spectrum_mr.h"
43 #include "spectrum_mr_tcam.h"
44 #include "spectrum_router.h"
45 #include "spectrum_span.h"
46 
47 struct mlxsw_sp_fib;
48 struct mlxsw_sp_vr;
49 struct mlxsw_sp_lpm_tree;
50 struct mlxsw_sp_rif_ops;
51 
52 struct mlxsw_sp_rif {
53 	struct list_head nexthop_list;
54 	struct list_head neigh_list;
55 	struct net_device *dev; /* NULL for underlay RIF */
56 	struct mlxsw_sp_fid *fid;
57 	unsigned char addr[ETH_ALEN];
58 	int mtu;
59 	u16 rif_index;
60 	u16 vr_id;
61 	const struct mlxsw_sp_rif_ops *ops;
62 	struct mlxsw_sp *mlxsw_sp;
63 
64 	unsigned int counter_ingress;
65 	bool counter_ingress_valid;
66 	unsigned int counter_egress;
67 	bool counter_egress_valid;
68 };
69 
70 struct mlxsw_sp_rif_params {
71 	struct net_device *dev;
72 	union {
73 		u16 system_port;
74 		u16 lag_id;
75 	};
76 	u16 vid;
77 	bool lag;
78 };
79 
80 struct mlxsw_sp_rif_subport {
81 	struct mlxsw_sp_rif common;
82 	refcount_t ref_count;
83 	union {
84 		u16 system_port;
85 		u16 lag_id;
86 	};
87 	u16 vid;
88 	bool lag;
89 };
90 
91 struct mlxsw_sp_rif_ipip_lb {
92 	struct mlxsw_sp_rif common;
93 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
94 	u16 ul_vr_id; /* Reserved for Spectrum-2. */
95 	u16 ul_rif_id; /* Reserved for Spectrum. */
96 };
97 
98 struct mlxsw_sp_rif_params_ipip_lb {
99 	struct mlxsw_sp_rif_params common;
100 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
101 };
102 
103 struct mlxsw_sp_rif_ops {
104 	enum mlxsw_sp_rif_type type;
105 	size_t rif_size;
106 
107 	void (*setup)(struct mlxsw_sp_rif *rif,
108 		      const struct mlxsw_sp_rif_params *params);
109 	int (*configure)(struct mlxsw_sp_rif *rif);
110 	void (*deconfigure)(struct mlxsw_sp_rif *rif);
111 	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
112 					 struct netlink_ext_ack *extack);
113 	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
114 };
115 
116 static struct mlxsw_sp_rif *
117 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
118 			 const struct net_device *dev);
119 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
120 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
121 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
122 				  struct mlxsw_sp_lpm_tree *lpm_tree);
123 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
124 				     const struct mlxsw_sp_fib *fib,
125 				     u8 tree_id);
126 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
127 				       const struct mlxsw_sp_fib *fib);
128 
129 static unsigned int *
130 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
131 			   enum mlxsw_sp_rif_counter_dir dir)
132 {
133 	switch (dir) {
134 	case MLXSW_SP_RIF_COUNTER_EGRESS:
135 		return &rif->counter_egress;
136 	case MLXSW_SP_RIF_COUNTER_INGRESS:
137 		return &rif->counter_ingress;
138 	}
139 	return NULL;
140 }
141 
142 static bool
143 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
144 			       enum mlxsw_sp_rif_counter_dir dir)
145 {
146 	switch (dir) {
147 	case MLXSW_SP_RIF_COUNTER_EGRESS:
148 		return rif->counter_egress_valid;
149 	case MLXSW_SP_RIF_COUNTER_INGRESS:
150 		return rif->counter_ingress_valid;
151 	}
152 	return false;
153 }
154 
155 static void
156 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
157 			       enum mlxsw_sp_rif_counter_dir dir,
158 			       bool valid)
159 {
160 	switch (dir) {
161 	case MLXSW_SP_RIF_COUNTER_EGRESS:
162 		rif->counter_egress_valid = valid;
163 		break;
164 	case MLXSW_SP_RIF_COUNTER_INGRESS:
165 		rif->counter_ingress_valid = valid;
166 		break;
167 	}
168 }
169 
170 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
171 				     unsigned int counter_index, bool enable,
172 				     enum mlxsw_sp_rif_counter_dir dir)
173 {
174 	char ritr_pl[MLXSW_REG_RITR_LEN];
175 	bool is_egress = false;
176 	int err;
177 
178 	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
179 		is_egress = true;
180 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
181 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
182 	if (err)
183 		return err;
184 
185 	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
186 				    is_egress);
187 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
188 }
189 
190 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
191 				   struct mlxsw_sp_rif *rif,
192 				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
193 {
194 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
195 	unsigned int *p_counter_index;
196 	bool valid;
197 	int err;
198 
199 	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
200 	if (!valid)
201 		return -EINVAL;
202 
203 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
204 	if (!p_counter_index)
205 		return -EINVAL;
206 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
207 			     MLXSW_REG_RICNT_OPCODE_NOP);
208 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
209 	if (err)
210 		return err;
211 	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
212 	return 0;
213 }
214 
215 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
216 				      unsigned int counter_index)
217 {
218 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
219 
220 	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
221 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
222 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
223 }
224 
225 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
226 			       struct mlxsw_sp_rif *rif,
227 			       enum mlxsw_sp_rif_counter_dir dir)
228 {
229 	unsigned int *p_counter_index;
230 	int err;
231 
232 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
233 	if (!p_counter_index)
234 		return -EINVAL;
235 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
236 				     p_counter_index);
237 	if (err)
238 		return err;
239 
240 	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
241 	if (err)
242 		goto err_counter_clear;
243 
244 	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
245 					*p_counter_index, true, dir);
246 	if (err)
247 		goto err_counter_edit;
248 	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
249 	return 0;
250 
251 err_counter_edit:
252 err_counter_clear:
253 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
254 			      *p_counter_index);
255 	return err;
256 }
257 
258 void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
259 			       struct mlxsw_sp_rif *rif,
260 			       enum mlxsw_sp_rif_counter_dir dir)
261 {
262 	unsigned int *p_counter_index;
263 
264 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
265 		return;
266 
267 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
268 	if (WARN_ON(!p_counter_index))
269 		return;
270 	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
271 				  *p_counter_index, false, dir);
272 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
273 			      *p_counter_index);
274 	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
275 }
276 
277 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
278 {
279 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
280 	struct devlink *devlink;
281 
282 	devlink = priv_to_devlink(mlxsw_sp->core);
283 	if (!devlink_dpipe_table_counter_enabled(devlink,
284 						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
285 		return;
286 	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
287 }
288 
289 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
290 {
291 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
292 
293 	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
294 }
295 
296 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
297 
298 struct mlxsw_sp_prefix_usage {
299 	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
300 };
301 
302 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
303 	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
304 
305 static bool
306 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
307 			 struct mlxsw_sp_prefix_usage *prefix_usage2)
308 {
309 	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
310 }
311 
312 static void
313 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
314 			  struct mlxsw_sp_prefix_usage *prefix_usage2)
315 {
316 	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
317 }
318 
319 static void
320 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
321 			  unsigned char prefix_len)
322 {
323 	set_bit(prefix_len, prefix_usage->b);
324 }
325 
326 static void
327 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
328 			    unsigned char prefix_len)
329 {
330 	clear_bit(prefix_len, prefix_usage->b);
331 }
332 
333 struct mlxsw_sp_fib_key {
334 	unsigned char addr[sizeof(struct in6_addr)];
335 	unsigned char prefix_len;
336 };
337 
338 enum mlxsw_sp_fib_entry_type {
339 	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
340 	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
341 	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
342 	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
343 	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
344 
345 	/* This is a special case of local delivery, where a packet should be
346 	 * decapsulated on reception. Note that there is no corresponding ENCAP,
347 	 * because that's a type of next hop, not of FIB entry. (There can be
348 	 * several next hops in a REMOTE entry, and some of them may be
349 	 * encapsulating entries.)
350 	 */
351 	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
352 	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
353 };
354 
355 struct mlxsw_sp_nexthop_group_info;
356 struct mlxsw_sp_nexthop_group;
357 struct mlxsw_sp_fib_entry;
358 
359 struct mlxsw_sp_fib_node {
360 	struct mlxsw_sp_fib_entry *fib_entry;
361 	struct list_head list;
362 	struct rhash_head ht_node;
363 	struct mlxsw_sp_fib *fib;
364 	struct mlxsw_sp_fib_key key;
365 };
366 
367 struct mlxsw_sp_fib_entry_decap {
368 	struct mlxsw_sp_ipip_entry *ipip_entry;
369 	u32 tunnel_index;
370 };
371 
372 static struct mlxsw_sp_fib_entry_priv *
373 mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
374 {
375 	struct mlxsw_sp_fib_entry_priv *priv;
376 
377 	if (!ll_ops->fib_entry_priv_size)
378 		/* No need to have priv */
379 		return NULL;
380 
381 	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
382 	if (!priv)
383 		return ERR_PTR(-ENOMEM);
384 	refcount_set(&priv->refcnt, 1);
385 	return priv;
386 }
387 
388 static void
389 mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
390 {
391 	kfree(priv);
392 }
393 
394 static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
395 {
396 	refcount_inc(&priv->refcnt);
397 }
398 
399 static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
400 {
401 	if (!priv || !refcount_dec_and_test(&priv->refcnt))
402 		return;
403 	mlxsw_sp_fib_entry_priv_destroy(priv);
404 }
405 
406 static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
407 						struct mlxsw_sp_fib_entry_priv *priv)
408 {
409 	if (!priv)
410 		return;
411 	mlxsw_sp_fib_entry_priv_hold(priv);
412 	list_add(&priv->list, &op_ctx->fib_entry_priv_list);
413 }
414 
415 static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
416 {
417 	struct mlxsw_sp_fib_entry_priv *priv, *tmp;
418 
419 	list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
420 		mlxsw_sp_fib_entry_priv_put(priv);
421 	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
422 }
423 
424 struct mlxsw_sp_fib_entry {
425 	struct mlxsw_sp_fib_node *fib_node;
426 	enum mlxsw_sp_fib_entry_type type;
427 	struct list_head nexthop_group_node;
428 	struct mlxsw_sp_nexthop_group *nh_group;
429 	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
430 	struct mlxsw_sp_fib_entry_priv *priv;
431 };
432 
433 struct mlxsw_sp_fib4_entry {
434 	struct mlxsw_sp_fib_entry common;
435 	struct fib_info *fi;
436 	u32 tb_id;
437 	u8 tos;
438 	u8 type;
439 };
440 
441 struct mlxsw_sp_fib6_entry {
442 	struct mlxsw_sp_fib_entry common;
443 	struct list_head rt6_list;
444 	unsigned int nrt6;
445 };
446 
447 struct mlxsw_sp_rt6 {
448 	struct list_head list;
449 	struct fib6_info *rt;
450 };
451 
452 struct mlxsw_sp_lpm_tree {
453 	u8 id; /* tree ID */
454 	unsigned int ref_count;
455 	enum mlxsw_sp_l3proto proto;
456 	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
457 	struct mlxsw_sp_prefix_usage prefix_usage;
458 };
459 
460 struct mlxsw_sp_fib {
461 	struct rhashtable ht;
462 	struct list_head node_list;
463 	struct mlxsw_sp_vr *vr;
464 	struct mlxsw_sp_lpm_tree *lpm_tree;
465 	enum mlxsw_sp_l3proto proto;
466 	const struct mlxsw_sp_router_ll_ops *ll_ops;
467 };
468 
469 struct mlxsw_sp_vr {
470 	u16 id; /* virtual router ID */
471 	u32 tb_id; /* kernel fib table id */
472 	unsigned int rif_count;
473 	struct mlxsw_sp_fib *fib4;
474 	struct mlxsw_sp_fib *fib6;
475 	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
476 	struct mlxsw_sp_rif *ul_rif;
477 	refcount_t ul_rif_refcnt;
478 };
479 
480 static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
481 					 enum mlxsw_sp_l3proto proto)
482 {
483 	return 0;
484 }
485 
486 static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
487 {
488 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
489 			       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
490 }
491 
492 static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
493 {
494 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
495 			       xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
496 }
497 
498 static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
499 {
500 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
501 			       xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
502 }
503 
504 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
505 
506 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
507 						struct mlxsw_sp_vr *vr,
508 						enum mlxsw_sp_l3proto proto)
509 {
510 	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
511 	struct mlxsw_sp_lpm_tree *lpm_tree;
512 	struct mlxsw_sp_fib *fib;
513 	int err;
514 
515 	err = ll_ops->init(mlxsw_sp, vr->id, proto);
516 	if (err)
517 		return ERR_PTR(err);
518 
519 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
520 	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
521 	if (!fib)
522 		return ERR_PTR(-ENOMEM);
523 	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
524 	if (err)
525 		goto err_rhashtable_init;
526 	INIT_LIST_HEAD(&fib->node_list);
527 	fib->proto = proto;
528 	fib->vr = vr;
529 	fib->lpm_tree = lpm_tree;
530 	fib->ll_ops = ll_ops;
531 	mlxsw_sp_lpm_tree_hold(lpm_tree);
532 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
533 	if (err)
534 		goto err_lpm_tree_bind;
535 	return fib;
536 
537 err_lpm_tree_bind:
538 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
539 err_rhashtable_init:
540 	kfree(fib);
541 	return ERR_PTR(err);
542 }
543 
544 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
545 				 struct mlxsw_sp_fib *fib)
546 {
547 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
548 	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
549 	WARN_ON(!list_empty(&fib->node_list));
550 	rhashtable_destroy(&fib->ht);
551 	kfree(fib);
552 }
553 
554 static struct mlxsw_sp_lpm_tree *
555 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
556 {
557 	static struct mlxsw_sp_lpm_tree *lpm_tree;
558 	int i;
559 
560 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
561 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
562 		if (lpm_tree->ref_count == 0)
563 			return lpm_tree;
564 	}
565 	return NULL;
566 }
567 
568 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
569 				   const struct mlxsw_sp_router_ll_ops *ll_ops,
570 				   struct mlxsw_sp_lpm_tree *lpm_tree)
571 {
572 	char xralta_pl[MLXSW_REG_XRALTA_LEN];
573 
574 	mlxsw_reg_xralta_pack(xralta_pl, true,
575 			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
576 			      lpm_tree->id);
577 	return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
578 }
579 
580 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
581 				   const struct mlxsw_sp_router_ll_ops *ll_ops,
582 				   struct mlxsw_sp_lpm_tree *lpm_tree)
583 {
584 	char xralta_pl[MLXSW_REG_XRALTA_LEN];
585 
586 	mlxsw_reg_xralta_pack(xralta_pl, false,
587 			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
588 			      lpm_tree->id);
589 	ll_ops->ralta_write(mlxsw_sp, xralta_pl);
590 }
591 
592 static int
593 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
594 				  const struct mlxsw_sp_router_ll_ops *ll_ops,
595 				  struct mlxsw_sp_prefix_usage *prefix_usage,
596 				  struct mlxsw_sp_lpm_tree *lpm_tree)
597 {
598 	char xralst_pl[MLXSW_REG_XRALST_LEN];
599 	u8 root_bin = 0;
600 	u8 prefix;
601 	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
602 
603 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
604 		root_bin = prefix;
605 
606 	mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
607 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
608 		if (prefix == 0)
609 			continue;
610 		mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
611 					  MLXSW_REG_RALST_BIN_NO_CHILD);
612 		last_prefix = prefix;
613 	}
614 	return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
615 }
616 
617 static struct mlxsw_sp_lpm_tree *
618 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
619 			 const struct mlxsw_sp_router_ll_ops *ll_ops,
620 			 struct mlxsw_sp_prefix_usage *prefix_usage,
621 			 enum mlxsw_sp_l3proto proto)
622 {
623 	struct mlxsw_sp_lpm_tree *lpm_tree;
624 	int err;
625 
626 	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
627 	if (!lpm_tree)
628 		return ERR_PTR(-EBUSY);
629 	lpm_tree->proto = proto;
630 	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
631 	if (err)
632 		return ERR_PTR(err);
633 
634 	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
635 	if (err)
636 		goto err_left_struct_set;
637 	memcpy(&lpm_tree->prefix_usage, prefix_usage,
638 	       sizeof(lpm_tree->prefix_usage));
639 	memset(&lpm_tree->prefix_ref_count, 0,
640 	       sizeof(lpm_tree->prefix_ref_count));
641 	lpm_tree->ref_count = 1;
642 	return lpm_tree;
643 
644 err_left_struct_set:
645 	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
646 	return ERR_PTR(err);
647 }
648 
649 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
650 				      const struct mlxsw_sp_router_ll_ops *ll_ops,
651 				      struct mlxsw_sp_lpm_tree *lpm_tree)
652 {
653 	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
654 }
655 
656 static struct mlxsw_sp_lpm_tree *
657 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
658 		      struct mlxsw_sp_prefix_usage *prefix_usage,
659 		      enum mlxsw_sp_l3proto proto)
660 {
661 	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
662 	struct mlxsw_sp_lpm_tree *lpm_tree;
663 	int i;
664 
665 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
666 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
667 		if (lpm_tree->ref_count != 0 &&
668 		    lpm_tree->proto == proto &&
669 		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
670 					     prefix_usage)) {
671 			mlxsw_sp_lpm_tree_hold(lpm_tree);
672 			return lpm_tree;
673 		}
674 	}
675 	return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
676 }
677 
678 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
679 {
680 	lpm_tree->ref_count++;
681 }
682 
683 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
684 				  struct mlxsw_sp_lpm_tree *lpm_tree)
685 {
686 	const struct mlxsw_sp_router_ll_ops *ll_ops =
687 				mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
688 
689 	if (--lpm_tree->ref_count == 0)
690 		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
691 }
692 
693 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
694 
695 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
696 {
697 	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
698 	struct mlxsw_sp_lpm_tree *lpm_tree;
699 	u64 max_trees;
700 	int err, i;
701 
702 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
703 		return -EIO;
704 
705 	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
706 	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
707 	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
708 					     sizeof(struct mlxsw_sp_lpm_tree),
709 					     GFP_KERNEL);
710 	if (!mlxsw_sp->router->lpm.trees)
711 		return -ENOMEM;
712 
713 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
714 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
715 		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
716 	}
717 
718 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
719 					 MLXSW_SP_L3_PROTO_IPV4);
720 	if (IS_ERR(lpm_tree)) {
721 		err = PTR_ERR(lpm_tree);
722 		goto err_ipv4_tree_get;
723 	}
724 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
725 
726 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
727 					 MLXSW_SP_L3_PROTO_IPV6);
728 	if (IS_ERR(lpm_tree)) {
729 		err = PTR_ERR(lpm_tree);
730 		goto err_ipv6_tree_get;
731 	}
732 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
733 
734 	return 0;
735 
736 err_ipv6_tree_get:
737 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
738 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
739 err_ipv4_tree_get:
740 	kfree(mlxsw_sp->router->lpm.trees);
741 	return err;
742 }
743 
744 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
745 {
746 	struct mlxsw_sp_lpm_tree *lpm_tree;
747 
748 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
749 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
750 
751 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
752 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
753 
754 	kfree(mlxsw_sp->router->lpm.trees);
755 }
756 
757 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
758 {
759 	return !!vr->fib4 || !!vr->fib6 ||
760 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
761 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
762 }
763 
764 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
765 {
766 	struct mlxsw_sp_vr *vr;
767 	int i;
768 
769 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
770 		vr = &mlxsw_sp->router->vrs[i];
771 		if (!mlxsw_sp_vr_is_used(vr))
772 			return vr;
773 	}
774 	return NULL;
775 }
776 
777 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
778 				     const struct mlxsw_sp_fib *fib, u8 tree_id)
779 {
780 	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
781 
782 	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
783 			      (enum mlxsw_reg_ralxx_protocol) fib->proto,
784 			      tree_id);
785 	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
786 }
787 
788 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
789 				       const struct mlxsw_sp_fib *fib)
790 {
791 	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
792 
793 	/* Bind to tree 0 which is default */
794 	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
795 			      (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
796 	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
797 }
798 
799 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
800 {
801 	/* For our purpose, squash main, default and local tables into one */
802 	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
803 		tb_id = RT_TABLE_MAIN;
804 	return tb_id;
805 }
806 
807 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
808 					    u32 tb_id)
809 {
810 	struct mlxsw_sp_vr *vr;
811 	int i;
812 
813 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
814 
815 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
816 		vr = &mlxsw_sp->router->vrs[i];
817 		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
818 			return vr;
819 	}
820 	return NULL;
821 }
822 
823 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
824 				u16 *vr_id)
825 {
826 	struct mlxsw_sp_vr *vr;
827 	int err = 0;
828 
829 	mutex_lock(&mlxsw_sp->router->lock);
830 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
831 	if (!vr) {
832 		err = -ESRCH;
833 		goto out;
834 	}
835 	*vr_id = vr->id;
836 out:
837 	mutex_unlock(&mlxsw_sp->router->lock);
838 	return err;
839 }
840 
841 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
842 					    enum mlxsw_sp_l3proto proto)
843 {
844 	switch (proto) {
845 	case MLXSW_SP_L3_PROTO_IPV4:
846 		return vr->fib4;
847 	case MLXSW_SP_L3_PROTO_IPV6:
848 		return vr->fib6;
849 	}
850 	return NULL;
851 }
852 
853 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
854 					      u32 tb_id,
855 					      struct netlink_ext_ack *extack)
856 {
857 	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
858 	struct mlxsw_sp_fib *fib4;
859 	struct mlxsw_sp_fib *fib6;
860 	struct mlxsw_sp_vr *vr;
861 	int err;
862 
863 	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
864 	if (!vr) {
865 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
866 		return ERR_PTR(-EBUSY);
867 	}
868 	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
869 	if (IS_ERR(fib4))
870 		return ERR_CAST(fib4);
871 	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
872 	if (IS_ERR(fib6)) {
873 		err = PTR_ERR(fib6);
874 		goto err_fib6_create;
875 	}
876 	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
877 					     MLXSW_SP_L3_PROTO_IPV4);
878 	if (IS_ERR(mr4_table)) {
879 		err = PTR_ERR(mr4_table);
880 		goto err_mr4_table_create;
881 	}
882 	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
883 					     MLXSW_SP_L3_PROTO_IPV6);
884 	if (IS_ERR(mr6_table)) {
885 		err = PTR_ERR(mr6_table);
886 		goto err_mr6_table_create;
887 	}
888 
889 	vr->fib4 = fib4;
890 	vr->fib6 = fib6;
891 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
892 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
893 	vr->tb_id = tb_id;
894 	return vr;
895 
896 err_mr6_table_create:
897 	mlxsw_sp_mr_table_destroy(mr4_table);
898 err_mr4_table_create:
899 	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
900 err_fib6_create:
901 	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
902 	return ERR_PTR(err);
903 }
904 
905 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
906 				struct mlxsw_sp_vr *vr)
907 {
908 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
909 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
910 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
911 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
912 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
913 	vr->fib6 = NULL;
914 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
915 	vr->fib4 = NULL;
916 }
917 
918 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
919 					   struct netlink_ext_ack *extack)
920 {
921 	struct mlxsw_sp_vr *vr;
922 
923 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
924 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
925 	if (!vr)
926 		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
927 	return vr;
928 }
929 
930 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
931 {
932 	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
933 	    list_empty(&vr->fib6->node_list) &&
934 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
935 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
936 		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
937 }
938 
939 static bool
940 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
941 				    enum mlxsw_sp_l3proto proto, u8 tree_id)
942 {
943 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
944 
945 	if (!mlxsw_sp_vr_is_used(vr))
946 		return false;
947 	if (fib->lpm_tree->id == tree_id)
948 		return true;
949 	return false;
950 }
951 
952 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
953 					struct mlxsw_sp_fib *fib,
954 					struct mlxsw_sp_lpm_tree *new_tree)
955 {
956 	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
957 	int err;
958 
959 	fib->lpm_tree = new_tree;
960 	mlxsw_sp_lpm_tree_hold(new_tree);
961 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
962 	if (err)
963 		goto err_tree_bind;
964 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
965 	return 0;
966 
967 err_tree_bind:
968 	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
969 	fib->lpm_tree = old_tree;
970 	return err;
971 }
972 
973 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
974 					 struct mlxsw_sp_fib *fib,
975 					 struct mlxsw_sp_lpm_tree *new_tree)
976 {
977 	enum mlxsw_sp_l3proto proto = fib->proto;
978 	struct mlxsw_sp_lpm_tree *old_tree;
979 	u8 old_id, new_id = new_tree->id;
980 	struct mlxsw_sp_vr *vr;
981 	int i, err;
982 
983 	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
984 	old_id = old_tree->id;
985 
986 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
987 		vr = &mlxsw_sp->router->vrs[i];
988 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
989 			continue;
990 		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
991 						   mlxsw_sp_vr_fib(vr, proto),
992 						   new_tree);
993 		if (err)
994 			goto err_tree_replace;
995 	}
996 
997 	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
998 	       sizeof(new_tree->prefix_ref_count));
999 	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
1000 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
1001 
1002 	return 0;
1003 
1004 err_tree_replace:
1005 	for (i--; i >= 0; i--) {
1006 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
1007 			continue;
1008 		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1009 					     mlxsw_sp_vr_fib(vr, proto),
1010 					     old_tree);
1011 	}
1012 	return err;
1013 }
1014 
1015 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
1016 {
1017 	struct mlxsw_sp_vr *vr;
1018 	u64 max_vrs;
1019 	int i;
1020 
1021 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
1022 		return -EIO;
1023 
1024 	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1025 	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
1026 					GFP_KERNEL);
1027 	if (!mlxsw_sp->router->vrs)
1028 		return -ENOMEM;
1029 
1030 	for (i = 0; i < max_vrs; i++) {
1031 		vr = &mlxsw_sp->router->vrs[i];
1032 		vr->id = i;
1033 	}
1034 
1035 	return 0;
1036 }
1037 
1038 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
1039 
1040 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
1041 {
1042 	/* At this stage we're guaranteed not to have new incoming
1043 	 * FIB notifications and the work queue is free from FIBs
1044 	 * sitting on top of mlxsw netdevs. However, we can still
1045 	 * have other FIBs queued. Flush the queue before flushing
1046 	 * the device's tables. No need for locks, as we're the only
1047 	 * writer.
1048 	 */
1049 	mlxsw_core_flush_owq();
1050 	mlxsw_sp_router_fib_flush(mlxsw_sp);
1051 	kfree(mlxsw_sp->router->vrs);
1052 }
1053 
1054 static struct net_device *
1055 __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
1056 {
1057 	struct ip_tunnel *tun = netdev_priv(ol_dev);
1058 	struct net *net = dev_net(ol_dev);
1059 
1060 	return dev_get_by_index_rcu(net, tun->parms.link);
1061 }
1062 
1063 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
1064 {
1065 	struct net_device *d;
1066 	u32 tb_id;
1067 
1068 	rcu_read_lock();
1069 	d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1070 	if (d)
1071 		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1072 	else
1073 		tb_id = RT_TABLE_MAIN;
1074 	rcu_read_unlock();
1075 
1076 	return tb_id;
1077 }
1078 
1079 static struct mlxsw_sp_rif *
1080 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1081 		    const struct mlxsw_sp_rif_params *params,
1082 		    struct netlink_ext_ack *extack);
1083 
1084 static struct mlxsw_sp_rif_ipip_lb *
1085 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1086 				enum mlxsw_sp_ipip_type ipipt,
1087 				struct net_device *ol_dev,
1088 				struct netlink_ext_ack *extack)
1089 {
1090 	struct mlxsw_sp_rif_params_ipip_lb lb_params;
1091 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1092 	struct mlxsw_sp_rif *rif;
1093 
1094 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1095 	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1096 		.common.dev = ol_dev,
1097 		.common.lag = false,
1098 		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1099 	};
1100 
1101 	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1102 	if (IS_ERR(rif))
1103 		return ERR_CAST(rif);
1104 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1105 }
1106 
1107 static struct mlxsw_sp_ipip_entry *
1108 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1109 			  enum mlxsw_sp_ipip_type ipipt,
1110 			  struct net_device *ol_dev)
1111 {
1112 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1113 	struct mlxsw_sp_ipip_entry *ipip_entry;
1114 	struct mlxsw_sp_ipip_entry *ret = NULL;
1115 
1116 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1117 	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1118 	if (!ipip_entry)
1119 		return ERR_PTR(-ENOMEM);
1120 
1121 	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1122 							    ol_dev, NULL);
1123 	if (IS_ERR(ipip_entry->ol_lb)) {
1124 		ret = ERR_CAST(ipip_entry->ol_lb);
1125 		goto err_ol_ipip_lb_create;
1126 	}
1127 
1128 	ipip_entry->ipipt = ipipt;
1129 	ipip_entry->ol_dev = ol_dev;
1130 
1131 	switch (ipip_ops->ul_proto) {
1132 	case MLXSW_SP_L3_PROTO_IPV4:
1133 		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
1134 		break;
1135 	case MLXSW_SP_L3_PROTO_IPV6:
1136 		WARN_ON(1);
1137 		break;
1138 	}
1139 
1140 	return ipip_entry;
1141 
1142 err_ol_ipip_lb_create:
1143 	kfree(ipip_entry);
1144 	return ret;
1145 }
1146 
1147 static void
1148 mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
1149 {
1150 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1151 	kfree(ipip_entry);
1152 }
1153 
1154 static bool
1155 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1156 				  const enum mlxsw_sp_l3proto ul_proto,
1157 				  union mlxsw_sp_l3addr saddr,
1158 				  u32 ul_tb_id,
1159 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1160 {
1161 	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1162 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1163 	union mlxsw_sp_l3addr tun_saddr;
1164 
1165 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1166 		return false;
1167 
1168 	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1169 	return tun_ul_tb_id == ul_tb_id &&
1170 	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1171 }
1172 
1173 static int
1174 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1175 			      struct mlxsw_sp_fib_entry *fib_entry,
1176 			      struct mlxsw_sp_ipip_entry *ipip_entry)
1177 {
1178 	u32 tunnel_index;
1179 	int err;
1180 
1181 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1182 				  1, &tunnel_index);
1183 	if (err)
1184 		return err;
1185 
1186 	ipip_entry->decap_fib_entry = fib_entry;
1187 	fib_entry->decap.ipip_entry = ipip_entry;
1188 	fib_entry->decap.tunnel_index = tunnel_index;
1189 	return 0;
1190 }
1191 
1192 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1193 					  struct mlxsw_sp_fib_entry *fib_entry)
1194 {
1195 	/* Unlink this node from the IPIP entry that it's the decap entry of. */
1196 	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1197 	fib_entry->decap.ipip_entry = NULL;
1198 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1199 			   1, fib_entry->decap.tunnel_index);
1200 }
1201 
1202 static struct mlxsw_sp_fib_node *
1203 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1204 			 size_t addr_len, unsigned char prefix_len);
1205 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1206 				     struct mlxsw_sp_fib_entry *fib_entry);
1207 
1208 static void
1209 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1210 				 struct mlxsw_sp_ipip_entry *ipip_entry)
1211 {
1212 	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1213 
1214 	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1215 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1216 
1217 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1218 }
1219 
1220 static void
1221 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1222 				  struct mlxsw_sp_ipip_entry *ipip_entry,
1223 				  struct mlxsw_sp_fib_entry *decap_fib_entry)
1224 {
1225 	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1226 					  ipip_entry))
1227 		return;
1228 	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1229 
1230 	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1231 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1232 }
1233 
1234 static struct mlxsw_sp_fib_entry *
1235 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1236 				     enum mlxsw_sp_l3proto proto,
1237 				     const union mlxsw_sp_l3addr *addr,
1238 				     enum mlxsw_sp_fib_entry_type type)
1239 {
1240 	struct mlxsw_sp_fib_node *fib_node;
1241 	unsigned char addr_prefix_len;
1242 	struct mlxsw_sp_fib *fib;
1243 	struct mlxsw_sp_vr *vr;
1244 	const void *addrp;
1245 	size_t addr_len;
1246 	u32 addr4;
1247 
1248 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1249 	if (!vr)
1250 		return NULL;
1251 	fib = mlxsw_sp_vr_fib(vr, proto);
1252 
1253 	switch (proto) {
1254 	case MLXSW_SP_L3_PROTO_IPV4:
1255 		addr4 = be32_to_cpu(addr->addr4);
1256 		addrp = &addr4;
1257 		addr_len = 4;
1258 		addr_prefix_len = 32;
1259 		break;
1260 	case MLXSW_SP_L3_PROTO_IPV6:
1261 	default:
1262 		WARN_ON(1);
1263 		return NULL;
1264 	}
1265 
1266 	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1267 					    addr_prefix_len);
1268 	if (!fib_node || fib_node->fib_entry->type != type)
1269 		return NULL;
1270 
1271 	return fib_node->fib_entry;
1272 }
1273 
1274 /* Given an IPIP entry, find the corresponding decap route. */
1275 static struct mlxsw_sp_fib_entry *
1276 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1277 			       struct mlxsw_sp_ipip_entry *ipip_entry)
1278 {
1279 	static struct mlxsw_sp_fib_node *fib_node;
1280 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1281 	unsigned char saddr_prefix_len;
1282 	union mlxsw_sp_l3addr saddr;
1283 	struct mlxsw_sp_fib *ul_fib;
1284 	struct mlxsw_sp_vr *ul_vr;
1285 	const void *saddrp;
1286 	size_t saddr_len;
1287 	u32 ul_tb_id;
1288 	u32 saddr4;
1289 
1290 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1291 
1292 	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1293 	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1294 	if (!ul_vr)
1295 		return NULL;
1296 
1297 	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1298 	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1299 					   ipip_entry->ol_dev);
1300 
1301 	switch (ipip_ops->ul_proto) {
1302 	case MLXSW_SP_L3_PROTO_IPV4:
1303 		saddr4 = be32_to_cpu(saddr.addr4);
1304 		saddrp = &saddr4;
1305 		saddr_len = 4;
1306 		saddr_prefix_len = 32;
1307 		break;
1308 	default:
1309 		WARN_ON(1);
1310 		return NULL;
1311 	}
1312 
1313 	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1314 					    saddr_prefix_len);
1315 	if (!fib_node ||
1316 	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1317 		return NULL;
1318 
1319 	return fib_node->fib_entry;
1320 }
1321 
1322 static struct mlxsw_sp_ipip_entry *
1323 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1324 			   enum mlxsw_sp_ipip_type ipipt,
1325 			   struct net_device *ol_dev)
1326 {
1327 	struct mlxsw_sp_ipip_entry *ipip_entry;
1328 
1329 	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1330 	if (IS_ERR(ipip_entry))
1331 		return ipip_entry;
1332 
1333 	list_add_tail(&ipip_entry->ipip_list_node,
1334 		      &mlxsw_sp->router->ipip_list);
1335 
1336 	return ipip_entry;
1337 }
1338 
1339 static void
1340 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1341 			    struct mlxsw_sp_ipip_entry *ipip_entry)
1342 {
1343 	list_del(&ipip_entry->ipip_list_node);
1344 	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
1345 }
1346 
1347 static bool
1348 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1349 				  const struct net_device *ul_dev,
1350 				  enum mlxsw_sp_l3proto ul_proto,
1351 				  union mlxsw_sp_l3addr ul_dip,
1352 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1353 {
1354 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1355 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1356 
1357 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1358 		return false;
1359 
1360 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1361 						 ul_tb_id, ipip_entry);
1362 }
1363 
1364 /* Given decap parameters, find the corresponding IPIP entry. */
1365 static struct mlxsw_sp_ipip_entry *
1366 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
1367 				  enum mlxsw_sp_l3proto ul_proto,
1368 				  union mlxsw_sp_l3addr ul_dip)
1369 {
1370 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1371 	struct net_device *ul_dev;
1372 
1373 	rcu_read_lock();
1374 
1375 	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
1376 	if (!ul_dev)
1377 		goto out_unlock;
1378 
1379 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1380 			    ipip_list_node)
1381 		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1382 						      ul_proto, ul_dip,
1383 						      ipip_entry))
1384 			goto out_unlock;
1385 
1386 	rcu_read_unlock();
1387 
1388 	return NULL;
1389 
1390 out_unlock:
1391 	rcu_read_unlock();
1392 	return ipip_entry;
1393 }
1394 
1395 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1396 				      const struct net_device *dev,
1397 				      enum mlxsw_sp_ipip_type *p_type)
1398 {
1399 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1400 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1401 	enum mlxsw_sp_ipip_type ipipt;
1402 
1403 	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1404 		ipip_ops = router->ipip_ops_arr[ipipt];
1405 		if (dev->type == ipip_ops->dev_type) {
1406 			if (p_type)
1407 				*p_type = ipipt;
1408 			return true;
1409 		}
1410 	}
1411 	return false;
1412 }
1413 
1414 bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1415 				const struct net_device *dev)
1416 {
1417 	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1418 }
1419 
1420 static struct mlxsw_sp_ipip_entry *
1421 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1422 				   const struct net_device *ol_dev)
1423 {
1424 	struct mlxsw_sp_ipip_entry *ipip_entry;
1425 
1426 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1427 			    ipip_list_node)
1428 		if (ipip_entry->ol_dev == ol_dev)
1429 			return ipip_entry;
1430 
1431 	return NULL;
1432 }
1433 
1434 static struct mlxsw_sp_ipip_entry *
1435 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1436 				   const struct net_device *ul_dev,
1437 				   struct mlxsw_sp_ipip_entry *start)
1438 {
1439 	struct mlxsw_sp_ipip_entry *ipip_entry;
1440 
1441 	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1442 					ipip_list_node);
1443 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1444 				     ipip_list_node) {
1445 		struct net_device *ol_dev = ipip_entry->ol_dev;
1446 		struct net_device *ipip_ul_dev;
1447 
1448 		rcu_read_lock();
1449 		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1450 		rcu_read_unlock();
1451 
1452 		if (ipip_ul_dev == ul_dev)
1453 			return ipip_entry;
1454 	}
1455 
1456 	return NULL;
1457 }
1458 
1459 bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1460 				const struct net_device *dev)
1461 {
1462 	bool is_ipip_ul;
1463 
1464 	mutex_lock(&mlxsw_sp->router->lock);
1465 	is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1466 	mutex_unlock(&mlxsw_sp->router->lock);
1467 
1468 	return is_ipip_ul;
1469 }
1470 
1471 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1472 						const struct net_device *ol_dev,
1473 						enum mlxsw_sp_ipip_type ipipt)
1474 {
1475 	const struct mlxsw_sp_ipip_ops *ops
1476 		= mlxsw_sp->router->ipip_ops_arr[ipipt];
1477 
1478 	return ops->can_offload(mlxsw_sp, ol_dev);
1479 }
1480 
1481 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1482 						struct net_device *ol_dev)
1483 {
1484 	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1485 	struct mlxsw_sp_ipip_entry *ipip_entry;
1486 	enum mlxsw_sp_l3proto ul_proto;
1487 	union mlxsw_sp_l3addr saddr;
1488 	u32 ul_tb_id;
1489 
1490 	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1491 	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1492 		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1493 		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1494 		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1495 		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1496 							  saddr, ul_tb_id,
1497 							  NULL)) {
1498 			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1499 								ol_dev);
1500 			if (IS_ERR(ipip_entry))
1501 				return PTR_ERR(ipip_entry);
1502 		}
1503 	}
1504 
1505 	return 0;
1506 }
1507 
1508 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1509 						   struct net_device *ol_dev)
1510 {
1511 	struct mlxsw_sp_ipip_entry *ipip_entry;
1512 
1513 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1514 	if (ipip_entry)
1515 		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1516 }
1517 
1518 static void
1519 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1520 				struct mlxsw_sp_ipip_entry *ipip_entry)
1521 {
1522 	struct mlxsw_sp_fib_entry *decap_fib_entry;
1523 
1524 	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1525 	if (decap_fib_entry)
1526 		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1527 						  decap_fib_entry);
1528 }
1529 
1530 static int
1531 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1532 			u16 ul_rif_id, bool enable)
1533 {
1534 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1535 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1536 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1537 	char ritr_pl[MLXSW_REG_RITR_LEN];
1538 	u32 saddr4;
1539 
1540 	switch (lb_cf.ul_protocol) {
1541 	case MLXSW_SP_L3_PROTO_IPV4:
1542 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1543 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1544 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1545 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1546 			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1547 			    ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
1548 		break;
1549 
1550 	case MLXSW_SP_L3_PROTO_IPV6:
1551 		return -EAFNOSUPPORT;
1552 	}
1553 
1554 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1555 }
1556 
1557 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1558 						 struct net_device *ol_dev)
1559 {
1560 	struct mlxsw_sp_ipip_entry *ipip_entry;
1561 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1562 	int err = 0;
1563 
1564 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1565 	if (ipip_entry) {
1566 		lb_rif = ipip_entry->ol_lb;
1567 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1568 					      lb_rif->ul_rif_id, true);
1569 		if (err)
1570 			goto out;
1571 		lb_rif->common.mtu = ol_dev->mtu;
1572 	}
1573 
1574 out:
1575 	return err;
1576 }
1577 
1578 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1579 						struct net_device *ol_dev)
1580 {
1581 	struct mlxsw_sp_ipip_entry *ipip_entry;
1582 
1583 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1584 	if (ipip_entry)
1585 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1586 }
1587 
1588 static void
1589 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1590 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1591 {
1592 	if (ipip_entry->decap_fib_entry)
1593 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1594 }
1595 
1596 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1597 						  struct net_device *ol_dev)
1598 {
1599 	struct mlxsw_sp_ipip_entry *ipip_entry;
1600 
1601 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1602 	if (ipip_entry)
1603 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1604 }
1605 
1606 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1607 					 struct mlxsw_sp_rif *old_rif,
1608 					 struct mlxsw_sp_rif *new_rif);
1609 static int
1610 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1611 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1612 				 bool keep_encap,
1613 				 struct netlink_ext_ack *extack)
1614 {
1615 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1616 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1617 
1618 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1619 						     ipip_entry->ipipt,
1620 						     ipip_entry->ol_dev,
1621 						     extack);
1622 	if (IS_ERR(new_lb_rif))
1623 		return PTR_ERR(new_lb_rif);
1624 	ipip_entry->ol_lb = new_lb_rif;
1625 
1626 	if (keep_encap)
1627 		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1628 					     &new_lb_rif->common);
1629 
1630 	mlxsw_sp_rif_destroy(&old_lb_rif->common);
1631 
1632 	return 0;
1633 }
1634 
1635 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1636 					struct mlxsw_sp_rif *rif);
1637 
1638 /**
1639  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1640  * @mlxsw_sp: mlxsw_sp.
1641  * @ipip_entry: IPIP entry.
1642  * @recreate_loopback: Recreates the associated loopback RIF.
1643  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1644  *              relevant when recreate_loopback is true.
1645  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1646  *                   is only relevant when recreate_loopback is false.
1647  * @extack: extack.
1648  *
1649  * Return: Non-zero value on failure.
1650  */
1651 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1652 					struct mlxsw_sp_ipip_entry *ipip_entry,
1653 					bool recreate_loopback,
1654 					bool keep_encap,
1655 					bool update_nexthops,
1656 					struct netlink_ext_ack *extack)
1657 {
1658 	int err;
1659 
1660 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1661 	 * recreate it. That creates a window of opportunity where RALUE and
1662 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1663 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1664 	 * of RALUE, demote the decap route back.
1665 	 */
1666 	if (ipip_entry->decap_fib_entry)
1667 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1668 
1669 	if (recreate_loopback) {
1670 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1671 						       keep_encap, extack);
1672 		if (err)
1673 			return err;
1674 	} else if (update_nexthops) {
1675 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1676 					    &ipip_entry->ol_lb->common);
1677 	}
1678 
1679 	if (ipip_entry->ol_dev->flags & IFF_UP)
1680 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1681 
1682 	return 0;
1683 }
1684 
1685 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1686 						struct net_device *ol_dev,
1687 						struct netlink_ext_ack *extack)
1688 {
1689 	struct mlxsw_sp_ipip_entry *ipip_entry =
1690 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1691 
1692 	if (!ipip_entry)
1693 		return 0;
1694 
1695 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1696 						   true, false, false, extack);
1697 }
1698 
1699 static int
1700 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1701 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1702 				     struct net_device *ul_dev,
1703 				     bool *demote_this,
1704 				     struct netlink_ext_ack *extack)
1705 {
1706 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1707 	enum mlxsw_sp_l3proto ul_proto;
1708 	union mlxsw_sp_l3addr saddr;
1709 
1710 	/* Moving underlay to a different VRF might cause local address
1711 	 * conflict, and the conflicting tunnels need to be demoted.
1712 	 */
1713 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1714 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1715 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1716 						 saddr, ul_tb_id,
1717 						 ipip_entry)) {
1718 		*demote_this = true;
1719 		return 0;
1720 	}
1721 
1722 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1723 						   true, true, false, extack);
1724 }
1725 
1726 static int
1727 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1728 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1729 				    struct net_device *ul_dev)
1730 {
1731 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1732 						   false, false, true, NULL);
1733 }
1734 
1735 static int
1736 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1737 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1738 				      struct net_device *ul_dev)
1739 {
1740 	/* A down underlay device causes encapsulated packets to not be
1741 	 * forwarded, but decap still works. So refresh next hops without
1742 	 * touching anything else.
1743 	 */
1744 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1745 						   false, false, true, NULL);
1746 }
1747 
1748 static int
1749 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1750 					struct net_device *ol_dev,
1751 					struct netlink_ext_ack *extack)
1752 {
1753 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1754 	struct mlxsw_sp_ipip_entry *ipip_entry;
1755 	int err;
1756 
1757 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1758 	if (!ipip_entry)
1759 		/* A change might make a tunnel eligible for offloading, but
1760 		 * that is currently not implemented. What falls to slow path
1761 		 * stays there.
1762 		 */
1763 		return 0;
1764 
1765 	/* A change might make a tunnel not eligible for offloading. */
1766 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1767 						 ipip_entry->ipipt)) {
1768 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1769 		return 0;
1770 	}
1771 
1772 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1773 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1774 	return err;
1775 }
1776 
1777 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1778 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1779 {
1780 	struct net_device *ol_dev = ipip_entry->ol_dev;
1781 
1782 	if (ol_dev->flags & IFF_UP)
1783 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1784 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1785 }
1786 
1787 /* The configuration where several tunnels have the same local address in the
1788  * same underlay table needs special treatment in the HW. That is currently not
1789  * implemented in the driver. This function finds and demotes the first tunnel
1790  * with a given source address, except the one passed in in the argument
1791  * `except'.
1792  */
1793 bool
1794 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1795 				     enum mlxsw_sp_l3proto ul_proto,
1796 				     union mlxsw_sp_l3addr saddr,
1797 				     u32 ul_tb_id,
1798 				     const struct mlxsw_sp_ipip_entry *except)
1799 {
1800 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1801 
1802 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1803 				 ipip_list_node) {
1804 		if (ipip_entry != except &&
1805 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1806 						      ul_tb_id, ipip_entry)) {
1807 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1808 			return true;
1809 		}
1810 	}
1811 
1812 	return false;
1813 }
1814 
1815 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1816 						     struct net_device *ul_dev)
1817 {
1818 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1819 
1820 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1821 				 ipip_list_node) {
1822 		struct net_device *ol_dev = ipip_entry->ol_dev;
1823 		struct net_device *ipip_ul_dev;
1824 
1825 		rcu_read_lock();
1826 		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1827 		rcu_read_unlock();
1828 		if (ipip_ul_dev == ul_dev)
1829 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1830 	}
1831 }
1832 
1833 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1834 				     struct net_device *ol_dev,
1835 				     unsigned long event,
1836 				     struct netdev_notifier_info *info)
1837 {
1838 	struct netdev_notifier_changeupper_info *chup;
1839 	struct netlink_ext_ack *extack;
1840 	int err = 0;
1841 
1842 	mutex_lock(&mlxsw_sp->router->lock);
1843 	switch (event) {
1844 	case NETDEV_REGISTER:
1845 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1846 		break;
1847 	case NETDEV_UNREGISTER:
1848 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1849 		break;
1850 	case NETDEV_UP:
1851 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1852 		break;
1853 	case NETDEV_DOWN:
1854 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1855 		break;
1856 	case NETDEV_CHANGEUPPER:
1857 		chup = container_of(info, typeof(*chup), info);
1858 		extack = info->extack;
1859 		if (netif_is_l3_master(chup->upper_dev))
1860 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1861 								   ol_dev,
1862 								   extack);
1863 		break;
1864 	case NETDEV_CHANGE:
1865 		extack = info->extack;
1866 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1867 							      ol_dev, extack);
1868 		break;
1869 	case NETDEV_CHANGEMTU:
1870 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1871 		break;
1872 	}
1873 	mutex_unlock(&mlxsw_sp->router->lock);
1874 	return err;
1875 }
1876 
1877 static int
1878 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1879 				   struct mlxsw_sp_ipip_entry *ipip_entry,
1880 				   struct net_device *ul_dev,
1881 				   bool *demote_this,
1882 				   unsigned long event,
1883 				   struct netdev_notifier_info *info)
1884 {
1885 	struct netdev_notifier_changeupper_info *chup;
1886 	struct netlink_ext_ack *extack;
1887 
1888 	switch (event) {
1889 	case NETDEV_CHANGEUPPER:
1890 		chup = container_of(info, typeof(*chup), info);
1891 		extack = info->extack;
1892 		if (netif_is_l3_master(chup->upper_dev))
1893 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1894 								    ipip_entry,
1895 								    ul_dev,
1896 								    demote_this,
1897 								    extack);
1898 		break;
1899 
1900 	case NETDEV_UP:
1901 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1902 							   ul_dev);
1903 	case NETDEV_DOWN:
1904 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1905 							     ipip_entry,
1906 							     ul_dev);
1907 	}
1908 	return 0;
1909 }
1910 
1911 int
1912 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1913 				 struct net_device *ul_dev,
1914 				 unsigned long event,
1915 				 struct netdev_notifier_info *info)
1916 {
1917 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1918 	int err = 0;
1919 
1920 	mutex_lock(&mlxsw_sp->router->lock);
1921 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1922 								ul_dev,
1923 								ipip_entry))) {
1924 		struct mlxsw_sp_ipip_entry *prev;
1925 		bool demote_this = false;
1926 
1927 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1928 							 ul_dev, &demote_this,
1929 							 event, info);
1930 		if (err) {
1931 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1932 								 ul_dev);
1933 			break;
1934 		}
1935 
1936 		if (demote_this) {
1937 			if (list_is_first(&ipip_entry->ipip_list_node,
1938 					  &mlxsw_sp->router->ipip_list))
1939 				prev = NULL;
1940 			else
1941 				/* This can't be cached from previous iteration,
1942 				 * because that entry could be gone now.
1943 				 */
1944 				prev = list_prev_entry(ipip_entry,
1945 						       ipip_list_node);
1946 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1947 			ipip_entry = prev;
1948 		}
1949 	}
1950 	mutex_unlock(&mlxsw_sp->router->lock);
1951 
1952 	return err;
1953 }
1954 
1955 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1956 				      enum mlxsw_sp_l3proto ul_proto,
1957 				      const union mlxsw_sp_l3addr *ul_sip,
1958 				      u32 tunnel_index)
1959 {
1960 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1961 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1962 	struct mlxsw_sp_fib_entry *fib_entry;
1963 	int err = 0;
1964 
1965 	mutex_lock(&mlxsw_sp->router->lock);
1966 
1967 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
1968 		err = -EINVAL;
1969 		goto out;
1970 	}
1971 
1972 	router->nve_decap_config.ul_tb_id = ul_tb_id;
1973 	router->nve_decap_config.tunnel_index = tunnel_index;
1974 	router->nve_decap_config.ul_proto = ul_proto;
1975 	router->nve_decap_config.ul_sip = *ul_sip;
1976 	router->nve_decap_config.valid = true;
1977 
1978 	/* It is valid to create a tunnel with a local IP and only later
1979 	 * assign this IP address to a local interface
1980 	 */
1981 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1982 							 ul_proto, ul_sip,
1983 							 type);
1984 	if (!fib_entry)
1985 		goto out;
1986 
1987 	fib_entry->decap.tunnel_index = tunnel_index;
1988 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1989 
1990 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1991 	if (err)
1992 		goto err_fib_entry_update;
1993 
1994 	goto out;
1995 
1996 err_fib_entry_update:
1997 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1998 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1999 out:
2000 	mutex_unlock(&mlxsw_sp->router->lock);
2001 	return err;
2002 }
2003 
2004 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2005 				      enum mlxsw_sp_l3proto ul_proto,
2006 				      const union mlxsw_sp_l3addr *ul_sip)
2007 {
2008 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2009 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2010 	struct mlxsw_sp_fib_entry *fib_entry;
2011 
2012 	mutex_lock(&mlxsw_sp->router->lock);
2013 
2014 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2015 		goto out;
2016 
2017 	router->nve_decap_config.valid = false;
2018 
2019 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2020 							 ul_proto, ul_sip,
2021 							 type);
2022 	if (!fib_entry)
2023 		goto out;
2024 
2025 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2026 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2027 out:
2028 	mutex_unlock(&mlxsw_sp->router->lock);
2029 }
2030 
2031 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2032 					 u32 ul_tb_id,
2033 					 enum mlxsw_sp_l3proto ul_proto,
2034 					 const union mlxsw_sp_l3addr *ul_sip)
2035 {
2036 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2037 
2038 	return router->nve_decap_config.valid &&
2039 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
2040 	       router->nve_decap_config.ul_proto == ul_proto &&
2041 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2042 		       sizeof(*ul_sip));
2043 }
2044 
2045 struct mlxsw_sp_neigh_key {
2046 	struct neighbour *n;
2047 };
2048 
2049 struct mlxsw_sp_neigh_entry {
2050 	struct list_head rif_list_node;
2051 	struct rhash_head ht_node;
2052 	struct mlxsw_sp_neigh_key key;
2053 	u16 rif;
2054 	bool connected;
2055 	unsigned char ha[ETH_ALEN];
2056 	struct list_head nexthop_list; /* list of nexthops using
2057 					* this neigh entry
2058 					*/
2059 	struct list_head nexthop_neighs_list_node;
2060 	unsigned int counter_index;
2061 	bool counter_valid;
2062 };
2063 
2064 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2065 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2066 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2067 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
2068 };
2069 
2070 struct mlxsw_sp_neigh_entry *
2071 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2072 			struct mlxsw_sp_neigh_entry *neigh_entry)
2073 {
2074 	if (!neigh_entry) {
2075 		if (list_empty(&rif->neigh_list))
2076 			return NULL;
2077 		else
2078 			return list_first_entry(&rif->neigh_list,
2079 						typeof(*neigh_entry),
2080 						rif_list_node);
2081 	}
2082 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2083 		return NULL;
2084 	return list_next_entry(neigh_entry, rif_list_node);
2085 }
2086 
2087 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2088 {
2089 	return neigh_entry->key.n->tbl->family;
2090 }
2091 
2092 unsigned char *
2093 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2094 {
2095 	return neigh_entry->ha;
2096 }
2097 
2098 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2099 {
2100 	struct neighbour *n;
2101 
2102 	n = neigh_entry->key.n;
2103 	return ntohl(*((__be32 *) n->primary_key));
2104 }
2105 
2106 struct in6_addr *
2107 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2108 {
2109 	struct neighbour *n;
2110 
2111 	n = neigh_entry->key.n;
2112 	return (struct in6_addr *) &n->primary_key;
2113 }
2114 
2115 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2116 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2117 			       u64 *p_counter)
2118 {
2119 	if (!neigh_entry->counter_valid)
2120 		return -EINVAL;
2121 
2122 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2123 					 p_counter, NULL);
2124 }
2125 
2126 static struct mlxsw_sp_neigh_entry *
2127 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2128 			   u16 rif)
2129 {
2130 	struct mlxsw_sp_neigh_entry *neigh_entry;
2131 
2132 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2133 	if (!neigh_entry)
2134 		return NULL;
2135 
2136 	neigh_entry->key.n = n;
2137 	neigh_entry->rif = rif;
2138 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2139 
2140 	return neigh_entry;
2141 }
2142 
2143 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2144 {
2145 	kfree(neigh_entry);
2146 }
2147 
2148 static int
2149 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2150 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2151 {
2152 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2153 				      &neigh_entry->ht_node,
2154 				      mlxsw_sp_neigh_ht_params);
2155 }
2156 
2157 static void
2158 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2159 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2160 {
2161 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2162 			       &neigh_entry->ht_node,
2163 			       mlxsw_sp_neigh_ht_params);
2164 }
2165 
2166 static bool
2167 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2168 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2169 {
2170 	struct devlink *devlink;
2171 	const char *table_name;
2172 
2173 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2174 	case AF_INET:
2175 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2176 		break;
2177 	case AF_INET6:
2178 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2179 		break;
2180 	default:
2181 		WARN_ON(1);
2182 		return false;
2183 	}
2184 
2185 	devlink = priv_to_devlink(mlxsw_sp->core);
2186 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2187 }
2188 
2189 static void
2190 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2191 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2192 {
2193 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2194 		return;
2195 
2196 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2197 		return;
2198 
2199 	neigh_entry->counter_valid = true;
2200 }
2201 
2202 static void
2203 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2204 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2205 {
2206 	if (!neigh_entry->counter_valid)
2207 		return;
2208 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2209 				   neigh_entry->counter_index);
2210 	neigh_entry->counter_valid = false;
2211 }
2212 
2213 static struct mlxsw_sp_neigh_entry *
2214 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2215 {
2216 	struct mlxsw_sp_neigh_entry *neigh_entry;
2217 	struct mlxsw_sp_rif *rif;
2218 	int err;
2219 
2220 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2221 	if (!rif)
2222 		return ERR_PTR(-EINVAL);
2223 
2224 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2225 	if (!neigh_entry)
2226 		return ERR_PTR(-ENOMEM);
2227 
2228 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2229 	if (err)
2230 		goto err_neigh_entry_insert;
2231 
2232 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2233 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2234 
2235 	return neigh_entry;
2236 
2237 err_neigh_entry_insert:
2238 	mlxsw_sp_neigh_entry_free(neigh_entry);
2239 	return ERR_PTR(err);
2240 }
2241 
2242 static void
2243 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2244 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2245 {
2246 	list_del(&neigh_entry->rif_list_node);
2247 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2248 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2249 	mlxsw_sp_neigh_entry_free(neigh_entry);
2250 }
2251 
2252 static struct mlxsw_sp_neigh_entry *
2253 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2254 {
2255 	struct mlxsw_sp_neigh_key key;
2256 
2257 	key.n = n;
2258 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2259 				      &key, mlxsw_sp_neigh_ht_params);
2260 }
2261 
2262 static void
2263 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2264 {
2265 	unsigned long interval;
2266 
2267 #if IS_ENABLED(CONFIG_IPV6)
2268 	interval = min_t(unsigned long,
2269 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2270 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2271 #else
2272 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2273 #endif
2274 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2275 }
2276 
2277 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2278 						   char *rauhtd_pl,
2279 						   int ent_index)
2280 {
2281 	struct net_device *dev;
2282 	struct neighbour *n;
2283 	__be32 dipn;
2284 	u32 dip;
2285 	u16 rif;
2286 
2287 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2288 
2289 	if (!mlxsw_sp->router->rifs[rif]) {
2290 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2291 		return;
2292 	}
2293 
2294 	dipn = htonl(dip);
2295 	dev = mlxsw_sp->router->rifs[rif]->dev;
2296 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2297 	if (!n)
2298 		return;
2299 
2300 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2301 	neigh_event_send(n, NULL);
2302 	neigh_release(n);
2303 }
2304 
2305 #if IS_ENABLED(CONFIG_IPV6)
2306 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2307 						   char *rauhtd_pl,
2308 						   int rec_index)
2309 {
2310 	struct net_device *dev;
2311 	struct neighbour *n;
2312 	struct in6_addr dip;
2313 	u16 rif;
2314 
2315 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2316 					 (char *) &dip);
2317 
2318 	if (!mlxsw_sp->router->rifs[rif]) {
2319 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2320 		return;
2321 	}
2322 
2323 	dev = mlxsw_sp->router->rifs[rif]->dev;
2324 	n = neigh_lookup(&nd_tbl, &dip, dev);
2325 	if (!n)
2326 		return;
2327 
2328 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2329 	neigh_event_send(n, NULL);
2330 	neigh_release(n);
2331 }
2332 #else
2333 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2334 						   char *rauhtd_pl,
2335 						   int rec_index)
2336 {
2337 }
2338 #endif
2339 
2340 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2341 						   char *rauhtd_pl,
2342 						   int rec_index)
2343 {
2344 	u8 num_entries;
2345 	int i;
2346 
2347 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2348 								rec_index);
2349 	/* Hardware starts counting at 0, so add 1. */
2350 	num_entries++;
2351 
2352 	/* Each record consists of several neighbour entries. */
2353 	for (i = 0; i < num_entries; i++) {
2354 		int ent_index;
2355 
2356 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2357 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2358 						       ent_index);
2359 	}
2360 
2361 }
2362 
2363 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2364 						   char *rauhtd_pl,
2365 						   int rec_index)
2366 {
2367 	/* One record contains one entry. */
2368 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2369 					       rec_index);
2370 }
2371 
2372 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2373 					      char *rauhtd_pl, int rec_index)
2374 {
2375 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2376 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2377 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2378 						       rec_index);
2379 		break;
2380 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2381 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2382 						       rec_index);
2383 		break;
2384 	}
2385 }
2386 
2387 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2388 {
2389 	u8 num_rec, last_rec_index, num_entries;
2390 
2391 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2392 	last_rec_index = num_rec - 1;
2393 
2394 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2395 		return false;
2396 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2397 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2398 		return true;
2399 
2400 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2401 								last_rec_index);
2402 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2403 		return true;
2404 	return false;
2405 }
2406 
2407 static int
2408 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2409 				       char *rauhtd_pl,
2410 				       enum mlxsw_reg_rauhtd_type type)
2411 {
2412 	int i, num_rec;
2413 	int err;
2414 
2415 	/* Ensure the RIF we read from the device does not change mid-dump. */
2416 	mutex_lock(&mlxsw_sp->router->lock);
2417 	do {
2418 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2419 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2420 				      rauhtd_pl);
2421 		if (err) {
2422 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2423 			break;
2424 		}
2425 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2426 		for (i = 0; i < num_rec; i++)
2427 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2428 							  i);
2429 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2430 	mutex_unlock(&mlxsw_sp->router->lock);
2431 
2432 	return err;
2433 }
2434 
2435 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2436 {
2437 	enum mlxsw_reg_rauhtd_type type;
2438 	char *rauhtd_pl;
2439 	int err;
2440 
2441 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2442 	if (!rauhtd_pl)
2443 		return -ENOMEM;
2444 
2445 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2446 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2447 	if (err)
2448 		goto out;
2449 
2450 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2451 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2452 out:
2453 	kfree(rauhtd_pl);
2454 	return err;
2455 }
2456 
2457 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2458 {
2459 	struct mlxsw_sp_neigh_entry *neigh_entry;
2460 
2461 	mutex_lock(&mlxsw_sp->router->lock);
2462 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2463 			    nexthop_neighs_list_node)
2464 		/* If this neigh have nexthops, make the kernel think this neigh
2465 		 * is active regardless of the traffic.
2466 		 */
2467 		neigh_event_send(neigh_entry->key.n, NULL);
2468 	mutex_unlock(&mlxsw_sp->router->lock);
2469 }
2470 
2471 static void
2472 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2473 {
2474 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2475 
2476 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2477 			       msecs_to_jiffies(interval));
2478 }
2479 
2480 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2481 {
2482 	struct mlxsw_sp_router *router;
2483 	int err;
2484 
2485 	router = container_of(work, struct mlxsw_sp_router,
2486 			      neighs_update.dw.work);
2487 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2488 	if (err)
2489 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2490 
2491 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2492 
2493 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2494 }
2495 
2496 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2497 {
2498 	struct mlxsw_sp_neigh_entry *neigh_entry;
2499 	struct mlxsw_sp_router *router;
2500 
2501 	router = container_of(work, struct mlxsw_sp_router,
2502 			      nexthop_probe_dw.work);
2503 	/* Iterate over nexthop neighbours, find those who are unresolved and
2504 	 * send arp on them. This solves the chicken-egg problem when
2505 	 * the nexthop wouldn't get offloaded until the neighbor is resolved
2506 	 * but it wouldn't get resolved ever in case traffic is flowing in HW
2507 	 * using different nexthop.
2508 	 */
2509 	mutex_lock(&router->lock);
2510 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2511 			    nexthop_neighs_list_node)
2512 		if (!neigh_entry->connected)
2513 			neigh_event_send(neigh_entry->key.n, NULL);
2514 	mutex_unlock(&router->lock);
2515 
2516 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2517 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2518 }
2519 
2520 static void
2521 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2522 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2523 			      bool removing, bool dead);
2524 
2525 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2526 {
2527 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2528 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2529 }
2530 
2531 static int
2532 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2533 				struct mlxsw_sp_neigh_entry *neigh_entry,
2534 				enum mlxsw_reg_rauht_op op)
2535 {
2536 	struct neighbour *n = neigh_entry->key.n;
2537 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2538 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2539 
2540 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2541 			      dip);
2542 	if (neigh_entry->counter_valid)
2543 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2544 					     neigh_entry->counter_index);
2545 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2546 }
2547 
2548 static int
2549 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2550 				struct mlxsw_sp_neigh_entry *neigh_entry,
2551 				enum mlxsw_reg_rauht_op op)
2552 {
2553 	struct neighbour *n = neigh_entry->key.n;
2554 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2555 	const char *dip = n->primary_key;
2556 
2557 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2558 			      dip);
2559 	if (neigh_entry->counter_valid)
2560 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2561 					     neigh_entry->counter_index);
2562 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2563 }
2564 
2565 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2566 {
2567 	struct neighbour *n = neigh_entry->key.n;
2568 
2569 	/* Packets with a link-local destination address are trapped
2570 	 * after LPM lookup and never reach the neighbour table, so
2571 	 * there is no need to program such neighbours to the device.
2572 	 */
2573 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2574 	    IPV6_ADDR_LINKLOCAL)
2575 		return true;
2576 	return false;
2577 }
2578 
2579 static void
2580 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2581 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2582 			    bool adding)
2583 {
2584 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2585 	int err;
2586 
2587 	if (!adding && !neigh_entry->connected)
2588 		return;
2589 	neigh_entry->connected = adding;
2590 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2591 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2592 						      op);
2593 		if (err)
2594 			return;
2595 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2596 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2597 			return;
2598 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2599 						      op);
2600 		if (err)
2601 			return;
2602 	} else {
2603 		WARN_ON_ONCE(1);
2604 		return;
2605 	}
2606 
2607 	if (adding)
2608 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2609 	else
2610 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2611 }
2612 
2613 void
2614 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2615 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2616 				    bool adding)
2617 {
2618 	if (adding)
2619 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2620 	else
2621 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2622 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2623 }
2624 
2625 struct mlxsw_sp_netevent_work {
2626 	struct work_struct work;
2627 	struct mlxsw_sp *mlxsw_sp;
2628 	struct neighbour *n;
2629 };
2630 
2631 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2632 {
2633 	struct mlxsw_sp_netevent_work *net_work =
2634 		container_of(work, struct mlxsw_sp_netevent_work, work);
2635 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2636 	struct mlxsw_sp_neigh_entry *neigh_entry;
2637 	struct neighbour *n = net_work->n;
2638 	unsigned char ha[ETH_ALEN];
2639 	bool entry_connected;
2640 	u8 nud_state, dead;
2641 
2642 	/* If these parameters are changed after we release the lock,
2643 	 * then we are guaranteed to receive another event letting us
2644 	 * know about it.
2645 	 */
2646 	read_lock_bh(&n->lock);
2647 	memcpy(ha, n->ha, ETH_ALEN);
2648 	nud_state = n->nud_state;
2649 	dead = n->dead;
2650 	read_unlock_bh(&n->lock);
2651 
2652 	mutex_lock(&mlxsw_sp->router->lock);
2653 	mlxsw_sp_span_respin(mlxsw_sp);
2654 
2655 	entry_connected = nud_state & NUD_VALID && !dead;
2656 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2657 	if (!entry_connected && !neigh_entry)
2658 		goto out;
2659 	if (!neigh_entry) {
2660 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2661 		if (IS_ERR(neigh_entry))
2662 			goto out;
2663 	}
2664 
2665 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2666 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2667 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2668 				      dead);
2669 
2670 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2671 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2672 
2673 out:
2674 	mutex_unlock(&mlxsw_sp->router->lock);
2675 	neigh_release(n);
2676 	kfree(net_work);
2677 }
2678 
2679 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2680 
2681 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2682 {
2683 	struct mlxsw_sp_netevent_work *net_work =
2684 		container_of(work, struct mlxsw_sp_netevent_work, work);
2685 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2686 
2687 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2688 	kfree(net_work);
2689 }
2690 
2691 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2692 
2693 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2694 {
2695 	struct mlxsw_sp_netevent_work *net_work =
2696 		container_of(work, struct mlxsw_sp_netevent_work, work);
2697 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2698 
2699 	__mlxsw_sp_router_init(mlxsw_sp);
2700 	kfree(net_work);
2701 }
2702 
2703 static int mlxsw_sp_router_schedule_work(struct net *net,
2704 					 struct notifier_block *nb,
2705 					 void (*cb)(struct work_struct *))
2706 {
2707 	struct mlxsw_sp_netevent_work *net_work;
2708 	struct mlxsw_sp_router *router;
2709 
2710 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2711 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2712 		return NOTIFY_DONE;
2713 
2714 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2715 	if (!net_work)
2716 		return NOTIFY_BAD;
2717 
2718 	INIT_WORK(&net_work->work, cb);
2719 	net_work->mlxsw_sp = router->mlxsw_sp;
2720 	mlxsw_core_schedule_work(&net_work->work);
2721 	return NOTIFY_DONE;
2722 }
2723 
2724 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2725 					  unsigned long event, void *ptr)
2726 {
2727 	struct mlxsw_sp_netevent_work *net_work;
2728 	struct mlxsw_sp_port *mlxsw_sp_port;
2729 	struct mlxsw_sp *mlxsw_sp;
2730 	unsigned long interval;
2731 	struct neigh_parms *p;
2732 	struct neighbour *n;
2733 
2734 	switch (event) {
2735 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2736 		p = ptr;
2737 
2738 		/* We don't care about changes in the default table. */
2739 		if (!p->dev || (p->tbl->family != AF_INET &&
2740 				p->tbl->family != AF_INET6))
2741 			return NOTIFY_DONE;
2742 
2743 		/* We are in atomic context and can't take RTNL mutex,
2744 		 * so use RCU variant to walk the device chain.
2745 		 */
2746 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2747 		if (!mlxsw_sp_port)
2748 			return NOTIFY_DONE;
2749 
2750 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2751 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2752 		mlxsw_sp->router->neighs_update.interval = interval;
2753 
2754 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2755 		break;
2756 	case NETEVENT_NEIGH_UPDATE:
2757 		n = ptr;
2758 
2759 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2760 			return NOTIFY_DONE;
2761 
2762 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2763 		if (!mlxsw_sp_port)
2764 			return NOTIFY_DONE;
2765 
2766 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2767 		if (!net_work) {
2768 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
2769 			return NOTIFY_BAD;
2770 		}
2771 
2772 		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2773 		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2774 		net_work->n = n;
2775 
2776 		/* Take a reference to ensure the neighbour won't be
2777 		 * destructed until we drop the reference in delayed
2778 		 * work.
2779 		 */
2780 		neigh_clone(n);
2781 		mlxsw_core_schedule_work(&net_work->work);
2782 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2783 		break;
2784 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2785 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2786 		return mlxsw_sp_router_schedule_work(ptr, nb,
2787 				mlxsw_sp_router_mp_hash_event_work);
2788 
2789 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2790 		return mlxsw_sp_router_schedule_work(ptr, nb,
2791 				mlxsw_sp_router_update_priority_work);
2792 	}
2793 
2794 	return NOTIFY_DONE;
2795 }
2796 
2797 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2798 {
2799 	int err;
2800 
2801 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2802 			      &mlxsw_sp_neigh_ht_params);
2803 	if (err)
2804 		return err;
2805 
2806 	/* Initialize the polling interval according to the default
2807 	 * table.
2808 	 */
2809 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2810 
2811 	/* Create the delayed works for the activity_update */
2812 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2813 			  mlxsw_sp_router_neighs_update_work);
2814 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2815 			  mlxsw_sp_router_probe_unresolved_nexthops);
2816 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2817 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2818 	return 0;
2819 }
2820 
2821 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2822 {
2823 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2824 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2825 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2826 }
2827 
2828 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2829 					 struct mlxsw_sp_rif *rif)
2830 {
2831 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2832 
2833 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2834 				 rif_list_node) {
2835 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2836 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2837 	}
2838 }
2839 
2840 enum mlxsw_sp_nexthop_type {
2841 	MLXSW_SP_NEXTHOP_TYPE_ETH,
2842 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
2843 };
2844 
2845 struct mlxsw_sp_nexthop_key {
2846 	struct fib_nh *fib_nh;
2847 };
2848 
2849 struct mlxsw_sp_nexthop {
2850 	struct list_head neigh_list_node; /* member of neigh entry list */
2851 	struct list_head rif_list_node;
2852 	struct list_head router_list_node;
2853 	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
2854 						   * this nexthop belongs to
2855 						   */
2856 	struct rhash_head ht_node;
2857 	struct neigh_table *neigh_tbl;
2858 	struct mlxsw_sp_nexthop_key key;
2859 	unsigned char gw_addr[sizeof(struct in6_addr)];
2860 	int ifindex;
2861 	int nh_weight;
2862 	int norm_nh_weight;
2863 	int num_adj_entries;
2864 	struct mlxsw_sp_rif *rif;
2865 	u8 should_offload:1, /* set indicates this neigh is connected and
2866 			      * should be put to KVD linear area of this group.
2867 			      */
2868 	   offloaded:1, /* set in case the neigh is actually put into
2869 			 * KVD linear area of this group.
2870 			 */
2871 	   update:1, /* set indicates that MAC of this neigh should be
2872 		      * updated in HW
2873 		      */
2874 	   discard:1; /* nexthop is programmed to discard packets */
2875 	enum mlxsw_sp_nexthop_type type;
2876 	union {
2877 		struct mlxsw_sp_neigh_entry *neigh_entry;
2878 		struct mlxsw_sp_ipip_entry *ipip_entry;
2879 	};
2880 	unsigned int counter_index;
2881 	bool counter_valid;
2882 };
2883 
2884 enum mlxsw_sp_nexthop_group_type {
2885 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
2886 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
2887 	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
2888 };
2889 
2890 struct mlxsw_sp_nexthop_group_info {
2891 	struct mlxsw_sp_nexthop_group *nh_grp;
2892 	u32 adj_index;
2893 	u16 ecmp_size;
2894 	u16 count;
2895 	int sum_norm_weight;
2896 	u8 adj_index_valid:1,
2897 	   gateway:1; /* routes using the group use a gateway */
2898 	struct mlxsw_sp_nexthop nexthops[0];
2899 #define nh_rif	nexthops[0].rif
2900 };
2901 
2902 struct mlxsw_sp_nexthop_group_vr_key {
2903 	u16 vr_id;
2904 	enum mlxsw_sp_l3proto proto;
2905 };
2906 
2907 struct mlxsw_sp_nexthop_group_vr_entry {
2908 	struct list_head list; /* member in vr_list */
2909 	struct rhash_head ht_node; /* member in vr_ht */
2910 	refcount_t ref_count;
2911 	struct mlxsw_sp_nexthop_group_vr_key key;
2912 };
2913 
2914 struct mlxsw_sp_nexthop_group {
2915 	struct rhash_head ht_node;
2916 	struct list_head fib_list; /* list of fib entries that use this group */
2917 	union {
2918 		struct {
2919 			struct fib_info *fi;
2920 		} ipv4;
2921 		struct {
2922 			u32 id;
2923 		} obj;
2924 	};
2925 	struct mlxsw_sp_nexthop_group_info *nhgi;
2926 	struct list_head vr_list;
2927 	struct rhashtable vr_ht;
2928 	enum mlxsw_sp_nexthop_group_type type;
2929 	bool can_destroy;
2930 };
2931 
2932 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2933 				    struct mlxsw_sp_nexthop *nh)
2934 {
2935 	struct devlink *devlink;
2936 
2937 	devlink = priv_to_devlink(mlxsw_sp->core);
2938 	if (!devlink_dpipe_table_counter_enabled(devlink,
2939 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2940 		return;
2941 
2942 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2943 		return;
2944 
2945 	nh->counter_valid = true;
2946 }
2947 
2948 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2949 				   struct mlxsw_sp_nexthop *nh)
2950 {
2951 	if (!nh->counter_valid)
2952 		return;
2953 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2954 	nh->counter_valid = false;
2955 }
2956 
2957 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2958 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2959 {
2960 	if (!nh->counter_valid)
2961 		return -EINVAL;
2962 
2963 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2964 					 p_counter, NULL);
2965 }
2966 
2967 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2968 					       struct mlxsw_sp_nexthop *nh)
2969 {
2970 	if (!nh) {
2971 		if (list_empty(&router->nexthop_list))
2972 			return NULL;
2973 		else
2974 			return list_first_entry(&router->nexthop_list,
2975 						typeof(*nh), router_list_node);
2976 	}
2977 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2978 		return NULL;
2979 	return list_next_entry(nh, router_list_node);
2980 }
2981 
2982 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2983 {
2984 	return nh->offloaded;
2985 }
2986 
2987 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2988 {
2989 	if (!nh->offloaded)
2990 		return NULL;
2991 	return nh->neigh_entry->ha;
2992 }
2993 
2994 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
2995 			     u32 *p_adj_size, u32 *p_adj_hash_index)
2996 {
2997 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
2998 	u32 adj_hash_index = 0;
2999 	int i;
3000 
3001 	if (!nh->offloaded || !nhgi->adj_index_valid)
3002 		return -EINVAL;
3003 
3004 	*p_adj_index = nhgi->adj_index;
3005 	*p_adj_size = nhgi->ecmp_size;
3006 
3007 	for (i = 0; i < nhgi->count; i++) {
3008 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3009 
3010 		if (nh_iter == nh)
3011 			break;
3012 		if (nh_iter->offloaded)
3013 			adj_hash_index += nh_iter->num_adj_entries;
3014 	}
3015 
3016 	*p_adj_hash_index = adj_hash_index;
3017 	return 0;
3018 }
3019 
3020 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3021 {
3022 	return nh->rif;
3023 }
3024 
3025 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3026 {
3027 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3028 	int i;
3029 
3030 	for (i = 0; i < nhgi->count; i++) {
3031 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3032 
3033 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3034 			return true;
3035 	}
3036 	return false;
3037 }
3038 
3039 bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh)
3040 {
3041 	return nh->discard;
3042 }
3043 
3044 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3045 	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3046 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3047 	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3048 	.automatic_shrinking = true,
3049 };
3050 
3051 static struct mlxsw_sp_nexthop_group_vr_entry *
3052 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3053 				       const struct mlxsw_sp_fib *fib)
3054 {
3055 	struct mlxsw_sp_nexthop_group_vr_key key;
3056 
3057 	memset(&key, 0, sizeof(key));
3058 	key.vr_id = fib->vr->id;
3059 	key.proto = fib->proto;
3060 	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3061 				      mlxsw_sp_nexthop_group_vr_ht_params);
3062 }
3063 
3064 static int
3065 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3066 				       const struct mlxsw_sp_fib *fib)
3067 {
3068 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3069 	int err;
3070 
3071 	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3072 	if (!vr_entry)
3073 		return -ENOMEM;
3074 
3075 	vr_entry->key.vr_id = fib->vr->id;
3076 	vr_entry->key.proto = fib->proto;
3077 	refcount_set(&vr_entry->ref_count, 1);
3078 
3079 	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3080 				     mlxsw_sp_nexthop_group_vr_ht_params);
3081 	if (err)
3082 		goto err_hashtable_insert;
3083 
3084 	list_add(&vr_entry->list, &nh_grp->vr_list);
3085 
3086 	return 0;
3087 
3088 err_hashtable_insert:
3089 	kfree(vr_entry);
3090 	return err;
3091 }
3092 
3093 static void
3094 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3095 					struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3096 {
3097 	list_del(&vr_entry->list);
3098 	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3099 			       mlxsw_sp_nexthop_group_vr_ht_params);
3100 	kfree(vr_entry);
3101 }
3102 
3103 static int
3104 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3105 			       const struct mlxsw_sp_fib *fib)
3106 {
3107 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3108 
3109 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3110 	if (vr_entry) {
3111 		refcount_inc(&vr_entry->ref_count);
3112 		return 0;
3113 	}
3114 
3115 	return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3116 }
3117 
3118 static void
3119 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3120 				 const struct mlxsw_sp_fib *fib)
3121 {
3122 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3123 
3124 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3125 	if (WARN_ON_ONCE(!vr_entry))
3126 		return;
3127 
3128 	if (!refcount_dec_and_test(&vr_entry->ref_count))
3129 		return;
3130 
3131 	mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3132 }
3133 
3134 struct mlxsw_sp_nexthop_group_cmp_arg {
3135 	enum mlxsw_sp_nexthop_group_type type;
3136 	union {
3137 		struct fib_info *fi;
3138 		struct mlxsw_sp_fib6_entry *fib6_entry;
3139 		u32 id;
3140 	};
3141 };
3142 
3143 static bool
3144 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3145 				    const struct in6_addr *gw, int ifindex,
3146 				    int weight)
3147 {
3148 	int i;
3149 
3150 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3151 		const struct mlxsw_sp_nexthop *nh;
3152 
3153 		nh = &nh_grp->nhgi->nexthops[i];
3154 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3155 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3156 			return true;
3157 	}
3158 
3159 	return false;
3160 }
3161 
3162 static bool
3163 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3164 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
3165 {
3166 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3167 
3168 	if (nh_grp->nhgi->count != fib6_entry->nrt6)
3169 		return false;
3170 
3171 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3172 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3173 		struct in6_addr *gw;
3174 		int ifindex, weight;
3175 
3176 		ifindex = fib6_nh->fib_nh_dev->ifindex;
3177 		weight = fib6_nh->fib_nh_weight;
3178 		gw = &fib6_nh->fib_nh_gw6;
3179 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3180 							 weight))
3181 			return false;
3182 	}
3183 
3184 	return true;
3185 }
3186 
3187 static int
3188 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3189 {
3190 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3191 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3192 
3193 	if (nh_grp->type != cmp_arg->type)
3194 		return 1;
3195 
3196 	switch (cmp_arg->type) {
3197 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3198 		return cmp_arg->fi != nh_grp->ipv4.fi;
3199 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3200 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3201 						    cmp_arg->fib6_entry);
3202 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3203 		return cmp_arg->id != nh_grp->obj.id;
3204 	default:
3205 		WARN_ON(1);
3206 		return 1;
3207 	}
3208 }
3209 
3210 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3211 {
3212 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
3213 	const struct mlxsw_sp_nexthop *nh;
3214 	struct fib_info *fi;
3215 	unsigned int val;
3216 	int i;
3217 
3218 	switch (nh_grp->type) {
3219 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3220 		fi = nh_grp->ipv4.fi;
3221 		return jhash(&fi, sizeof(fi), seed);
3222 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3223 		val = nh_grp->nhgi->count;
3224 		for (i = 0; i < nh_grp->nhgi->count; i++) {
3225 			nh = &nh_grp->nhgi->nexthops[i];
3226 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3227 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3228 		}
3229 		return jhash(&val, sizeof(val), seed);
3230 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3231 		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3232 	default:
3233 		WARN_ON(1);
3234 		return 0;
3235 	}
3236 }
3237 
3238 static u32
3239 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3240 {
3241 	unsigned int val = fib6_entry->nrt6;
3242 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3243 
3244 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3245 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3246 		struct net_device *dev = fib6_nh->fib_nh_dev;
3247 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3248 
3249 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3250 		val ^= jhash(gw, sizeof(*gw), seed);
3251 	}
3252 
3253 	return jhash(&val, sizeof(val), seed);
3254 }
3255 
3256 static u32
3257 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3258 {
3259 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3260 
3261 	switch (cmp_arg->type) {
3262 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3263 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3264 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3265 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3266 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3267 		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3268 	default:
3269 		WARN_ON(1);
3270 		return 0;
3271 	}
3272 }
3273 
3274 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3275 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3276 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3277 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3278 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3279 };
3280 
3281 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3282 					 struct mlxsw_sp_nexthop_group *nh_grp)
3283 {
3284 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3285 	    !nh_grp->nhgi->gateway)
3286 		return 0;
3287 
3288 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3289 				      &nh_grp->ht_node,
3290 				      mlxsw_sp_nexthop_group_ht_params);
3291 }
3292 
3293 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3294 					  struct mlxsw_sp_nexthop_group *nh_grp)
3295 {
3296 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3297 	    !nh_grp->nhgi->gateway)
3298 		return;
3299 
3300 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3301 			       &nh_grp->ht_node,
3302 			       mlxsw_sp_nexthop_group_ht_params);
3303 }
3304 
3305 static struct mlxsw_sp_nexthop_group *
3306 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3307 			       struct fib_info *fi)
3308 {
3309 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3310 
3311 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3312 	cmp_arg.fi = fi;
3313 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3314 				      &cmp_arg,
3315 				      mlxsw_sp_nexthop_group_ht_params);
3316 }
3317 
3318 static struct mlxsw_sp_nexthop_group *
3319 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3320 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3321 {
3322 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3323 
3324 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3325 	cmp_arg.fib6_entry = fib6_entry;
3326 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3327 				      &cmp_arg,
3328 				      mlxsw_sp_nexthop_group_ht_params);
3329 }
3330 
3331 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3332 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3333 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3334 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3335 };
3336 
3337 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3338 				   struct mlxsw_sp_nexthop *nh)
3339 {
3340 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3341 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3342 }
3343 
3344 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3345 				    struct mlxsw_sp_nexthop *nh)
3346 {
3347 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3348 			       mlxsw_sp_nexthop_ht_params);
3349 }
3350 
3351 static struct mlxsw_sp_nexthop *
3352 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3353 			struct mlxsw_sp_nexthop_key key)
3354 {
3355 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3356 				      mlxsw_sp_nexthop_ht_params);
3357 }
3358 
3359 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3360 					     enum mlxsw_sp_l3proto proto,
3361 					     u16 vr_id,
3362 					     u32 adj_index, u16 ecmp_size,
3363 					     u32 new_adj_index,
3364 					     u16 new_ecmp_size)
3365 {
3366 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3367 
3368 	mlxsw_reg_raleu_pack(raleu_pl,
3369 			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3370 			     adj_index, ecmp_size, new_adj_index,
3371 			     new_ecmp_size);
3372 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3373 }
3374 
3375 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3376 					  struct mlxsw_sp_nexthop_group *nh_grp,
3377 					  u32 old_adj_index, u16 old_ecmp_size)
3378 {
3379 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3380 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3381 	int err;
3382 
3383 	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3384 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3385 							vr_entry->key.proto,
3386 							vr_entry->key.vr_id,
3387 							old_adj_index,
3388 							old_ecmp_size,
3389 							nhgi->adj_index,
3390 							nhgi->ecmp_size);
3391 		if (err)
3392 			goto err_mass_update_vr;
3393 	}
3394 	return 0;
3395 
3396 err_mass_update_vr:
3397 	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3398 		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3399 						  vr_entry->key.vr_id,
3400 						  nhgi->adj_index,
3401 						  nhgi->ecmp_size,
3402 						  old_adj_index, old_ecmp_size);
3403 	return err;
3404 }
3405 
3406 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3407 				     struct mlxsw_sp_nexthop *nh)
3408 {
3409 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3410 	char ratr_pl[MLXSW_REG_RATR_LEN];
3411 
3412 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
3413 			    true, MLXSW_REG_RATR_TYPE_ETHERNET,
3414 			    adj_index, nh->rif->rif_index);
3415 	if (nh->discard)
3416 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3417 					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3418 	else
3419 		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3420 	if (nh->counter_valid)
3421 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3422 	else
3423 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3424 
3425 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3426 }
3427 
3428 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3429 			    struct mlxsw_sp_nexthop *nh)
3430 {
3431 	int i;
3432 
3433 	for (i = 0; i < nh->num_adj_entries; i++) {
3434 		int err;
3435 
3436 		err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3437 		if (err)
3438 			return err;
3439 	}
3440 
3441 	return 0;
3442 }
3443 
3444 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3445 					  u32 adj_index,
3446 					  struct mlxsw_sp_nexthop *nh)
3447 {
3448 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3449 
3450 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3451 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3452 }
3453 
3454 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3455 					u32 adj_index,
3456 					struct mlxsw_sp_nexthop *nh)
3457 {
3458 	int i;
3459 
3460 	for (i = 0; i < nh->num_adj_entries; i++) {
3461 		int err;
3462 
3463 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3464 						     nh);
3465 		if (err)
3466 			return err;
3467 	}
3468 
3469 	return 0;
3470 }
3471 
3472 static int
3473 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3474 			      struct mlxsw_sp_nexthop_group_info *nhgi,
3475 			      bool reallocate)
3476 {
3477 	u32 adj_index = nhgi->adj_index; /* base */
3478 	struct mlxsw_sp_nexthop *nh;
3479 	int i;
3480 
3481 	for (i = 0; i < nhgi->count; i++) {
3482 		nh = &nhgi->nexthops[i];
3483 
3484 		if (!nh->should_offload) {
3485 			nh->offloaded = 0;
3486 			continue;
3487 		}
3488 
3489 		if (nh->update || reallocate) {
3490 			int err = 0;
3491 
3492 			switch (nh->type) {
3493 			case MLXSW_SP_NEXTHOP_TYPE_ETH:
3494 				err = mlxsw_sp_nexthop_update
3495 					    (mlxsw_sp, adj_index, nh);
3496 				break;
3497 			case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3498 				err = mlxsw_sp_nexthop_ipip_update
3499 					    (mlxsw_sp, adj_index, nh);
3500 				break;
3501 			}
3502 			if (err)
3503 				return err;
3504 			nh->update = 0;
3505 			nh->offloaded = 1;
3506 		}
3507 		adj_index += nh->num_adj_entries;
3508 	}
3509 	return 0;
3510 }
3511 
3512 static int
3513 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3514 				    struct mlxsw_sp_nexthop_group *nh_grp)
3515 {
3516 	struct mlxsw_sp_fib_entry *fib_entry;
3517 	int err;
3518 
3519 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3520 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3521 		if (err)
3522 			return err;
3523 	}
3524 	return 0;
3525 }
3526 
3527 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3528 {
3529 	/* Valid sizes for an adjacency group are:
3530 	 * 1-64, 512, 1024, 2048 and 4096.
3531 	 */
3532 	if (*p_adj_grp_size <= 64)
3533 		return;
3534 	else if (*p_adj_grp_size <= 512)
3535 		*p_adj_grp_size = 512;
3536 	else if (*p_adj_grp_size <= 1024)
3537 		*p_adj_grp_size = 1024;
3538 	else if (*p_adj_grp_size <= 2048)
3539 		*p_adj_grp_size = 2048;
3540 	else
3541 		*p_adj_grp_size = 4096;
3542 }
3543 
3544 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3545 					     unsigned int alloc_size)
3546 {
3547 	if (alloc_size >= 4096)
3548 		*p_adj_grp_size = 4096;
3549 	else if (alloc_size >= 2048)
3550 		*p_adj_grp_size = 2048;
3551 	else if (alloc_size >= 1024)
3552 		*p_adj_grp_size = 1024;
3553 	else if (alloc_size >= 512)
3554 		*p_adj_grp_size = 512;
3555 }
3556 
3557 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3558 				     u16 *p_adj_grp_size)
3559 {
3560 	unsigned int alloc_size;
3561 	int err;
3562 
3563 	/* Round up the requested group size to the next size supported
3564 	 * by the device and make sure the request can be satisfied.
3565 	 */
3566 	mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3567 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3568 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3569 					      *p_adj_grp_size, &alloc_size);
3570 	if (err)
3571 		return err;
3572 	/* It is possible the allocation results in more allocated
3573 	 * entries than requested. Try to use as much of them as
3574 	 * possible.
3575 	 */
3576 	mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3577 
3578 	return 0;
3579 }
3580 
3581 static void
3582 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3583 {
3584 	int i, g = 0, sum_norm_weight = 0;
3585 	struct mlxsw_sp_nexthop *nh;
3586 
3587 	for (i = 0; i < nhgi->count; i++) {
3588 		nh = &nhgi->nexthops[i];
3589 
3590 		if (!nh->should_offload)
3591 			continue;
3592 		if (g > 0)
3593 			g = gcd(nh->nh_weight, g);
3594 		else
3595 			g = nh->nh_weight;
3596 	}
3597 
3598 	for (i = 0; i < nhgi->count; i++) {
3599 		nh = &nhgi->nexthops[i];
3600 
3601 		if (!nh->should_offload)
3602 			continue;
3603 		nh->norm_nh_weight = nh->nh_weight / g;
3604 		sum_norm_weight += nh->norm_nh_weight;
3605 	}
3606 
3607 	nhgi->sum_norm_weight = sum_norm_weight;
3608 }
3609 
3610 static void
3611 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3612 {
3613 	int i, weight = 0, lower_bound = 0;
3614 	int total = nhgi->sum_norm_weight;
3615 	u16 ecmp_size = nhgi->ecmp_size;
3616 
3617 	for (i = 0; i < nhgi->count; i++) {
3618 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3619 		int upper_bound;
3620 
3621 		if (!nh->should_offload)
3622 			continue;
3623 		weight += nh->norm_nh_weight;
3624 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3625 		nh->num_adj_entries = upper_bound - lower_bound;
3626 		lower_bound = upper_bound;
3627 	}
3628 }
3629 
3630 static struct mlxsw_sp_nexthop *
3631 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3632 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3633 
3634 static void
3635 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3636 					struct mlxsw_sp_nexthop_group *nh_grp)
3637 {
3638 	int i;
3639 
3640 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3641 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3642 
3643 		if (nh->offloaded)
3644 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3645 		else
3646 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3647 	}
3648 }
3649 
3650 static void
3651 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3652 					  struct mlxsw_sp_fib6_entry *fib6_entry)
3653 {
3654 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3655 
3656 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3657 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3658 		struct mlxsw_sp_nexthop *nh;
3659 
3660 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3661 		if (nh && nh->offloaded)
3662 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3663 		else
3664 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3665 	}
3666 }
3667 
3668 static void
3669 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3670 					struct mlxsw_sp_nexthop_group *nh_grp)
3671 {
3672 	struct mlxsw_sp_fib6_entry *fib6_entry;
3673 
3674 	/* Unfortunately, in IPv6 the route and the nexthop are described by
3675 	 * the same struct, so we need to iterate over all the routes using the
3676 	 * nexthop group and set / clear the offload indication for them.
3677 	 */
3678 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3679 			    common.nexthop_group_node)
3680 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3681 }
3682 
3683 static void
3684 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3685 					   struct mlxsw_sp_nexthop_group *nh_grp)
3686 {
3687 	/* Do not update the flags if the nexthop group is being destroyed
3688 	 * since:
3689 	 * 1. The nexthop objects is being deleted, in which case the flags are
3690 	 * irrelevant.
3691 	 * 2. The nexthop group was replaced by a newer group, in which case
3692 	 * the flags of the nexthop object were already updated based on the
3693 	 * new group.
3694 	 */
3695 	if (nh_grp->can_destroy)
3696 		return;
3697 
3698 	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3699 			     nh_grp->nhgi->adj_index_valid, false);
3700 }
3701 
3702 static void
3703 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3704 				       struct mlxsw_sp_nexthop_group *nh_grp)
3705 {
3706 	switch (nh_grp->type) {
3707 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3708 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3709 		break;
3710 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3711 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3712 		break;
3713 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3714 		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3715 		break;
3716 	}
3717 }
3718 
3719 static int
3720 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3721 			       struct mlxsw_sp_nexthop_group *nh_grp)
3722 {
3723 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3724 	u16 ecmp_size, old_ecmp_size;
3725 	struct mlxsw_sp_nexthop *nh;
3726 	bool offload_change = false;
3727 	u32 adj_index;
3728 	bool old_adj_index_valid;
3729 	int i, err2, err = 0;
3730 	u32 old_adj_index;
3731 
3732 	if (!nhgi->gateway)
3733 		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3734 
3735 	for (i = 0; i < nhgi->count; i++) {
3736 		nh = &nhgi->nexthops[i];
3737 
3738 		if (nh->should_offload != nh->offloaded) {
3739 			offload_change = true;
3740 			if (nh->should_offload)
3741 				nh->update = 1;
3742 		}
3743 	}
3744 	if (!offload_change) {
3745 		/* Nothing was added or removed, so no need to reallocate. Just
3746 		 * update MAC on existing adjacency indexes.
3747 		 */
3748 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3749 		if (err) {
3750 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3751 			goto set_trap;
3752 		}
3753 		return 0;
3754 	}
3755 	mlxsw_sp_nexthop_group_normalize(nhgi);
3756 	if (!nhgi->sum_norm_weight)
3757 		/* No neigh of this group is connected so we just set
3758 		 * the trap and let everthing flow through kernel.
3759 		 */
3760 		goto set_trap;
3761 
3762 	ecmp_size = nhgi->sum_norm_weight;
3763 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3764 	if (err)
3765 		/* No valid allocation size available. */
3766 		goto set_trap;
3767 
3768 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3769 				  ecmp_size, &adj_index);
3770 	if (err) {
3771 		/* We ran out of KVD linear space, just set the
3772 		 * trap and let everything flow through kernel.
3773 		 */
3774 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3775 		goto set_trap;
3776 	}
3777 	old_adj_index_valid = nhgi->adj_index_valid;
3778 	old_adj_index = nhgi->adj_index;
3779 	old_ecmp_size = nhgi->ecmp_size;
3780 	nhgi->adj_index_valid = 1;
3781 	nhgi->adj_index = adj_index;
3782 	nhgi->ecmp_size = ecmp_size;
3783 	mlxsw_sp_nexthop_group_rebalance(nhgi);
3784 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
3785 	if (err) {
3786 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3787 		goto set_trap;
3788 	}
3789 
3790 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3791 
3792 	if (!old_adj_index_valid) {
3793 		/* The trap was set for fib entries, so we have to call
3794 		 * fib entry update to unset it and use adjacency index.
3795 		 */
3796 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3797 		if (err) {
3798 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3799 			goto set_trap;
3800 		}
3801 		return 0;
3802 	}
3803 
3804 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3805 					     old_adj_index, old_ecmp_size);
3806 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3807 			   old_ecmp_size, old_adj_index);
3808 	if (err) {
3809 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3810 		goto set_trap;
3811 	}
3812 
3813 	return 0;
3814 
3815 set_trap:
3816 	old_adj_index_valid = nhgi->adj_index_valid;
3817 	nhgi->adj_index_valid = 0;
3818 	for (i = 0; i < nhgi->count; i++) {
3819 		nh = &nhgi->nexthops[i];
3820 		nh->offloaded = 0;
3821 	}
3822 	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3823 	if (err2)
3824 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3825 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3826 	if (old_adj_index_valid)
3827 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3828 				   nhgi->ecmp_size, nhgi->adj_index);
3829 	return err;
3830 }
3831 
3832 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3833 					    bool removing)
3834 {
3835 	if (!removing)
3836 		nh->should_offload = 1;
3837 	else
3838 		nh->should_offload = 0;
3839 	nh->update = 1;
3840 }
3841 
3842 static int
3843 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3844 				    struct mlxsw_sp_neigh_entry *neigh_entry)
3845 {
3846 	struct neighbour *n, *old_n = neigh_entry->key.n;
3847 	struct mlxsw_sp_nexthop *nh;
3848 	bool entry_connected;
3849 	u8 nud_state, dead;
3850 	int err;
3851 
3852 	nh = list_first_entry(&neigh_entry->nexthop_list,
3853 			      struct mlxsw_sp_nexthop, neigh_list_node);
3854 
3855 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3856 	if (!n) {
3857 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3858 		if (IS_ERR(n))
3859 			return PTR_ERR(n);
3860 		neigh_event_send(n, NULL);
3861 	}
3862 
3863 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3864 	neigh_entry->key.n = n;
3865 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3866 	if (err)
3867 		goto err_neigh_entry_insert;
3868 
3869 	read_lock_bh(&n->lock);
3870 	nud_state = n->nud_state;
3871 	dead = n->dead;
3872 	read_unlock_bh(&n->lock);
3873 	entry_connected = nud_state & NUD_VALID && !dead;
3874 
3875 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
3876 			    neigh_list_node) {
3877 		neigh_release(old_n);
3878 		neigh_clone(n);
3879 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
3880 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
3881 	}
3882 
3883 	neigh_release(n);
3884 
3885 	return 0;
3886 
3887 err_neigh_entry_insert:
3888 	neigh_entry->key.n = old_n;
3889 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3890 	neigh_release(n);
3891 	return err;
3892 }
3893 
3894 static void
3895 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3896 			      struct mlxsw_sp_neigh_entry *neigh_entry,
3897 			      bool removing, bool dead)
3898 {
3899 	struct mlxsw_sp_nexthop *nh;
3900 
3901 	if (list_empty(&neigh_entry->nexthop_list))
3902 		return;
3903 
3904 	if (dead) {
3905 		int err;
3906 
3907 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
3908 							  neigh_entry);
3909 		if (err)
3910 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
3911 		return;
3912 	}
3913 
3914 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
3915 			    neigh_list_node) {
3916 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
3917 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
3918 	}
3919 }
3920 
3921 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3922 				      struct mlxsw_sp_rif *rif)
3923 {
3924 	if (nh->rif)
3925 		return;
3926 
3927 	nh->rif = rif;
3928 	list_add(&nh->rif_list_node, &rif->nexthop_list);
3929 }
3930 
3931 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3932 {
3933 	if (!nh->rif)
3934 		return;
3935 
3936 	list_del(&nh->rif_list_node);
3937 	nh->rif = NULL;
3938 }
3939 
3940 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3941 				       struct mlxsw_sp_nexthop *nh)
3942 {
3943 	struct mlxsw_sp_neigh_entry *neigh_entry;
3944 	struct neighbour *n;
3945 	u8 nud_state, dead;
3946 	int err;
3947 
3948 	if (!nh->nhgi->gateway || nh->neigh_entry)
3949 		return 0;
3950 
3951 	/* Take a reference of neigh here ensuring that neigh would
3952 	 * not be destructed before the nexthop entry is finished.
3953 	 * The reference is taken either in neigh_lookup() or
3954 	 * in neigh_create() in case n is not found.
3955 	 */
3956 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3957 	if (!n) {
3958 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3959 		if (IS_ERR(n))
3960 			return PTR_ERR(n);
3961 		neigh_event_send(n, NULL);
3962 	}
3963 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3964 	if (!neigh_entry) {
3965 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3966 		if (IS_ERR(neigh_entry)) {
3967 			err = -EINVAL;
3968 			goto err_neigh_entry_create;
3969 		}
3970 	}
3971 
3972 	/* If that is the first nexthop connected to that neigh, add to
3973 	 * nexthop_neighs_list
3974 	 */
3975 	if (list_empty(&neigh_entry->nexthop_list))
3976 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
3977 			      &mlxsw_sp->router->nexthop_neighs_list);
3978 
3979 	nh->neigh_entry = neigh_entry;
3980 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3981 	read_lock_bh(&n->lock);
3982 	nud_state = n->nud_state;
3983 	dead = n->dead;
3984 	read_unlock_bh(&n->lock);
3985 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
3986 
3987 	return 0;
3988 
3989 err_neigh_entry_create:
3990 	neigh_release(n);
3991 	return err;
3992 }
3993 
3994 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3995 					struct mlxsw_sp_nexthop *nh)
3996 {
3997 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3998 	struct neighbour *n;
3999 
4000 	if (!neigh_entry)
4001 		return;
4002 	n = neigh_entry->key.n;
4003 
4004 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4005 	list_del(&nh->neigh_list_node);
4006 	nh->neigh_entry = NULL;
4007 
4008 	/* If that is the last nexthop connected to that neigh, remove from
4009 	 * nexthop_neighs_list
4010 	 */
4011 	if (list_empty(&neigh_entry->nexthop_list))
4012 		list_del(&neigh_entry->nexthop_neighs_list_node);
4013 
4014 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4015 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4016 
4017 	neigh_release(n);
4018 }
4019 
4020 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4021 {
4022 	struct net_device *ul_dev;
4023 	bool is_up;
4024 
4025 	rcu_read_lock();
4026 	ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4027 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4028 	rcu_read_unlock();
4029 
4030 	return is_up;
4031 }
4032 
4033 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4034 				       struct mlxsw_sp_nexthop *nh,
4035 				       struct mlxsw_sp_ipip_entry *ipip_entry)
4036 {
4037 	bool removing;
4038 
4039 	if (!nh->nhgi->gateway || nh->ipip_entry)
4040 		return;
4041 
4042 	nh->ipip_entry = ipip_entry;
4043 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4044 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
4045 	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4046 }
4047 
4048 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4049 				       struct mlxsw_sp_nexthop *nh)
4050 {
4051 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4052 
4053 	if (!ipip_entry)
4054 		return;
4055 
4056 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4057 	nh->ipip_entry = NULL;
4058 }
4059 
4060 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4061 					const struct fib_nh *fib_nh,
4062 					enum mlxsw_sp_ipip_type *p_ipipt)
4063 {
4064 	struct net_device *dev = fib_nh->fib_nh_dev;
4065 
4066 	return dev &&
4067 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4068 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4069 }
4070 
4071 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4072 				      struct mlxsw_sp_nexthop *nh,
4073 				      const struct net_device *dev)
4074 {
4075 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4076 	struct mlxsw_sp_ipip_entry *ipip_entry;
4077 	struct mlxsw_sp_rif *rif;
4078 	int err;
4079 
4080 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4081 	if (ipip_entry) {
4082 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4083 		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4084 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4085 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4086 			return 0;
4087 		}
4088 	}
4089 
4090 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4091 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4092 	if (!rif)
4093 		return 0;
4094 
4095 	mlxsw_sp_nexthop_rif_init(nh, rif);
4096 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4097 	if (err)
4098 		goto err_neigh_init;
4099 
4100 	return 0;
4101 
4102 err_neigh_init:
4103 	mlxsw_sp_nexthop_rif_fini(nh);
4104 	return err;
4105 }
4106 
4107 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4108 				       struct mlxsw_sp_nexthop *nh)
4109 {
4110 	switch (nh->type) {
4111 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4112 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4113 		mlxsw_sp_nexthop_rif_fini(nh);
4114 		break;
4115 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4116 		mlxsw_sp_nexthop_rif_fini(nh);
4117 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4118 		break;
4119 	}
4120 }
4121 
4122 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4123 				  struct mlxsw_sp_nexthop_group *nh_grp,
4124 				  struct mlxsw_sp_nexthop *nh,
4125 				  struct fib_nh *fib_nh)
4126 {
4127 	struct net_device *dev = fib_nh->fib_nh_dev;
4128 	struct in_device *in_dev;
4129 	int err;
4130 
4131 	nh->nhgi = nh_grp->nhgi;
4132 	nh->key.fib_nh = fib_nh;
4133 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4134 	nh->nh_weight = fib_nh->fib_nh_weight;
4135 #else
4136 	nh->nh_weight = 1;
4137 #endif
4138 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4139 	nh->neigh_tbl = &arp_tbl;
4140 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4141 	if (err)
4142 		return err;
4143 
4144 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4145 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4146 
4147 	if (!dev)
4148 		return 0;
4149 	nh->ifindex = dev->ifindex;
4150 
4151 	rcu_read_lock();
4152 	in_dev = __in_dev_get_rcu(dev);
4153 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4154 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4155 		rcu_read_unlock();
4156 		return 0;
4157 	}
4158 	rcu_read_unlock();
4159 
4160 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4161 	if (err)
4162 		goto err_nexthop_neigh_init;
4163 
4164 	return 0;
4165 
4166 err_nexthop_neigh_init:
4167 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4168 	return err;
4169 }
4170 
4171 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4172 				   struct mlxsw_sp_nexthop *nh)
4173 {
4174 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4175 	list_del(&nh->router_list_node);
4176 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4177 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4178 }
4179 
4180 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4181 				    unsigned long event, struct fib_nh *fib_nh)
4182 {
4183 	struct mlxsw_sp_nexthop_key key;
4184 	struct mlxsw_sp_nexthop *nh;
4185 
4186 	if (mlxsw_sp->router->aborted)
4187 		return;
4188 
4189 	key.fib_nh = fib_nh;
4190 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4191 	if (!nh)
4192 		return;
4193 
4194 	switch (event) {
4195 	case FIB_EVENT_NH_ADD:
4196 		mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4197 		break;
4198 	case FIB_EVENT_NH_DEL:
4199 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4200 		break;
4201 	}
4202 
4203 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4204 }
4205 
4206 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4207 					struct mlxsw_sp_rif *rif)
4208 {
4209 	struct mlxsw_sp_nexthop *nh;
4210 	bool removing;
4211 
4212 	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4213 		switch (nh->type) {
4214 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
4215 			removing = false;
4216 			break;
4217 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4218 			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4219 			break;
4220 		default:
4221 			WARN_ON(1);
4222 			continue;
4223 		}
4224 
4225 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4226 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4227 	}
4228 }
4229 
4230 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4231 					 struct mlxsw_sp_rif *old_rif,
4232 					 struct mlxsw_sp_rif *new_rif)
4233 {
4234 	struct mlxsw_sp_nexthop *nh;
4235 
4236 	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4237 	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4238 		nh->rif = new_rif;
4239 	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4240 }
4241 
4242 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4243 					   struct mlxsw_sp_rif *rif)
4244 {
4245 	struct mlxsw_sp_nexthop *nh, *tmp;
4246 
4247 	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4248 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4249 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4250 	}
4251 }
4252 
4253 static int
4254 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4255 				     const struct nh_notifier_single_info *nh,
4256 				     struct netlink_ext_ack *extack)
4257 {
4258 	int err = -EINVAL;
4259 
4260 	if (nh->is_fdb)
4261 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4262 	else if (nh->has_encap)
4263 		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4264 	else
4265 		err = 0;
4266 
4267 	return err;
4268 }
4269 
4270 static int
4271 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4272 				    const struct nh_notifier_grp_info *nh_grp,
4273 				    struct netlink_ext_ack *extack)
4274 {
4275 	int i;
4276 
4277 	if (nh_grp->is_fdb) {
4278 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4279 		return -EINVAL;
4280 	}
4281 
4282 	for (i = 0; i < nh_grp->num_nh; i++) {
4283 		const struct nh_notifier_single_info *nh;
4284 		int err;
4285 
4286 		nh = &nh_grp->nh_entries[i].nh;
4287 		err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh,
4288 							   extack);
4289 		if (err)
4290 			return err;
4291 
4292 		/* Device only nexthops with an IPIP device are programmed as
4293 		 * encapsulating adjacency entries.
4294 		 */
4295 		if (!nh->gw_family && !nh->is_reject &&
4296 		    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4297 			NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4298 			return -EINVAL;
4299 		}
4300 	}
4301 
4302 	return 0;
4303 }
4304 
4305 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4306 					 unsigned long event,
4307 					 struct nh_notifier_info *info)
4308 {
4309 	if (event != NEXTHOP_EVENT_REPLACE)
4310 		return 0;
4311 
4312 	switch (info->type) {
4313 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4314 		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4315 							    info->extack);
4316 	case NH_NOTIFIER_INFO_TYPE_GRP:
4317 		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4318 							   info->nh_grp,
4319 							   info->extack);
4320 	default:
4321 		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4322 		return -EOPNOTSUPP;
4323 	}
4324 }
4325 
4326 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4327 					    const struct nh_notifier_info *info)
4328 {
4329 	const struct net_device *dev;
4330 
4331 	switch (info->type) {
4332 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4333 		dev = info->nh->dev;
4334 		return info->nh->gw_family || info->nh->is_reject ||
4335 		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4336 	case NH_NOTIFIER_INFO_TYPE_GRP:
4337 		/* Already validated earlier. */
4338 		return true;
4339 	default:
4340 		return false;
4341 	}
4342 }
4343 
4344 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4345 						struct mlxsw_sp_nexthop *nh)
4346 {
4347 	u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
4348 
4349 	nh->discard = 1;
4350 	nh->should_offload = 1;
4351 	/* While nexthops that discard packets do not forward packets
4352 	 * via an egress RIF, they still need to be programmed using a
4353 	 * valid RIF, so use the loopback RIF created during init.
4354 	 */
4355 	nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
4356 }
4357 
4358 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4359 						struct mlxsw_sp_nexthop *nh)
4360 {
4361 	nh->rif = NULL;
4362 	nh->should_offload = 0;
4363 }
4364 
4365 static int
4366 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4367 			  struct mlxsw_sp_nexthop_group *nh_grp,
4368 			  struct mlxsw_sp_nexthop *nh,
4369 			  struct nh_notifier_single_info *nh_obj, int weight)
4370 {
4371 	struct net_device *dev = nh_obj->dev;
4372 	int err;
4373 
4374 	nh->nhgi = nh_grp->nhgi;
4375 	nh->nh_weight = weight;
4376 
4377 	switch (nh_obj->gw_family) {
4378 	case AF_INET:
4379 		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4380 		nh->neigh_tbl = &arp_tbl;
4381 		break;
4382 	case AF_INET6:
4383 		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
4384 #if IS_ENABLED(CONFIG_IPV6)
4385 		nh->neigh_tbl = &nd_tbl;
4386 #endif
4387 		break;
4388 	}
4389 
4390 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4391 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4392 	nh->ifindex = dev->ifindex;
4393 
4394 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4395 	if (err)
4396 		goto err_type_init;
4397 
4398 	if (nh_obj->is_reject)
4399 		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
4400 
4401 	return 0;
4402 
4403 err_type_init:
4404 	list_del(&nh->router_list_node);
4405 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4406 	return err;
4407 }
4408 
4409 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
4410 				      struct mlxsw_sp_nexthop *nh)
4411 {
4412 	if (nh->discard)
4413 		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
4414 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4415 	list_del(&nh->router_list_node);
4416 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4417 }
4418 
4419 static int
4420 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
4421 				     struct mlxsw_sp_nexthop_group *nh_grp,
4422 				     struct nh_notifier_info *info)
4423 {
4424 	struct mlxsw_sp_nexthop_group_info *nhgi;
4425 	struct mlxsw_sp_nexthop *nh;
4426 	unsigned int nhs;
4427 	int err, i;
4428 
4429 	switch (info->type) {
4430 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4431 		nhs = 1;
4432 		break;
4433 	case NH_NOTIFIER_INFO_TYPE_GRP:
4434 		nhs = info->nh_grp->num_nh;
4435 		break;
4436 	default:
4437 		return -EINVAL;
4438 	}
4439 
4440 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4441 	if (!nhgi)
4442 		return -ENOMEM;
4443 	nh_grp->nhgi = nhgi;
4444 	nhgi->nh_grp = nh_grp;
4445 	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
4446 	nhgi->count = nhs;
4447 	for (i = 0; i < nhgi->count; i++) {
4448 		struct nh_notifier_single_info *nh_obj;
4449 		int weight;
4450 
4451 		nh = &nhgi->nexthops[i];
4452 		switch (info->type) {
4453 		case NH_NOTIFIER_INFO_TYPE_SINGLE:
4454 			nh_obj = info->nh;
4455 			weight = 1;
4456 			break;
4457 		case NH_NOTIFIER_INFO_TYPE_GRP:
4458 			nh_obj = &info->nh_grp->nh_entries[i].nh;
4459 			weight = info->nh_grp->nh_entries[i].weight;
4460 			break;
4461 		default:
4462 			err = -EINVAL;
4463 			goto err_nexthop_obj_init;
4464 		}
4465 		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
4466 						weight);
4467 		if (err)
4468 			goto err_nexthop_obj_init;
4469 	}
4470 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4471 	if (err) {
4472 		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
4473 		goto err_group_refresh;
4474 	}
4475 
4476 	return 0;
4477 
4478 err_group_refresh:
4479 	i = nhgi->count;
4480 err_nexthop_obj_init:
4481 	for (i--; i >= 0; i--) {
4482 		nh = &nhgi->nexthops[i];
4483 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4484 	}
4485 	kfree(nhgi);
4486 	return err;
4487 }
4488 
4489 static void
4490 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
4491 				     struct mlxsw_sp_nexthop_group *nh_grp)
4492 {
4493 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4494 	int i;
4495 
4496 	for (i = nhgi->count - 1; i >= 0; i--) {
4497 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4498 
4499 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4500 	}
4501 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4502 	WARN_ON_ONCE(nhgi->adj_index_valid);
4503 	kfree(nhgi);
4504 }
4505 
4506 static struct mlxsw_sp_nexthop_group *
4507 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
4508 				  struct nh_notifier_info *info)
4509 {
4510 	struct mlxsw_sp_nexthop_group *nh_grp;
4511 	int err;
4512 
4513 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
4514 	if (!nh_grp)
4515 		return ERR_PTR(-ENOMEM);
4516 	INIT_LIST_HEAD(&nh_grp->vr_list);
4517 	err = rhashtable_init(&nh_grp->vr_ht,
4518 			      &mlxsw_sp_nexthop_group_vr_ht_params);
4519 	if (err)
4520 		goto err_nexthop_group_vr_ht_init;
4521 	INIT_LIST_HEAD(&nh_grp->fib_list);
4522 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
4523 	nh_grp->obj.id = info->id;
4524 
4525 	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
4526 	if (err)
4527 		goto err_nexthop_group_info_init;
4528 
4529 	nh_grp->can_destroy = false;
4530 
4531 	return nh_grp;
4532 
4533 err_nexthop_group_info_init:
4534 	rhashtable_destroy(&nh_grp->vr_ht);
4535 err_nexthop_group_vr_ht_init:
4536 	kfree(nh_grp);
4537 	return ERR_PTR(err);
4538 }
4539 
4540 static void
4541 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
4542 				   struct mlxsw_sp_nexthop_group *nh_grp)
4543 {
4544 	if (!nh_grp->can_destroy)
4545 		return;
4546 	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
4547 	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
4548 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
4549 	rhashtable_destroy(&nh_grp->vr_ht);
4550 	kfree(nh_grp);
4551 }
4552 
4553 static struct mlxsw_sp_nexthop_group *
4554 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
4555 {
4556 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
4557 
4558 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
4559 	cmp_arg.id = id;
4560 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
4561 				      &cmp_arg,
4562 				      mlxsw_sp_nexthop_group_ht_params);
4563 }
4564 
4565 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
4566 					  struct mlxsw_sp_nexthop_group *nh_grp)
4567 {
4568 	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4569 }
4570 
4571 static int
4572 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
4573 				   struct mlxsw_sp_nexthop_group *nh_grp,
4574 				   struct mlxsw_sp_nexthop_group *old_nh_grp,
4575 				   struct netlink_ext_ack *extack)
4576 {
4577 	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
4578 	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
4579 	int err;
4580 
4581 	old_nh_grp->nhgi = new_nhgi;
4582 	new_nhgi->nh_grp = old_nh_grp;
4583 	nh_grp->nhgi = old_nhgi;
4584 	old_nhgi->nh_grp = nh_grp;
4585 
4586 	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
4587 		/* Both the old adjacency index and the new one are valid.
4588 		 * Routes are currently using the old one. Tell the device to
4589 		 * replace the old adjacency index with the new one.
4590 		 */
4591 		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
4592 						     old_nhgi->adj_index,
4593 						     old_nhgi->ecmp_size);
4594 		if (err) {
4595 			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
4596 			goto err_out;
4597 		}
4598 	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
4599 		/* The old adjacency index is valid, while the new one is not.
4600 		 * Iterate over all the routes using the group and change them
4601 		 * to trap packets to the CPU.
4602 		 */
4603 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
4604 		if (err) {
4605 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
4606 			goto err_out;
4607 		}
4608 	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
4609 		/* The old adjacency index is invalid, while the new one is.
4610 		 * Iterate over all the routes using the group and change them
4611 		 * to forward packets using the new valid index.
4612 		 */
4613 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
4614 		if (err) {
4615 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
4616 			goto err_out;
4617 		}
4618 	}
4619 
4620 	/* Make sure the flags are set / cleared based on the new nexthop group
4621 	 * information.
4622 	 */
4623 	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
4624 
4625 	/* At this point 'nh_grp' is just a shell that is not used by anyone
4626 	 * and its nexthop group info is the old info that was just replaced
4627 	 * with the new one. Remove it.
4628 	 */
4629 	nh_grp->can_destroy = true;
4630 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
4631 
4632 	return 0;
4633 
4634 err_out:
4635 	old_nhgi->nh_grp = old_nh_grp;
4636 	nh_grp->nhgi = new_nhgi;
4637 	new_nhgi->nh_grp = nh_grp;
4638 	old_nh_grp->nhgi = old_nhgi;
4639 	return err;
4640 }
4641 
4642 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
4643 				    struct nh_notifier_info *info)
4644 {
4645 	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
4646 	struct netlink_ext_ack *extack = info->extack;
4647 	int err;
4648 
4649 	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
4650 	if (IS_ERR(nh_grp))
4651 		return PTR_ERR(nh_grp);
4652 
4653 	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
4654 	if (!old_nh_grp)
4655 		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
4656 	else
4657 		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
4658 							 old_nh_grp, extack);
4659 
4660 	if (err) {
4661 		nh_grp->can_destroy = true;
4662 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
4663 	}
4664 
4665 	return err;
4666 }
4667 
4668 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
4669 				     struct nh_notifier_info *info)
4670 {
4671 	struct mlxsw_sp_nexthop_group *nh_grp;
4672 
4673 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
4674 	if (!nh_grp)
4675 		return;
4676 
4677 	nh_grp->can_destroy = true;
4678 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
4679 
4680 	/* If the group still has routes using it, then defer the delete
4681 	 * operation until the last route using it is deleted.
4682 	 */
4683 	if (!list_empty(&nh_grp->fib_list))
4684 		return;
4685 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
4686 }
4687 
4688 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
4689 				      unsigned long event, void *ptr)
4690 {
4691 	struct nh_notifier_info *info = ptr;
4692 	struct mlxsw_sp_router *router;
4693 	int err = 0;
4694 
4695 	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
4696 	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
4697 	if (err)
4698 		goto out;
4699 
4700 	mutex_lock(&router->lock);
4701 
4702 	ASSERT_RTNL();
4703 
4704 	switch (event) {
4705 	case NEXTHOP_EVENT_REPLACE:
4706 		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
4707 		break;
4708 	case NEXTHOP_EVENT_DEL:
4709 		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
4710 		break;
4711 	default:
4712 		break;
4713 	}
4714 
4715 	mutex_unlock(&router->lock);
4716 
4717 out:
4718 	return notifier_from_errno(err);
4719 }
4720 
4721 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4722 				   struct fib_info *fi)
4723 {
4724 	const struct fib_nh *nh = fib_info_nh(fi, 0);
4725 
4726 	return nh->fib_nh_scope == RT_SCOPE_LINK ||
4727 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
4728 }
4729 
4730 static int
4731 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
4732 				  struct mlxsw_sp_nexthop_group *nh_grp)
4733 {
4734 	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
4735 	struct mlxsw_sp_nexthop_group_info *nhgi;
4736 	struct mlxsw_sp_nexthop *nh;
4737 	int err, i;
4738 
4739 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4740 	if (!nhgi)
4741 		return -ENOMEM;
4742 	nh_grp->nhgi = nhgi;
4743 	nhgi->nh_grp = nh_grp;
4744 	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
4745 	nhgi->count = nhs;
4746 	for (i = 0; i < nhgi->count; i++) {
4747 		struct fib_nh *fib_nh;
4748 
4749 		nh = &nhgi->nexthops[i];
4750 		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
4751 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
4752 		if (err)
4753 			goto err_nexthop4_init;
4754 	}
4755 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4756 	if (err)
4757 		goto err_group_refresh;
4758 
4759 	return 0;
4760 
4761 err_group_refresh:
4762 	i = nhgi->count;
4763 err_nexthop4_init:
4764 	for (i--; i >= 0; i--) {
4765 		nh = &nhgi->nexthops[i];
4766 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4767 	}
4768 	kfree(nhgi);
4769 	return err;
4770 }
4771 
4772 static void
4773 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
4774 				  struct mlxsw_sp_nexthop_group *nh_grp)
4775 {
4776 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4777 	int i;
4778 
4779 	for (i = nhgi->count - 1; i >= 0; i--) {
4780 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4781 
4782 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4783 	}
4784 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4785 	WARN_ON_ONCE(nhgi->adj_index_valid);
4786 	kfree(nhgi);
4787 }
4788 
4789 static struct mlxsw_sp_nexthop_group *
4790 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
4791 {
4792 	struct mlxsw_sp_nexthop_group *nh_grp;
4793 	int err;
4794 
4795 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
4796 	if (!nh_grp)
4797 		return ERR_PTR(-ENOMEM);
4798 	INIT_LIST_HEAD(&nh_grp->vr_list);
4799 	err = rhashtable_init(&nh_grp->vr_ht,
4800 			      &mlxsw_sp_nexthop_group_vr_ht_params);
4801 	if (err)
4802 		goto err_nexthop_group_vr_ht_init;
4803 	INIT_LIST_HEAD(&nh_grp->fib_list);
4804 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
4805 	nh_grp->ipv4.fi = fi;
4806 	fib_info_hold(fi);
4807 
4808 	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
4809 	if (err)
4810 		goto err_nexthop_group_info_init;
4811 
4812 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4813 	if (err)
4814 		goto err_nexthop_group_insert;
4815 
4816 	nh_grp->can_destroy = true;
4817 
4818 	return nh_grp;
4819 
4820 err_nexthop_group_insert:
4821 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
4822 err_nexthop_group_info_init:
4823 	fib_info_put(fi);
4824 	rhashtable_destroy(&nh_grp->vr_ht);
4825 err_nexthop_group_vr_ht_init:
4826 	kfree(nh_grp);
4827 	return ERR_PTR(err);
4828 }
4829 
4830 static void
4831 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
4832 				struct mlxsw_sp_nexthop_group *nh_grp)
4833 {
4834 	if (!nh_grp->can_destroy)
4835 		return;
4836 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
4837 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
4838 	fib_info_put(nh_grp->ipv4.fi);
4839 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
4840 	rhashtable_destroy(&nh_grp->vr_ht);
4841 	kfree(nh_grp);
4842 }
4843 
4844 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
4845 				       struct mlxsw_sp_fib_entry *fib_entry,
4846 				       struct fib_info *fi)
4847 {
4848 	struct mlxsw_sp_nexthop_group *nh_grp;
4849 
4850 	if (fi->nh) {
4851 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
4852 							   fi->nh->id);
4853 		if (WARN_ON_ONCE(!nh_grp))
4854 			return -EINVAL;
4855 		goto out;
4856 	}
4857 
4858 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
4859 	if (!nh_grp) {
4860 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
4861 		if (IS_ERR(nh_grp))
4862 			return PTR_ERR(nh_grp);
4863 	}
4864 out:
4865 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
4866 	fib_entry->nh_group = nh_grp;
4867 	return 0;
4868 }
4869 
4870 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
4871 					struct mlxsw_sp_fib_entry *fib_entry)
4872 {
4873 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4874 
4875 	list_del(&fib_entry->nexthop_group_node);
4876 	if (!list_empty(&nh_grp->fib_list))
4877 		return;
4878 
4879 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
4880 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
4881 		return;
4882 	}
4883 
4884 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
4885 }
4886 
4887 static bool
4888 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4889 {
4890 	struct mlxsw_sp_fib4_entry *fib4_entry;
4891 
4892 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4893 				  common);
4894 	return !fib4_entry->tos;
4895 }
4896 
4897 static bool
4898 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4899 {
4900 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4901 
4902 	switch (fib_entry->fib_node->fib->proto) {
4903 	case MLXSW_SP_L3_PROTO_IPV4:
4904 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
4905 			return false;
4906 		break;
4907 	case MLXSW_SP_L3_PROTO_IPV6:
4908 		break;
4909 	}
4910 
4911 	switch (fib_entry->type) {
4912 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4913 		return !!nh_group->nhgi->adj_index_valid;
4914 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4915 		return !!nh_group->nhgi->nh_rif;
4916 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4917 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4918 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4919 		return true;
4920 	default:
4921 		return false;
4922 	}
4923 }
4924 
4925 static struct mlxsw_sp_nexthop *
4926 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4927 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4928 {
4929 	int i;
4930 
4931 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4932 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
4933 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
4934 
4935 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
4936 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
4937 				    &rt->fib6_nh->fib_nh_gw6))
4938 			return nh;
4939 		continue;
4940 	}
4941 
4942 	return NULL;
4943 }
4944 
4945 static void
4946 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
4947 				      struct fib_entry_notifier_info *fen_info)
4948 {
4949 	u32 *p_dst = (u32 *) &fen_info->dst;
4950 	struct fib_rt_info fri;
4951 
4952 	fri.fi = fen_info->fi;
4953 	fri.tb_id = fen_info->tb_id;
4954 	fri.dst = cpu_to_be32(*p_dst);
4955 	fri.dst_len = fen_info->dst_len;
4956 	fri.tos = fen_info->tos;
4957 	fri.type = fen_info->type;
4958 	fri.offload = false;
4959 	fri.trap = false;
4960 	fri.offload_failed = true;
4961 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4962 }
4963 
4964 static void
4965 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4966 				 struct mlxsw_sp_fib_entry *fib_entry)
4967 {
4968 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4969 	int dst_len = fib_entry->fib_node->key.prefix_len;
4970 	struct mlxsw_sp_fib4_entry *fib4_entry;
4971 	struct fib_rt_info fri;
4972 	bool should_offload;
4973 
4974 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4975 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4976 				  common);
4977 	fri.fi = fib4_entry->fi;
4978 	fri.tb_id = fib4_entry->tb_id;
4979 	fri.dst = cpu_to_be32(*p_dst);
4980 	fri.dst_len = dst_len;
4981 	fri.tos = fib4_entry->tos;
4982 	fri.type = fib4_entry->type;
4983 	fri.offload = should_offload;
4984 	fri.trap = !should_offload;
4985 	fri.offload_failed = false;
4986 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4987 }
4988 
4989 static void
4990 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4991 				   struct mlxsw_sp_fib_entry *fib_entry)
4992 {
4993 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4994 	int dst_len = fib_entry->fib_node->key.prefix_len;
4995 	struct mlxsw_sp_fib4_entry *fib4_entry;
4996 	struct fib_rt_info fri;
4997 
4998 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4999 				  common);
5000 	fri.fi = fib4_entry->fi;
5001 	fri.tb_id = fib4_entry->tb_id;
5002 	fri.dst = cpu_to_be32(*p_dst);
5003 	fri.dst_len = dst_len;
5004 	fri.tos = fib4_entry->tos;
5005 	fri.type = fib4_entry->type;
5006 	fri.offload = false;
5007 	fri.trap = false;
5008 	fri.offload_failed = false;
5009 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5010 }
5011 
5012 #if IS_ENABLED(CONFIG_IPV6)
5013 static void
5014 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5015 				      struct fib6_info **rt_arr,
5016 				      unsigned int nrt6)
5017 {
5018 	int i;
5019 
5020 	/* In IPv6 a multipath route is represented using multiple routes, so
5021 	 * we need to set the flags on all of them.
5022 	 */
5023 	for (i = 0; i < nrt6; i++)
5024 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5025 				       false, false, true);
5026 }
5027 #else
5028 static void
5029 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5030 				      struct fib6_info **rt_arr,
5031 				      unsigned int nrt6)
5032 {
5033 }
5034 #endif
5035 
5036 #if IS_ENABLED(CONFIG_IPV6)
5037 static void
5038 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5039 				 struct mlxsw_sp_fib_entry *fib_entry)
5040 {
5041 	struct mlxsw_sp_fib6_entry *fib6_entry;
5042 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5043 	bool should_offload;
5044 
5045 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5046 
5047 	/* In IPv6 a multipath route is represented using multiple routes, so
5048 	 * we need to set the flags on all of them.
5049 	 */
5050 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5051 				  common);
5052 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5053 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5054 				       should_offload, !should_offload, false);
5055 }
5056 #else
5057 static void
5058 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5059 				 struct mlxsw_sp_fib_entry *fib_entry)
5060 {
5061 }
5062 #endif
5063 
5064 #if IS_ENABLED(CONFIG_IPV6)
5065 static void
5066 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5067 				   struct mlxsw_sp_fib_entry *fib_entry)
5068 {
5069 	struct mlxsw_sp_fib6_entry *fib6_entry;
5070 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5071 
5072 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5073 				  common);
5074 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5075 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5076 				       false, false, false);
5077 }
5078 #else
5079 static void
5080 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5081 				   struct mlxsw_sp_fib_entry *fib_entry)
5082 {
5083 }
5084 #endif
5085 
5086 static void
5087 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5088 				struct mlxsw_sp_fib_entry *fib_entry)
5089 {
5090 	switch (fib_entry->fib_node->fib->proto) {
5091 	case MLXSW_SP_L3_PROTO_IPV4:
5092 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5093 		break;
5094 	case MLXSW_SP_L3_PROTO_IPV6:
5095 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5096 		break;
5097 	}
5098 }
5099 
5100 static void
5101 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5102 				  struct mlxsw_sp_fib_entry *fib_entry)
5103 {
5104 	switch (fib_entry->fib_node->fib->proto) {
5105 	case MLXSW_SP_L3_PROTO_IPV4:
5106 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5107 		break;
5108 	case MLXSW_SP_L3_PROTO_IPV6:
5109 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5110 		break;
5111 	}
5112 }
5113 
5114 static void
5115 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5116 				    struct mlxsw_sp_fib_entry *fib_entry,
5117 				    enum mlxsw_sp_fib_entry_op op)
5118 {
5119 	switch (op) {
5120 	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5121 	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5122 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5123 		break;
5124 	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5125 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5126 		break;
5127 	default:
5128 		break;
5129 	}
5130 }
5131 
5132 struct mlxsw_sp_fib_entry_op_ctx_basic {
5133 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5134 };
5135 
5136 static void
5137 mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5138 					enum mlxsw_sp_l3proto proto,
5139 					enum mlxsw_sp_fib_entry_op op,
5140 					u16 virtual_router, u8 prefix_len,
5141 					unsigned char *addr,
5142 					struct mlxsw_sp_fib_entry_priv *priv)
5143 {
5144 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5145 	enum mlxsw_reg_ralxx_protocol ralxx_proto;
5146 	char *ralue_pl = op_ctx_basic->ralue_pl;
5147 	enum mlxsw_reg_ralue_op ralue_op;
5148 
5149 	ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
5150 
5151 	switch (op) {
5152 	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5153 	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5154 		ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
5155 		break;
5156 	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5157 		ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
5158 		break;
5159 	default:
5160 		WARN_ON_ONCE(1);
5161 		return;
5162 	}
5163 
5164 	switch (proto) {
5165 	case MLXSW_SP_L3_PROTO_IPV4:
5166 		mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
5167 				      virtual_router, prefix_len, (u32 *) addr);
5168 		break;
5169 	case MLXSW_SP_L3_PROTO_IPV6:
5170 		mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
5171 				      virtual_router, prefix_len, addr);
5172 		break;
5173 	}
5174 }
5175 
5176 static void
5177 mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5178 						   enum mlxsw_reg_ralue_trap_action trap_action,
5179 						   u16 trap_id, u32 adjacency_index, u16 ecmp_size)
5180 {
5181 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5182 
5183 	mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
5184 					trap_id, adjacency_index, ecmp_size);
5185 }
5186 
5187 static void
5188 mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5189 						  enum mlxsw_reg_ralue_trap_action trap_action,
5190 						  u16 trap_id, u16 local_erif)
5191 {
5192 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5193 
5194 	mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
5195 				       trap_id, local_erif);
5196 }
5197 
5198 static void
5199 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
5200 {
5201 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5202 
5203 	mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
5204 }
5205 
5206 static void
5207 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5208 						      u32 tunnel_ptr)
5209 {
5210 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5211 
5212 	mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
5213 }
5214 
5215 static int
5216 mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5217 					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5218 					  bool *postponed_for_bulk)
5219 {
5220 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5221 
5222 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5223 			       op_ctx_basic->ralue_pl);
5224 }
5225 
5226 static bool
5227 mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
5228 {
5229 	return true;
5230 }
5231 
5232 static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5233 				    struct mlxsw_sp_fib_entry *fib_entry,
5234 				    enum mlxsw_sp_fib_entry_op op)
5235 {
5236 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5237 
5238 	mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
5239 	fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
5240 				    fib_entry->fib_node->key.prefix_len,
5241 				    fib_entry->fib_node->key.addr,
5242 				    fib_entry->priv);
5243 }
5244 
5245 static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5246 				     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5247 				     const struct mlxsw_sp_router_ll_ops *ll_ops)
5248 {
5249 	bool postponed_for_bulk = false;
5250 	int err;
5251 
5252 	err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
5253 	if (!postponed_for_bulk)
5254 		mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
5255 	return err;
5256 }
5257 
5258 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp)
5259 {
5260 	enum mlxsw_reg_ratr_trap_action trap_action;
5261 	char ratr_pl[MLXSW_REG_RATR_LEN];
5262 	int err;
5263 
5264 	if (mlxsw_sp->router->adj_discard_index_valid)
5265 		return 0;
5266 
5267 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
5268 				  &mlxsw_sp->router->adj_discard_index);
5269 	if (err)
5270 		return err;
5271 
5272 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
5273 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
5274 			    MLXSW_REG_RATR_TYPE_ETHERNET,
5275 			    mlxsw_sp->router->adj_discard_index,
5276 			    mlxsw_sp->router->lb_rif_index);
5277 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
5278 	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
5279 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5280 	if (err)
5281 		goto err_ratr_write;
5282 
5283 	mlxsw_sp->router->adj_discard_index_valid = true;
5284 
5285 	return 0;
5286 
5287 err_ratr_write:
5288 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
5289 			   mlxsw_sp->router->adj_discard_index);
5290 	return err;
5291 }
5292 
5293 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5294 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5295 					struct mlxsw_sp_fib_entry *fib_entry,
5296 					enum mlxsw_sp_fib_entry_op op)
5297 {
5298 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5299 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5300 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5301 	enum mlxsw_reg_ralue_trap_action trap_action;
5302 	u16 trap_id = 0;
5303 	u32 adjacency_index = 0;
5304 	u16 ecmp_size = 0;
5305 	int err;
5306 
5307 	/* In case the nexthop group adjacency index is valid, use it
5308 	 * with provided ECMP size. Otherwise, setup trap and pass
5309 	 * traffic to kernel.
5310 	 */
5311 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5312 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5313 		adjacency_index = nhgi->adj_index;
5314 		ecmp_size = nhgi->ecmp_size;
5315 	} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
5316 		err = mlxsw_sp_adj_discard_write(mlxsw_sp);
5317 		if (err)
5318 			return err;
5319 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5320 		adjacency_index = mlxsw_sp->router->adj_discard_index;
5321 		ecmp_size = 1;
5322 	} else {
5323 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5324 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5325 	}
5326 
5327 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5328 	ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
5329 					  adjacency_index, ecmp_size);
5330 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5331 }
5332 
5333 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5334 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5335 				       struct mlxsw_sp_fib_entry *fib_entry,
5336 				       enum mlxsw_sp_fib_entry_op op)
5337 {
5338 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5339 	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
5340 	enum mlxsw_reg_ralue_trap_action trap_action;
5341 	u16 trap_id = 0;
5342 	u16 rif_index = 0;
5343 
5344 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5345 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5346 		rif_index = rif->rif_index;
5347 	} else {
5348 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5349 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5350 	}
5351 
5352 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5353 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
5354 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5355 }
5356 
5357 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
5358 				      struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5359 				      struct mlxsw_sp_fib_entry *fib_entry,
5360 				      enum mlxsw_sp_fib_entry_op op)
5361 {
5362 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5363 
5364 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5365 	ll_ops->fib_entry_act_ip2me_pack(op_ctx);
5366 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5367 }
5368 
5369 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
5370 					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5371 					   struct mlxsw_sp_fib_entry *fib_entry,
5372 					   enum mlxsw_sp_fib_entry_op op)
5373 {
5374 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5375 	enum mlxsw_reg_ralue_trap_action trap_action;
5376 
5377 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
5378 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5379 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
5380 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5381 }
5382 
5383 static int
5384 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
5385 				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5386 				  struct mlxsw_sp_fib_entry *fib_entry,
5387 				  enum mlxsw_sp_fib_entry_op op)
5388 {
5389 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5390 	enum mlxsw_reg_ralue_trap_action trap_action;
5391 	u16 trap_id;
5392 
5393 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5394 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
5395 
5396 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5397 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
5398 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5399 }
5400 
5401 static int
5402 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
5403 				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5404 				 struct mlxsw_sp_fib_entry *fib_entry,
5405 				 enum mlxsw_sp_fib_entry_op op)
5406 {
5407 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5408 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
5409 	const struct mlxsw_sp_ipip_ops *ipip_ops;
5410 	int err;
5411 
5412 	if (WARN_ON(!ipip_entry))
5413 		return -EINVAL;
5414 
5415 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5416 	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
5417 				     fib_entry->decap.tunnel_index);
5418 	if (err)
5419 		return err;
5420 
5421 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5422 	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
5423 					     fib_entry->decap.tunnel_index);
5424 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5425 }
5426 
5427 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
5428 					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5429 					   struct mlxsw_sp_fib_entry *fib_entry,
5430 					   enum mlxsw_sp_fib_entry_op op)
5431 {
5432 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5433 
5434 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5435 	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
5436 					     fib_entry->decap.tunnel_index);
5437 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5438 }
5439 
5440 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
5441 				   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5442 				   struct mlxsw_sp_fib_entry *fib_entry,
5443 				   enum mlxsw_sp_fib_entry_op op)
5444 {
5445 	switch (fib_entry->type) {
5446 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5447 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
5448 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5449 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
5450 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
5451 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
5452 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5453 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
5454 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
5455 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
5456 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5457 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
5458 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5459 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
5460 	}
5461 	return -EINVAL;
5462 }
5463 
5464 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
5465 				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5466 				 struct mlxsw_sp_fib_entry *fib_entry,
5467 				 enum mlxsw_sp_fib_entry_op op)
5468 {
5469 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
5470 
5471 	if (err)
5472 		return err;
5473 
5474 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
5475 
5476 	return err;
5477 }
5478 
5479 static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
5480 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5481 				       struct mlxsw_sp_fib_entry *fib_entry,
5482 				       bool is_new)
5483 {
5484 	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
5485 				     is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
5486 					      MLXSW_SP_FIB_ENTRY_OP_UPDATE);
5487 }
5488 
5489 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
5490 				     struct mlxsw_sp_fib_entry *fib_entry)
5491 {
5492 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
5493 
5494 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
5495 	return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
5496 }
5497 
5498 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
5499 				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5500 				  struct mlxsw_sp_fib_entry *fib_entry)
5501 {
5502 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5503 
5504 	if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
5505 		return 0;
5506 	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
5507 				     MLXSW_SP_FIB_ENTRY_OP_DELETE);
5508 }
5509 
5510 static int
5511 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5512 			     const struct fib_entry_notifier_info *fen_info,
5513 			     struct mlxsw_sp_fib_entry *fib_entry)
5514 {
5515 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
5516 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
5517 	struct mlxsw_sp_router *router = mlxsw_sp->router;
5518 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
5519 	int ifindex = nhgi->nexthops[0].ifindex;
5520 	struct mlxsw_sp_ipip_entry *ipip_entry;
5521 
5522 	switch (fen_info->type) {
5523 	case RTN_LOCAL:
5524 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
5525 							       MLXSW_SP_L3_PROTO_IPV4, dip);
5526 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
5527 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
5528 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
5529 							     fib_entry,
5530 							     ipip_entry);
5531 		}
5532 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
5533 						 MLXSW_SP_L3_PROTO_IPV4,
5534 						 &dip)) {
5535 			u32 tunnel_index;
5536 
5537 			tunnel_index = router->nve_decap_config.tunnel_index;
5538 			fib_entry->decap.tunnel_index = tunnel_index;
5539 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
5540 			return 0;
5541 		}
5542 		fallthrough;
5543 	case RTN_BROADCAST:
5544 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5545 		return 0;
5546 	case RTN_BLACKHOLE:
5547 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
5548 		return 0;
5549 	case RTN_UNREACHABLE:
5550 	case RTN_PROHIBIT:
5551 		/* Packets hitting these routes need to be trapped, but
5552 		 * can do so with a lower priority than packets directed
5553 		 * at the host, so use action type local instead of trap.
5554 		 */
5555 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
5556 		return 0;
5557 	case RTN_UNICAST:
5558 		if (nhgi->gateway)
5559 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5560 		else
5561 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5562 		return 0;
5563 	default:
5564 		return -EINVAL;
5565 	}
5566 }
5567 
5568 static void
5569 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
5570 			       struct mlxsw_sp_fib_entry *fib_entry)
5571 {
5572 	switch (fib_entry->type) {
5573 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5574 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
5575 		break;
5576 	default:
5577 		break;
5578 	}
5579 }
5580 
5581 static struct mlxsw_sp_fib4_entry *
5582 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
5583 			   struct mlxsw_sp_fib_node *fib_node,
5584 			   const struct fib_entry_notifier_info *fen_info)
5585 {
5586 	struct mlxsw_sp_fib4_entry *fib4_entry;
5587 	struct mlxsw_sp_fib_entry *fib_entry;
5588 	int err;
5589 
5590 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
5591 	if (!fib4_entry)
5592 		return ERR_PTR(-ENOMEM);
5593 	fib_entry = &fib4_entry->common;
5594 
5595 	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
5596 	if (IS_ERR(fib_entry->priv)) {
5597 		err = PTR_ERR(fib_entry->priv);
5598 		goto err_fib_entry_priv_create;
5599 	}
5600 
5601 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
5602 	if (err)
5603 		goto err_nexthop4_group_get;
5604 
5605 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
5606 					     fib_node->fib);
5607 	if (err)
5608 		goto err_nexthop_group_vr_link;
5609 
5610 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
5611 	if (err)
5612 		goto err_fib4_entry_type_set;
5613 
5614 	fib4_entry->fi = fen_info->fi;
5615 	fib_info_hold(fib4_entry->fi);
5616 	fib4_entry->tb_id = fen_info->tb_id;
5617 	fib4_entry->type = fen_info->type;
5618 	fib4_entry->tos = fen_info->tos;
5619 
5620 	fib_entry->fib_node = fib_node;
5621 
5622 	return fib4_entry;
5623 
5624 err_fib4_entry_type_set:
5625 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
5626 err_nexthop_group_vr_link:
5627 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
5628 err_nexthop4_group_get:
5629 	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
5630 err_fib_entry_priv_create:
5631 	kfree(fib4_entry);
5632 	return ERR_PTR(err);
5633 }
5634 
5635 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5636 					struct mlxsw_sp_fib4_entry *fib4_entry)
5637 {
5638 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
5639 
5640 	fib_info_put(fib4_entry->fi);
5641 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
5642 	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
5643 					 fib_node->fib);
5644 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
5645 	mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
5646 	kfree(fib4_entry);
5647 }
5648 
5649 static struct mlxsw_sp_fib4_entry *
5650 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5651 			   const struct fib_entry_notifier_info *fen_info)
5652 {
5653 	struct mlxsw_sp_fib4_entry *fib4_entry;
5654 	struct mlxsw_sp_fib_node *fib_node;
5655 	struct mlxsw_sp_fib *fib;
5656 	struct mlxsw_sp_vr *vr;
5657 
5658 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
5659 	if (!vr)
5660 		return NULL;
5661 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
5662 
5663 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
5664 					    sizeof(fen_info->dst),
5665 					    fen_info->dst_len);
5666 	if (!fib_node)
5667 		return NULL;
5668 
5669 	fib4_entry = container_of(fib_node->fib_entry,
5670 				  struct mlxsw_sp_fib4_entry, common);
5671 	if (fib4_entry->tb_id == fen_info->tb_id &&
5672 	    fib4_entry->tos == fen_info->tos &&
5673 	    fib4_entry->type == fen_info->type &&
5674 	    fib4_entry->fi == fen_info->fi)
5675 		return fib4_entry;
5676 
5677 	return NULL;
5678 }
5679 
5680 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
5681 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
5682 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
5683 	.key_len = sizeof(struct mlxsw_sp_fib_key),
5684 	.automatic_shrinking = true,
5685 };
5686 
5687 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
5688 				    struct mlxsw_sp_fib_node *fib_node)
5689 {
5690 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
5691 				      mlxsw_sp_fib_ht_params);
5692 }
5693 
5694 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
5695 				     struct mlxsw_sp_fib_node *fib_node)
5696 {
5697 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
5698 			       mlxsw_sp_fib_ht_params);
5699 }
5700 
5701 static struct mlxsw_sp_fib_node *
5702 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
5703 			 size_t addr_len, unsigned char prefix_len)
5704 {
5705 	struct mlxsw_sp_fib_key key;
5706 
5707 	memset(&key, 0, sizeof(key));
5708 	memcpy(key.addr, addr, addr_len);
5709 	key.prefix_len = prefix_len;
5710 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
5711 }
5712 
5713 static struct mlxsw_sp_fib_node *
5714 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
5715 			 size_t addr_len, unsigned char prefix_len)
5716 {
5717 	struct mlxsw_sp_fib_node *fib_node;
5718 
5719 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
5720 	if (!fib_node)
5721 		return NULL;
5722 
5723 	list_add(&fib_node->list, &fib->node_list);
5724 	memcpy(fib_node->key.addr, addr, addr_len);
5725 	fib_node->key.prefix_len = prefix_len;
5726 
5727 	return fib_node;
5728 }
5729 
5730 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
5731 {
5732 	list_del(&fib_node->list);
5733 	kfree(fib_node);
5734 }
5735 
5736 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
5737 				      struct mlxsw_sp_fib_node *fib_node)
5738 {
5739 	struct mlxsw_sp_prefix_usage req_prefix_usage;
5740 	struct mlxsw_sp_fib *fib = fib_node->fib;
5741 	struct mlxsw_sp_lpm_tree *lpm_tree;
5742 	int err;
5743 
5744 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
5745 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
5746 		goto out;
5747 
5748 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
5749 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
5750 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
5751 					 fib->proto);
5752 	if (IS_ERR(lpm_tree))
5753 		return PTR_ERR(lpm_tree);
5754 
5755 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
5756 	if (err)
5757 		goto err_lpm_tree_replace;
5758 
5759 out:
5760 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
5761 	return 0;
5762 
5763 err_lpm_tree_replace:
5764 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
5765 	return err;
5766 }
5767 
5768 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
5769 					 struct mlxsw_sp_fib_node *fib_node)
5770 {
5771 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
5772 	struct mlxsw_sp_prefix_usage req_prefix_usage;
5773 	struct mlxsw_sp_fib *fib = fib_node->fib;
5774 	int err;
5775 
5776 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
5777 		return;
5778 	/* Try to construct a new LPM tree from the current prefix usage
5779 	 * minus the unused one. If we fail, continue using the old one.
5780 	 */
5781 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
5782 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
5783 				    fib_node->key.prefix_len);
5784 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
5785 					 fib->proto);
5786 	if (IS_ERR(lpm_tree))
5787 		return;
5788 
5789 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
5790 	if (err)
5791 		goto err_lpm_tree_replace;
5792 
5793 	return;
5794 
5795 err_lpm_tree_replace:
5796 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
5797 }
5798 
5799 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
5800 				  struct mlxsw_sp_fib_node *fib_node,
5801 				  struct mlxsw_sp_fib *fib)
5802 {
5803 	int err;
5804 
5805 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
5806 	if (err)
5807 		return err;
5808 	fib_node->fib = fib;
5809 
5810 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
5811 	if (err)
5812 		goto err_fib_lpm_tree_link;
5813 
5814 	return 0;
5815 
5816 err_fib_lpm_tree_link:
5817 	fib_node->fib = NULL;
5818 	mlxsw_sp_fib_node_remove(fib, fib_node);
5819 	return err;
5820 }
5821 
5822 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
5823 				   struct mlxsw_sp_fib_node *fib_node)
5824 {
5825 	struct mlxsw_sp_fib *fib = fib_node->fib;
5826 
5827 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
5828 	fib_node->fib = NULL;
5829 	mlxsw_sp_fib_node_remove(fib, fib_node);
5830 }
5831 
5832 static struct mlxsw_sp_fib_node *
5833 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
5834 		      size_t addr_len, unsigned char prefix_len,
5835 		      enum mlxsw_sp_l3proto proto)
5836 {
5837 	struct mlxsw_sp_fib_node *fib_node;
5838 	struct mlxsw_sp_fib *fib;
5839 	struct mlxsw_sp_vr *vr;
5840 	int err;
5841 
5842 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
5843 	if (IS_ERR(vr))
5844 		return ERR_CAST(vr);
5845 	fib = mlxsw_sp_vr_fib(vr, proto);
5846 
5847 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
5848 	if (fib_node)
5849 		return fib_node;
5850 
5851 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
5852 	if (!fib_node) {
5853 		err = -ENOMEM;
5854 		goto err_fib_node_create;
5855 	}
5856 
5857 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
5858 	if (err)
5859 		goto err_fib_node_init;
5860 
5861 	return fib_node;
5862 
5863 err_fib_node_init:
5864 	mlxsw_sp_fib_node_destroy(fib_node);
5865 err_fib_node_create:
5866 	mlxsw_sp_vr_put(mlxsw_sp, vr);
5867 	return ERR_PTR(err);
5868 }
5869 
5870 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
5871 				  struct mlxsw_sp_fib_node *fib_node)
5872 {
5873 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
5874 
5875 	if (fib_node->fib_entry)
5876 		return;
5877 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
5878 	mlxsw_sp_fib_node_destroy(fib_node);
5879 	mlxsw_sp_vr_put(mlxsw_sp, vr);
5880 }
5881 
5882 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
5883 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5884 					struct mlxsw_sp_fib_entry *fib_entry)
5885 {
5886 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
5887 	bool is_new = !fib_node->fib_entry;
5888 	int err;
5889 
5890 	fib_node->fib_entry = fib_entry;
5891 
5892 	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
5893 	if (err)
5894 		goto err_fib_entry_update;
5895 
5896 	return 0;
5897 
5898 err_fib_entry_update:
5899 	fib_node->fib_entry = NULL;
5900 	return err;
5901 }
5902 
5903 static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5904 					    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5905 					    struct mlxsw_sp_fib_entry *fib_entry)
5906 {
5907 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
5908 	int err;
5909 
5910 	err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
5911 	fib_node->fib_entry = NULL;
5912 	return err;
5913 }
5914 
5915 static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5916 					   struct mlxsw_sp_fib_entry *fib_entry)
5917 {
5918 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
5919 
5920 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
5921 	__mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
5922 }
5923 
5924 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
5925 {
5926 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
5927 	struct mlxsw_sp_fib4_entry *fib4_replaced;
5928 
5929 	if (!fib_node->fib_entry)
5930 		return true;
5931 
5932 	fib4_replaced = container_of(fib_node->fib_entry,
5933 				     struct mlxsw_sp_fib4_entry, common);
5934 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
5935 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
5936 		return false;
5937 
5938 	return true;
5939 }
5940 
5941 static int
5942 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
5943 			     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5944 			     const struct fib_entry_notifier_info *fen_info)
5945 {
5946 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
5947 	struct mlxsw_sp_fib_entry *replaced;
5948 	struct mlxsw_sp_fib_node *fib_node;
5949 	int err;
5950 
5951 	if (mlxsw_sp->router->aborted)
5952 		return 0;
5953 
5954 	if (fen_info->fi->nh &&
5955 	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
5956 		return 0;
5957 
5958 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
5959 					 &fen_info->dst, sizeof(fen_info->dst),
5960 					 fen_info->dst_len,
5961 					 MLXSW_SP_L3_PROTO_IPV4);
5962 	if (IS_ERR(fib_node)) {
5963 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
5964 		return PTR_ERR(fib_node);
5965 	}
5966 
5967 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
5968 	if (IS_ERR(fib4_entry)) {
5969 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
5970 		err = PTR_ERR(fib4_entry);
5971 		goto err_fib4_entry_create;
5972 	}
5973 
5974 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
5975 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5976 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5977 		return 0;
5978 	}
5979 
5980 	replaced = fib_node->fib_entry;
5981 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
5982 	if (err) {
5983 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
5984 		goto err_fib_node_entry_link;
5985 	}
5986 
5987 	/* Nothing to replace */
5988 	if (!replaced)
5989 		return 0;
5990 
5991 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
5992 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
5993 				     common);
5994 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
5995 
5996 	return 0;
5997 
5998 err_fib_node_entry_link:
5999 	fib_node->fib_entry = replaced;
6000 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6001 err_fib4_entry_create:
6002 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6003 	return err;
6004 }
6005 
6006 static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6007 				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6008 				    struct fib_entry_notifier_info *fen_info)
6009 {
6010 	struct mlxsw_sp_fib4_entry *fib4_entry;
6011 	struct mlxsw_sp_fib_node *fib_node;
6012 	int err;
6013 
6014 	if (mlxsw_sp->router->aborted)
6015 		return 0;
6016 
6017 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6018 	if (!fib4_entry)
6019 		return 0;
6020 	fib_node = fib4_entry->common.fib_node;
6021 
6022 	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
6023 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6024 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6025 	return err;
6026 }
6027 
6028 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6029 {
6030 	/* Multicast routes aren't supported, so ignore them. Neighbour
6031 	 * Discovery packets are specifically trapped.
6032 	 */
6033 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6034 		return true;
6035 
6036 	/* Cloned routes are irrelevant in the forwarding path. */
6037 	if (rt->fib6_flags & RTF_CACHE)
6038 		return true;
6039 
6040 	return false;
6041 }
6042 
6043 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6044 {
6045 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6046 
6047 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6048 	if (!mlxsw_sp_rt6)
6049 		return ERR_PTR(-ENOMEM);
6050 
6051 	/* In case of route replace, replaced route is deleted with
6052 	 * no notification. Take reference to prevent accessing freed
6053 	 * memory.
6054 	 */
6055 	mlxsw_sp_rt6->rt = rt;
6056 	fib6_info_hold(rt);
6057 
6058 	return mlxsw_sp_rt6;
6059 }
6060 
6061 #if IS_ENABLED(CONFIG_IPV6)
6062 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6063 {
6064 	fib6_info_release(rt);
6065 }
6066 #else
6067 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6068 {
6069 }
6070 #endif
6071 
6072 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6073 {
6074 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6075 
6076 	if (!mlxsw_sp_rt6->rt->nh)
6077 		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6078 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6079 	kfree(mlxsw_sp_rt6);
6080 }
6081 
6082 static struct fib6_info *
6083 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6084 {
6085 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6086 				list)->rt;
6087 }
6088 
6089 static struct mlxsw_sp_rt6 *
6090 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6091 			    const struct fib6_info *rt)
6092 {
6093 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6094 
6095 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6096 		if (mlxsw_sp_rt6->rt == rt)
6097 			return mlxsw_sp_rt6;
6098 	}
6099 
6100 	return NULL;
6101 }
6102 
6103 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6104 					const struct fib6_info *rt,
6105 					enum mlxsw_sp_ipip_type *ret)
6106 {
6107 	return rt->fib6_nh->fib_nh_dev &&
6108 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6109 }
6110 
6111 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6112 				  struct mlxsw_sp_nexthop_group *nh_grp,
6113 				  struct mlxsw_sp_nexthop *nh,
6114 				  const struct fib6_info *rt)
6115 {
6116 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6117 
6118 	nh->nhgi = nh_grp->nhgi;
6119 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6120 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6121 #if IS_ENABLED(CONFIG_IPV6)
6122 	nh->neigh_tbl = &nd_tbl;
6123 #endif
6124 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6125 
6126 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6127 
6128 	if (!dev)
6129 		return 0;
6130 	nh->ifindex = dev->ifindex;
6131 
6132 	return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6133 }
6134 
6135 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6136 				   struct mlxsw_sp_nexthop *nh)
6137 {
6138 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6139 	list_del(&nh->router_list_node);
6140 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6141 }
6142 
6143 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6144 				    const struct fib6_info *rt)
6145 {
6146 	return rt->fib6_nh->fib_nh_gw_family ||
6147 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6148 }
6149 
6150 static int
6151 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6152 				  struct mlxsw_sp_nexthop_group *nh_grp,
6153 				  struct mlxsw_sp_fib6_entry *fib6_entry)
6154 {
6155 	struct mlxsw_sp_nexthop_group_info *nhgi;
6156 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6157 	struct mlxsw_sp_nexthop *nh;
6158 	int err, i;
6159 
6160 	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6161 		       GFP_KERNEL);
6162 	if (!nhgi)
6163 		return -ENOMEM;
6164 	nh_grp->nhgi = nhgi;
6165 	nhgi->nh_grp = nh_grp;
6166 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6167 					struct mlxsw_sp_rt6, list);
6168 	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6169 	nhgi->count = fib6_entry->nrt6;
6170 	for (i = 0; i < nhgi->count; i++) {
6171 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
6172 
6173 		nh = &nhgi->nexthops[i];
6174 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6175 		if (err)
6176 			goto err_nexthop6_init;
6177 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6178 	}
6179 	nh_grp->nhgi = nhgi;
6180 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6181 	if (err)
6182 		goto err_group_refresh;
6183 
6184 	return 0;
6185 
6186 err_group_refresh:
6187 	i = nhgi->count;
6188 err_nexthop6_init:
6189 	for (i--; i >= 0; i--) {
6190 		nh = &nhgi->nexthops[i];
6191 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6192 	}
6193 	kfree(nhgi);
6194 	return err;
6195 }
6196 
6197 static void
6198 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6199 				  struct mlxsw_sp_nexthop_group *nh_grp)
6200 {
6201 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6202 	int i;
6203 
6204 	for (i = nhgi->count - 1; i >= 0; i--) {
6205 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6206 
6207 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6208 	}
6209 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6210 	WARN_ON_ONCE(nhgi->adj_index_valid);
6211 	kfree(nhgi);
6212 }
6213 
6214 static struct mlxsw_sp_nexthop_group *
6215 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6216 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6217 {
6218 	struct mlxsw_sp_nexthop_group *nh_grp;
6219 	int err;
6220 
6221 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6222 	if (!nh_grp)
6223 		return ERR_PTR(-ENOMEM);
6224 	INIT_LIST_HEAD(&nh_grp->vr_list);
6225 	err = rhashtable_init(&nh_grp->vr_ht,
6226 			      &mlxsw_sp_nexthop_group_vr_ht_params);
6227 	if (err)
6228 		goto err_nexthop_group_vr_ht_init;
6229 	INIT_LIST_HEAD(&nh_grp->fib_list);
6230 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6231 
6232 	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6233 	if (err)
6234 		goto err_nexthop_group_info_init;
6235 
6236 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6237 	if (err)
6238 		goto err_nexthop_group_insert;
6239 
6240 	nh_grp->can_destroy = true;
6241 
6242 	return nh_grp;
6243 
6244 err_nexthop_group_insert:
6245 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6246 err_nexthop_group_info_init:
6247 	rhashtable_destroy(&nh_grp->vr_ht);
6248 err_nexthop_group_vr_ht_init:
6249 	kfree(nh_grp);
6250 	return ERR_PTR(err);
6251 }
6252 
6253 static void
6254 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6255 				struct mlxsw_sp_nexthop_group *nh_grp)
6256 {
6257 	if (!nh_grp->can_destroy)
6258 		return;
6259 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6260 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6261 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6262 	rhashtable_destroy(&nh_grp->vr_ht);
6263 	kfree(nh_grp);
6264 }
6265 
6266 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6267 				       struct mlxsw_sp_fib6_entry *fib6_entry)
6268 {
6269 	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6270 	struct mlxsw_sp_nexthop_group *nh_grp;
6271 
6272 	if (rt->nh) {
6273 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6274 							   rt->nh->id);
6275 		if (WARN_ON_ONCE(!nh_grp))
6276 			return -EINVAL;
6277 		goto out;
6278 	}
6279 
6280 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6281 	if (!nh_grp) {
6282 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6283 		if (IS_ERR(nh_grp))
6284 			return PTR_ERR(nh_grp);
6285 	}
6286 
6287 	/* The route and the nexthop are described by the same struct, so we
6288 	 * need to the update the nexthop offload indication for the new route.
6289 	 */
6290 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6291 
6292 out:
6293 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6294 		      &nh_grp->fib_list);
6295 	fib6_entry->common.nh_group = nh_grp;
6296 
6297 	return 0;
6298 }
6299 
6300 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6301 					struct mlxsw_sp_fib_entry *fib_entry)
6302 {
6303 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6304 
6305 	list_del(&fib_entry->nexthop_group_node);
6306 	if (!list_empty(&nh_grp->fib_list))
6307 		return;
6308 
6309 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6310 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6311 		return;
6312 	}
6313 
6314 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
6315 }
6316 
6317 static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6318 					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6319 					  struct mlxsw_sp_fib6_entry *fib6_entry)
6320 {
6321 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6322 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6323 	int err;
6324 
6325 	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6326 	fib6_entry->common.nh_group = NULL;
6327 	list_del(&fib6_entry->common.nexthop_group_node);
6328 
6329 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6330 	if (err)
6331 		goto err_nexthop6_group_get;
6332 
6333 	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6334 					     fib_node->fib);
6335 	if (err)
6336 		goto err_nexthop_group_vr_link;
6337 
6338 	/* In case this entry is offloaded, then the adjacency index
6339 	 * currently associated with it in the device's table is that
6340 	 * of the old group. Start using the new one instead.
6341 	 */
6342 	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
6343 					  &fib6_entry->common, false);
6344 	if (err)
6345 		goto err_fib_entry_update;
6346 
6347 	if (list_empty(&old_nh_grp->fib_list))
6348 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6349 
6350 	return 0;
6351 
6352 err_fib_entry_update:
6353 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6354 					 fib_node->fib);
6355 err_nexthop_group_vr_link:
6356 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6357 err_nexthop6_group_get:
6358 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6359 		      &old_nh_grp->fib_list);
6360 	fib6_entry->common.nh_group = old_nh_grp;
6361 	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6362 	return err;
6363 }
6364 
6365 static int
6366 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6367 				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6368 				struct mlxsw_sp_fib6_entry *fib6_entry,
6369 				struct fib6_info **rt_arr, unsigned int nrt6)
6370 {
6371 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6372 	int err, i;
6373 
6374 	for (i = 0; i < nrt6; i++) {
6375 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6376 		if (IS_ERR(mlxsw_sp_rt6)) {
6377 			err = PTR_ERR(mlxsw_sp_rt6);
6378 			goto err_rt6_create;
6379 		}
6380 
6381 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6382 		fib6_entry->nrt6++;
6383 	}
6384 
6385 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
6386 	if (err)
6387 		goto err_nexthop6_group_update;
6388 
6389 	return 0;
6390 
6391 err_nexthop6_group_update:
6392 	i = nrt6;
6393 err_rt6_create:
6394 	for (i--; i >= 0; i--) {
6395 		fib6_entry->nrt6--;
6396 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6397 					       struct mlxsw_sp_rt6, list);
6398 		list_del(&mlxsw_sp_rt6->list);
6399 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6400 	}
6401 	return err;
6402 }
6403 
6404 static void
6405 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
6406 				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6407 				struct mlxsw_sp_fib6_entry *fib6_entry,
6408 				struct fib6_info **rt_arr, unsigned int nrt6)
6409 {
6410 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6411 	int i;
6412 
6413 	for (i = 0; i < nrt6; i++) {
6414 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
6415 							   rt_arr[i]);
6416 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
6417 			continue;
6418 
6419 		fib6_entry->nrt6--;
6420 		list_del(&mlxsw_sp_rt6->list);
6421 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6422 	}
6423 
6424 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
6425 }
6426 
6427 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6428 					 struct mlxsw_sp_fib_entry *fib_entry,
6429 					 const struct fib6_info *rt)
6430 {
6431 	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
6432 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6433 	else if (rt->fib6_type == RTN_BLACKHOLE)
6434 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6435 	else if (rt->fib6_flags & RTF_REJECT)
6436 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6437 	else if (fib_entry->nh_group->nhgi->gateway)
6438 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6439 	else
6440 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6441 }
6442 
6443 static void
6444 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
6445 {
6446 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
6447 
6448 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
6449 				 list) {
6450 		fib6_entry->nrt6--;
6451 		list_del(&mlxsw_sp_rt6->list);
6452 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6453 	}
6454 }
6455 
6456 static struct mlxsw_sp_fib6_entry *
6457 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
6458 			   struct mlxsw_sp_fib_node *fib_node,
6459 			   struct fib6_info **rt_arr, unsigned int nrt6)
6460 {
6461 	struct mlxsw_sp_fib6_entry *fib6_entry;
6462 	struct mlxsw_sp_fib_entry *fib_entry;
6463 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6464 	int err, i;
6465 
6466 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
6467 	if (!fib6_entry)
6468 		return ERR_PTR(-ENOMEM);
6469 	fib_entry = &fib6_entry->common;
6470 
6471 	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
6472 	if (IS_ERR(fib_entry->priv)) {
6473 		err = PTR_ERR(fib_entry->priv);
6474 		goto err_fib_entry_priv_create;
6475 	}
6476 
6477 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
6478 
6479 	for (i = 0; i < nrt6; i++) {
6480 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6481 		if (IS_ERR(mlxsw_sp_rt6)) {
6482 			err = PTR_ERR(mlxsw_sp_rt6);
6483 			goto err_rt6_create;
6484 		}
6485 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6486 		fib6_entry->nrt6++;
6487 	}
6488 
6489 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6490 	if (err)
6491 		goto err_nexthop6_group_get;
6492 
6493 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6494 					     fib_node->fib);
6495 	if (err)
6496 		goto err_nexthop_group_vr_link;
6497 
6498 	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
6499 
6500 	fib_entry->fib_node = fib_node;
6501 
6502 	return fib6_entry;
6503 
6504 err_nexthop_group_vr_link:
6505 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
6506 err_nexthop6_group_get:
6507 	i = nrt6;
6508 err_rt6_create:
6509 	for (i--; i >= 0; i--) {
6510 		fib6_entry->nrt6--;
6511 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6512 					       struct mlxsw_sp_rt6, list);
6513 		list_del(&mlxsw_sp_rt6->list);
6514 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6515 	}
6516 	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
6517 err_fib_entry_priv_create:
6518 	kfree(fib6_entry);
6519 	return ERR_PTR(err);
6520 }
6521 
6522 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6523 					struct mlxsw_sp_fib6_entry *fib6_entry)
6524 {
6525 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6526 
6527 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6528 					 fib_node->fib);
6529 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6530 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
6531 	WARN_ON(fib6_entry->nrt6);
6532 	mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
6533 	kfree(fib6_entry);
6534 }
6535 
6536 static struct mlxsw_sp_fib6_entry *
6537 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6538 			   const struct fib6_info *rt)
6539 {
6540 	struct mlxsw_sp_fib6_entry *fib6_entry;
6541 	struct mlxsw_sp_fib_node *fib_node;
6542 	struct mlxsw_sp_fib *fib;
6543 	struct fib6_info *cmp_rt;
6544 	struct mlxsw_sp_vr *vr;
6545 
6546 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
6547 	if (!vr)
6548 		return NULL;
6549 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
6550 
6551 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
6552 					    sizeof(rt->fib6_dst.addr),
6553 					    rt->fib6_dst.plen);
6554 	if (!fib_node)
6555 		return NULL;
6556 
6557 	fib6_entry = container_of(fib_node->fib_entry,
6558 				  struct mlxsw_sp_fib6_entry, common);
6559 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6560 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
6561 	    rt->fib6_metric == cmp_rt->fib6_metric &&
6562 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
6563 		return fib6_entry;
6564 
6565 	return NULL;
6566 }
6567 
6568 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
6569 {
6570 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6571 	struct mlxsw_sp_fib6_entry *fib6_replaced;
6572 	struct fib6_info *rt, *rt_replaced;
6573 
6574 	if (!fib_node->fib_entry)
6575 		return true;
6576 
6577 	fib6_replaced = container_of(fib_node->fib_entry,
6578 				     struct mlxsw_sp_fib6_entry,
6579 				     common);
6580 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6581 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
6582 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
6583 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
6584 		return false;
6585 
6586 	return true;
6587 }
6588 
6589 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
6590 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6591 					struct fib6_info **rt_arr, unsigned int nrt6)
6592 {
6593 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
6594 	struct mlxsw_sp_fib_entry *replaced;
6595 	struct mlxsw_sp_fib_node *fib_node;
6596 	struct fib6_info *rt = rt_arr[0];
6597 	int err;
6598 
6599 	if (mlxsw_sp->router->aborted)
6600 		return 0;
6601 
6602 	if (rt->fib6_src.plen)
6603 		return -EINVAL;
6604 
6605 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
6606 		return 0;
6607 
6608 	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
6609 		return 0;
6610 
6611 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
6612 					 &rt->fib6_dst.addr,
6613 					 sizeof(rt->fib6_dst.addr),
6614 					 rt->fib6_dst.plen,
6615 					 MLXSW_SP_L3_PROTO_IPV6);
6616 	if (IS_ERR(fib_node))
6617 		return PTR_ERR(fib_node);
6618 
6619 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
6620 						nrt6);
6621 	if (IS_ERR(fib6_entry)) {
6622 		err = PTR_ERR(fib6_entry);
6623 		goto err_fib6_entry_create;
6624 	}
6625 
6626 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
6627 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
6628 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6629 		return 0;
6630 	}
6631 
6632 	replaced = fib_node->fib_entry;
6633 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
6634 	if (err)
6635 		goto err_fib_node_entry_link;
6636 
6637 	/* Nothing to replace */
6638 	if (!replaced)
6639 		return 0;
6640 
6641 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6642 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
6643 				     common);
6644 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
6645 
6646 	return 0;
6647 
6648 err_fib_node_entry_link:
6649 	fib_node->fib_entry = replaced;
6650 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
6651 err_fib6_entry_create:
6652 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6653 	return err;
6654 }
6655 
6656 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
6657 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6658 				       struct fib6_info **rt_arr, unsigned int nrt6)
6659 {
6660 	struct mlxsw_sp_fib6_entry *fib6_entry;
6661 	struct mlxsw_sp_fib_node *fib_node;
6662 	struct fib6_info *rt = rt_arr[0];
6663 	int err;
6664 
6665 	if (mlxsw_sp->router->aborted)
6666 		return 0;
6667 
6668 	if (rt->fib6_src.plen)
6669 		return -EINVAL;
6670 
6671 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
6672 		return 0;
6673 
6674 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
6675 					 &rt->fib6_dst.addr,
6676 					 sizeof(rt->fib6_dst.addr),
6677 					 rt->fib6_dst.plen,
6678 					 MLXSW_SP_L3_PROTO_IPV6);
6679 	if (IS_ERR(fib_node))
6680 		return PTR_ERR(fib_node);
6681 
6682 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
6683 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6684 		return -EINVAL;
6685 	}
6686 
6687 	fib6_entry = container_of(fib_node->fib_entry,
6688 				  struct mlxsw_sp_fib6_entry, common);
6689 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
6690 	if (err)
6691 		goto err_fib6_entry_nexthop_add;
6692 
6693 	return 0;
6694 
6695 err_fib6_entry_nexthop_add:
6696 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6697 	return err;
6698 }
6699 
6700 static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
6701 				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6702 				    struct fib6_info **rt_arr, unsigned int nrt6)
6703 {
6704 	struct mlxsw_sp_fib6_entry *fib6_entry;
6705 	struct mlxsw_sp_fib_node *fib_node;
6706 	struct fib6_info *rt = rt_arr[0];
6707 	int err;
6708 
6709 	if (mlxsw_sp->router->aborted)
6710 		return 0;
6711 
6712 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
6713 		return 0;
6714 
6715 	/* Multipath routes are first added to the FIB trie and only then
6716 	 * notified. If we vetoed the addition, we will get a delete
6717 	 * notification for a route we do not have. Therefore, do not warn if
6718 	 * route was not found.
6719 	 */
6720 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
6721 	if (!fib6_entry)
6722 		return 0;
6723 
6724 	/* If not all the nexthops are deleted, then only reduce the nexthop
6725 	 * group.
6726 	 */
6727 	if (nrt6 != fib6_entry->nrt6) {
6728 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
6729 		return 0;
6730 	}
6731 
6732 	fib_node = fib6_entry->common.fib_node;
6733 
6734 	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
6735 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
6736 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6737 	return err;
6738 }
6739 
6740 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
6741 					    enum mlxsw_sp_l3proto proto,
6742 					    u8 tree_id)
6743 {
6744 	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
6745 	enum mlxsw_reg_ralxx_protocol ralxx_proto =
6746 				(enum mlxsw_reg_ralxx_protocol) proto;
6747 	struct mlxsw_sp_fib_entry_priv *priv;
6748 	char xralta_pl[MLXSW_REG_XRALTA_LEN];
6749 	char xralst_pl[MLXSW_REG_XRALST_LEN];
6750 	int i, err;
6751 
6752 	mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id);
6753 	err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
6754 	if (err)
6755 		return err;
6756 
6757 	mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
6758 	err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
6759 	if (err)
6760 		return err;
6761 
6762 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
6763 		struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6764 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
6765 		char xraltb_pl[MLXSW_REG_XRALTB_LEN];
6766 
6767 		mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6768 		mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id);
6769 		err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
6770 		if (err)
6771 			return err;
6772 
6773 		priv = mlxsw_sp_fib_entry_priv_create(ll_ops);
6774 		if (IS_ERR(priv))
6775 			return PTR_ERR(priv);
6776 
6777 		ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE,
6778 				       vr->id, 0, NULL, priv);
6779 		ll_ops->fib_entry_act_ip2me_pack(op_ctx);
6780 		err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL);
6781 		mlxsw_sp_fib_entry_priv_put(priv);
6782 		if (err)
6783 			return err;
6784 	}
6785 
6786 	return 0;
6787 }
6788 
6789 static struct mlxsw_sp_mr_table *
6790 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
6791 {
6792 	if (family == RTNL_FAMILY_IPMR)
6793 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
6794 	else
6795 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
6796 }
6797 
6798 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
6799 				     struct mfc_entry_notifier_info *men_info,
6800 				     bool replace)
6801 {
6802 	struct mlxsw_sp_mr_table *mrt;
6803 	struct mlxsw_sp_vr *vr;
6804 
6805 	if (mlxsw_sp->router->aborted)
6806 		return 0;
6807 
6808 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
6809 	if (IS_ERR(vr))
6810 		return PTR_ERR(vr);
6811 
6812 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
6813 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
6814 }
6815 
6816 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
6817 				      struct mfc_entry_notifier_info *men_info)
6818 {
6819 	struct mlxsw_sp_mr_table *mrt;
6820 	struct mlxsw_sp_vr *vr;
6821 
6822 	if (mlxsw_sp->router->aborted)
6823 		return;
6824 
6825 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
6826 	if (WARN_ON(!vr))
6827 		return;
6828 
6829 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
6830 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
6831 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6832 }
6833 
6834 static int
6835 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
6836 			      struct vif_entry_notifier_info *ven_info)
6837 {
6838 	struct mlxsw_sp_mr_table *mrt;
6839 	struct mlxsw_sp_rif *rif;
6840 	struct mlxsw_sp_vr *vr;
6841 
6842 	if (mlxsw_sp->router->aborted)
6843 		return 0;
6844 
6845 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
6846 	if (IS_ERR(vr))
6847 		return PTR_ERR(vr);
6848 
6849 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
6850 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
6851 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
6852 				   ven_info->vif_index,
6853 				   ven_info->vif_flags, rif);
6854 }
6855 
6856 static void
6857 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
6858 			      struct vif_entry_notifier_info *ven_info)
6859 {
6860 	struct mlxsw_sp_mr_table *mrt;
6861 	struct mlxsw_sp_vr *vr;
6862 
6863 	if (mlxsw_sp->router->aborted)
6864 		return;
6865 
6866 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
6867 	if (WARN_ON(!vr))
6868 		return;
6869 
6870 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
6871 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
6872 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6873 }
6874 
6875 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
6876 {
6877 	enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4;
6878 	int err;
6879 
6880 	err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
6881 					       MLXSW_SP_LPM_TREE_MIN);
6882 	if (err)
6883 		return err;
6884 
6885 	/* The multicast router code does not need an abort trap as by default,
6886 	 * packets that don't match any routes are trapped to the CPU.
6887 	 */
6888 
6889 	proto = MLXSW_SP_L3_PROTO_IPV6;
6890 	return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
6891 						MLXSW_SP_LPM_TREE_MIN + 1);
6892 }
6893 
6894 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
6895 				     struct mlxsw_sp_fib_node *fib_node)
6896 {
6897 	struct mlxsw_sp_fib4_entry *fib4_entry;
6898 
6899 	fib4_entry = container_of(fib_node->fib_entry,
6900 				  struct mlxsw_sp_fib4_entry, common);
6901 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
6902 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6903 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6904 }
6905 
6906 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
6907 				     struct mlxsw_sp_fib_node *fib_node)
6908 {
6909 	struct mlxsw_sp_fib6_entry *fib6_entry;
6910 
6911 	fib6_entry = container_of(fib_node->fib_entry,
6912 				  struct mlxsw_sp_fib6_entry, common);
6913 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
6914 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
6915 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6916 }
6917 
6918 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
6919 				    struct mlxsw_sp_fib_node *fib_node)
6920 {
6921 	switch (fib_node->fib->proto) {
6922 	case MLXSW_SP_L3_PROTO_IPV4:
6923 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
6924 		break;
6925 	case MLXSW_SP_L3_PROTO_IPV6:
6926 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
6927 		break;
6928 	}
6929 }
6930 
6931 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
6932 				  struct mlxsw_sp_vr *vr,
6933 				  enum mlxsw_sp_l3proto proto)
6934 {
6935 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
6936 	struct mlxsw_sp_fib_node *fib_node, *tmp;
6937 
6938 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
6939 		bool do_break = &tmp->list == &fib->node_list;
6940 
6941 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
6942 		if (do_break)
6943 			break;
6944 	}
6945 }
6946 
6947 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
6948 {
6949 	int i, j;
6950 
6951 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
6952 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
6953 
6954 		if (!mlxsw_sp_vr_is_used(vr))
6955 			continue;
6956 
6957 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
6958 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
6959 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
6960 
6961 		/* If virtual router was only used for IPv4, then it's no
6962 		 * longer used.
6963 		 */
6964 		if (!mlxsw_sp_vr_is_used(vr))
6965 			continue;
6966 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
6967 	}
6968 
6969 	/* After flushing all the routes, it is not possible anyone is still
6970 	 * using the adjacency index that is discarding packets, so free it in
6971 	 * case it was allocated.
6972 	 */
6973 	if (!mlxsw_sp->router->adj_discard_index_valid)
6974 		return;
6975 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
6976 			   mlxsw_sp->router->adj_discard_index);
6977 	mlxsw_sp->router->adj_discard_index_valid = false;
6978 }
6979 
6980 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
6981 {
6982 	int err;
6983 
6984 	if (mlxsw_sp->router->aborted)
6985 		return;
6986 	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
6987 	mlxsw_sp_router_fib_flush(mlxsw_sp);
6988 	mlxsw_sp->router->aborted = true;
6989 	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
6990 	if (err)
6991 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
6992 }
6993 
6994 struct mlxsw_sp_fib6_event {
6995 	struct fib6_info **rt_arr;
6996 	unsigned int nrt6;
6997 };
6998 
6999 struct mlxsw_sp_fib_event {
7000 	struct list_head list; /* node in fib queue */
7001 	union {
7002 		struct mlxsw_sp_fib6_event fib6_event;
7003 		struct fib_entry_notifier_info fen_info;
7004 		struct fib_rule_notifier_info fr_info;
7005 		struct fib_nh_notifier_info fnh_info;
7006 		struct mfc_entry_notifier_info men_info;
7007 		struct vif_entry_notifier_info ven_info;
7008 	};
7009 	struct mlxsw_sp *mlxsw_sp;
7010 	unsigned long event;
7011 	int family;
7012 };
7013 
7014 static int
7015 mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
7016 				struct fib6_entry_notifier_info *fen6_info)
7017 {
7018 	struct fib6_info *rt = fen6_info->rt;
7019 	struct fib6_info **rt_arr;
7020 	struct fib6_info *iter;
7021 	unsigned int nrt6;
7022 	int i = 0;
7023 
7024 	nrt6 = fen6_info->nsiblings + 1;
7025 
7026 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7027 	if (!rt_arr)
7028 		return -ENOMEM;
7029 
7030 	fib6_event->rt_arr = rt_arr;
7031 	fib6_event->nrt6 = nrt6;
7032 
7033 	rt_arr[0] = rt;
7034 	fib6_info_hold(rt);
7035 
7036 	if (!fen6_info->nsiblings)
7037 		return 0;
7038 
7039 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7040 		if (i == fen6_info->nsiblings)
7041 			break;
7042 
7043 		rt_arr[i + 1] = iter;
7044 		fib6_info_hold(iter);
7045 		i++;
7046 	}
7047 	WARN_ON_ONCE(i != fen6_info->nsiblings);
7048 
7049 	return 0;
7050 }
7051 
7052 static void
7053 mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
7054 {
7055 	int i;
7056 
7057 	for (i = 0; i < fib6_event->nrt6; i++)
7058 		mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
7059 	kfree(fib6_event->rt_arr);
7060 }
7061 
7062 static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
7063 					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7064 					       struct mlxsw_sp_fib_event *fib_event)
7065 {
7066 	int err;
7067 
7068 	mlxsw_sp_span_respin(mlxsw_sp);
7069 
7070 	switch (fib_event->event) {
7071 	case FIB_EVENT_ENTRY_REPLACE:
7072 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
7073 		if (err) {
7074 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7075 			mlxsw_sp_router_fib_abort(mlxsw_sp);
7076 			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7077 							      &fib_event->fen_info);
7078 		}
7079 		fib_info_put(fib_event->fen_info.fi);
7080 		break;
7081 	case FIB_EVENT_ENTRY_DEL:
7082 		err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
7083 		if (err)
7084 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7085 		fib_info_put(fib_event->fen_info.fi);
7086 		break;
7087 	case FIB_EVENT_NH_ADD:
7088 	case FIB_EVENT_NH_DEL:
7089 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
7090 		fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
7091 		break;
7092 	}
7093 }
7094 
7095 static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
7096 					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7097 					       struct mlxsw_sp_fib_event *fib_event)
7098 {
7099 	struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
7100 	int err;
7101 
7102 	mlxsw_sp_span_respin(mlxsw_sp);
7103 
7104 	switch (fib_event->event) {
7105 	case FIB_EVENT_ENTRY_REPLACE:
7106 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7107 						   fib_event->fib6_event.nrt6);
7108 		if (err) {
7109 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7110 			mlxsw_sp_router_fib_abort(mlxsw_sp);
7111 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7112 							      fib6_event->rt_arr,
7113 							      fib6_event->nrt6);
7114 		}
7115 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7116 		break;
7117 	case FIB_EVENT_ENTRY_APPEND:
7118 		err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7119 						  fib_event->fib6_event.nrt6);
7120 		if (err) {
7121 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7122 			mlxsw_sp_router_fib_abort(mlxsw_sp);
7123 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7124 							      fib6_event->rt_arr,
7125 							      fib6_event->nrt6);
7126 		}
7127 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7128 		break;
7129 	case FIB_EVENT_ENTRY_DEL:
7130 		err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7131 					       fib_event->fib6_event.nrt6);
7132 		if (err)
7133 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7134 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7135 		break;
7136 	}
7137 }
7138 
7139 static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
7140 						struct mlxsw_sp_fib_event *fib_event)
7141 {
7142 	bool replace;
7143 	int err;
7144 
7145 	rtnl_lock();
7146 	mutex_lock(&mlxsw_sp->router->lock);
7147 	switch (fib_event->event) {
7148 	case FIB_EVENT_ENTRY_REPLACE:
7149 	case FIB_EVENT_ENTRY_ADD:
7150 		replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
7151 
7152 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
7153 		if (err)
7154 			mlxsw_sp_router_fib_abort(mlxsw_sp);
7155 		mr_cache_put(fib_event->men_info.mfc);
7156 		break;
7157 	case FIB_EVENT_ENTRY_DEL:
7158 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
7159 		mr_cache_put(fib_event->men_info.mfc);
7160 		break;
7161 	case FIB_EVENT_VIF_ADD:
7162 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7163 						    &fib_event->ven_info);
7164 		if (err)
7165 			mlxsw_sp_router_fib_abort(mlxsw_sp);
7166 		dev_put(fib_event->ven_info.dev);
7167 		break;
7168 	case FIB_EVENT_VIF_DEL:
7169 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
7170 		dev_put(fib_event->ven_info.dev);
7171 		break;
7172 	}
7173 	mutex_unlock(&mlxsw_sp->router->lock);
7174 	rtnl_unlock();
7175 }
7176 
7177 static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
7178 {
7179 	struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
7180 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
7181 	struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
7182 	struct mlxsw_sp_fib_event *next_fib_event;
7183 	struct mlxsw_sp_fib_event *fib_event;
7184 	int last_family = AF_UNSPEC;
7185 	LIST_HEAD(fib_event_queue);
7186 
7187 	spin_lock_bh(&router->fib_event_queue_lock);
7188 	list_splice_init(&router->fib_event_queue, &fib_event_queue);
7189 	spin_unlock_bh(&router->fib_event_queue_lock);
7190 
7191 	/* Router lock is held here to make sure per-instance
7192 	 * operation context is not used in between FIB4/6 events
7193 	 * processing.
7194 	 */
7195 	mutex_lock(&router->lock);
7196 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
7197 	list_for_each_entry_safe(fib_event, next_fib_event,
7198 				 &fib_event_queue, list) {
7199 		/* Check if the next entry in the queue exists and it is
7200 		 * of the same type (family and event) as the currect one.
7201 		 * In that case it is permitted to do the bulking
7202 		 * of multiple FIB entries to a single register write.
7203 		 */
7204 		op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
7205 				  fib_event->family == next_fib_event->family &&
7206 				  fib_event->event == next_fib_event->event;
7207 		op_ctx->event = fib_event->event;
7208 
7209 		/* In case family of this and the previous entry are different, context
7210 		 * reinitialization is going to be needed now, indicate that.
7211 		 * Note that since last_family is initialized to AF_UNSPEC, this is always
7212 		 * going to happen for the first entry processed in the work.
7213 		 */
7214 		if (fib_event->family != last_family)
7215 			op_ctx->initialized = false;
7216 
7217 		switch (fib_event->family) {
7218 		case AF_INET:
7219 			mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
7220 							   fib_event);
7221 			break;
7222 		case AF_INET6:
7223 			mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
7224 							   fib_event);
7225 			break;
7226 		case RTNL_FAMILY_IP6MR:
7227 		case RTNL_FAMILY_IPMR:
7228 			/* Unlock here as inside FIBMR the lock is taken again
7229 			 * under RTNL. The per-instance operation context
7230 			 * is not used by FIBMR.
7231 			 */
7232 			mutex_unlock(&router->lock);
7233 			mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
7234 							    fib_event);
7235 			mutex_lock(&router->lock);
7236 			break;
7237 		default:
7238 			WARN_ON_ONCE(1);
7239 		}
7240 		last_family = fib_event->family;
7241 		kfree(fib_event);
7242 		cond_resched();
7243 	}
7244 	WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
7245 	mutex_unlock(&router->lock);
7246 }
7247 
7248 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
7249 				       struct fib_notifier_info *info)
7250 {
7251 	struct fib_entry_notifier_info *fen_info;
7252 	struct fib_nh_notifier_info *fnh_info;
7253 
7254 	switch (fib_event->event) {
7255 	case FIB_EVENT_ENTRY_REPLACE:
7256 	case FIB_EVENT_ENTRY_DEL:
7257 		fen_info = container_of(info, struct fib_entry_notifier_info,
7258 					info);
7259 		fib_event->fen_info = *fen_info;
7260 		/* Take reference on fib_info to prevent it from being
7261 		 * freed while event is queued. Release it afterwards.
7262 		 */
7263 		fib_info_hold(fib_event->fen_info.fi);
7264 		break;
7265 	case FIB_EVENT_NH_ADD:
7266 	case FIB_EVENT_NH_DEL:
7267 		fnh_info = container_of(info, struct fib_nh_notifier_info,
7268 					info);
7269 		fib_event->fnh_info = *fnh_info;
7270 		fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
7271 		break;
7272 	}
7273 }
7274 
7275 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
7276 				      struct fib_notifier_info *info)
7277 {
7278 	struct fib6_entry_notifier_info *fen6_info;
7279 	int err;
7280 
7281 	switch (fib_event->event) {
7282 	case FIB_EVENT_ENTRY_REPLACE:
7283 	case FIB_EVENT_ENTRY_APPEND:
7284 	case FIB_EVENT_ENTRY_DEL:
7285 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
7286 					 info);
7287 		err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
7288 						      fen6_info);
7289 		if (err)
7290 			return err;
7291 		break;
7292 	}
7293 
7294 	return 0;
7295 }
7296 
7297 static void
7298 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
7299 			    struct fib_notifier_info *info)
7300 {
7301 	switch (fib_event->event) {
7302 	case FIB_EVENT_ENTRY_REPLACE:
7303 	case FIB_EVENT_ENTRY_ADD:
7304 	case FIB_EVENT_ENTRY_DEL:
7305 		memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
7306 		mr_cache_hold(fib_event->men_info.mfc);
7307 		break;
7308 	case FIB_EVENT_VIF_ADD:
7309 	case FIB_EVENT_VIF_DEL:
7310 		memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
7311 		dev_hold(fib_event->ven_info.dev);
7312 		break;
7313 	}
7314 }
7315 
7316 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7317 					  struct fib_notifier_info *info,
7318 					  struct mlxsw_sp *mlxsw_sp)
7319 {
7320 	struct netlink_ext_ack *extack = info->extack;
7321 	struct fib_rule_notifier_info *fr_info;
7322 	struct fib_rule *rule;
7323 	int err = 0;
7324 
7325 	/* nothing to do at the moment */
7326 	if (event == FIB_EVENT_RULE_DEL)
7327 		return 0;
7328 
7329 	if (mlxsw_sp->router->aborted)
7330 		return 0;
7331 
7332 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
7333 	rule = fr_info->rule;
7334 
7335 	/* Rule only affects locally generated traffic */
7336 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7337 		return 0;
7338 
7339 	switch (info->family) {
7340 	case AF_INET:
7341 		if (!fib4_rule_default(rule) && !rule->l3mdev)
7342 			err = -EOPNOTSUPP;
7343 		break;
7344 	case AF_INET6:
7345 		if (!fib6_rule_default(rule) && !rule->l3mdev)
7346 			err = -EOPNOTSUPP;
7347 		break;
7348 	case RTNL_FAMILY_IPMR:
7349 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
7350 			err = -EOPNOTSUPP;
7351 		break;
7352 	case RTNL_FAMILY_IP6MR:
7353 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7354 			err = -EOPNOTSUPP;
7355 		break;
7356 	}
7357 
7358 	if (err < 0)
7359 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7360 
7361 	return err;
7362 }
7363 
7364 /* Called with rcu_read_lock() */
7365 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7366 				     unsigned long event, void *ptr)
7367 {
7368 	struct mlxsw_sp_fib_event *fib_event;
7369 	struct fib_notifier_info *info = ptr;
7370 	struct mlxsw_sp_router *router;
7371 	int err;
7372 
7373 	if ((info->family != AF_INET && info->family != AF_INET6 &&
7374 	     info->family != RTNL_FAMILY_IPMR &&
7375 	     info->family != RTNL_FAMILY_IP6MR))
7376 		return NOTIFY_DONE;
7377 
7378 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7379 
7380 	switch (event) {
7381 	case FIB_EVENT_RULE_ADD:
7382 	case FIB_EVENT_RULE_DEL:
7383 		err = mlxsw_sp_router_fib_rule_event(event, info,
7384 						     router->mlxsw_sp);
7385 		return notifier_from_errno(err);
7386 	case FIB_EVENT_ENTRY_ADD:
7387 	case FIB_EVENT_ENTRY_REPLACE:
7388 	case FIB_EVENT_ENTRY_APPEND:
7389 		if (router->aborted) {
7390 			NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
7391 			return notifier_from_errno(-EINVAL);
7392 		}
7393 		if (info->family == AF_INET) {
7394 			struct fib_entry_notifier_info *fen_info = ptr;
7395 
7396 			if (fen_info->fi->fib_nh_is_v6) {
7397 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7398 				return notifier_from_errno(-EINVAL);
7399 			}
7400 		}
7401 		break;
7402 	}
7403 
7404 	fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
7405 	if (!fib_event)
7406 		return NOTIFY_BAD;
7407 
7408 	fib_event->mlxsw_sp = router->mlxsw_sp;
7409 	fib_event->event = event;
7410 	fib_event->family = info->family;
7411 
7412 	switch (info->family) {
7413 	case AF_INET:
7414 		mlxsw_sp_router_fib4_event(fib_event, info);
7415 		break;
7416 	case AF_INET6:
7417 		err = mlxsw_sp_router_fib6_event(fib_event, info);
7418 		if (err)
7419 			goto err_fib_event;
7420 		break;
7421 	case RTNL_FAMILY_IP6MR:
7422 	case RTNL_FAMILY_IPMR:
7423 		mlxsw_sp_router_fibmr_event(fib_event, info);
7424 		break;
7425 	}
7426 
7427 	/* Enqueue the event and trigger the work */
7428 	spin_lock_bh(&router->fib_event_queue_lock);
7429 	list_add_tail(&fib_event->list, &router->fib_event_queue);
7430 	spin_unlock_bh(&router->fib_event_queue_lock);
7431 	mlxsw_core_schedule_work(&router->fib_event_work);
7432 
7433 	return NOTIFY_DONE;
7434 
7435 err_fib_event:
7436 	kfree(fib_event);
7437 	return NOTIFY_BAD;
7438 }
7439 
7440 static struct mlxsw_sp_rif *
7441 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7442 			 const struct net_device *dev)
7443 {
7444 	int i;
7445 
7446 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7447 		if (mlxsw_sp->router->rifs[i] &&
7448 		    mlxsw_sp->router->rifs[i]->dev == dev)
7449 			return mlxsw_sp->router->rifs[i];
7450 
7451 	return NULL;
7452 }
7453 
7454 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7455 			 const struct net_device *dev)
7456 {
7457 	struct mlxsw_sp_rif *rif;
7458 
7459 	mutex_lock(&mlxsw_sp->router->lock);
7460 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7461 	mutex_unlock(&mlxsw_sp->router->lock);
7462 
7463 	return rif;
7464 }
7465 
7466 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7467 {
7468 	struct mlxsw_sp_rif *rif;
7469 	u16 vid = 0;
7470 
7471 	mutex_lock(&mlxsw_sp->router->lock);
7472 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7473 	if (!rif)
7474 		goto out;
7475 
7476 	/* We only return the VID for VLAN RIFs. Otherwise we return an
7477 	 * invalid value (0).
7478 	 */
7479 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7480 		goto out;
7481 
7482 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7483 
7484 out:
7485 	mutex_unlock(&mlxsw_sp->router->lock);
7486 	return vid;
7487 }
7488 
7489 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7490 {
7491 	char ritr_pl[MLXSW_REG_RITR_LEN];
7492 	int err;
7493 
7494 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
7495 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7496 	if (err)
7497 		return err;
7498 
7499 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
7500 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7501 }
7502 
7503 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
7504 					  struct mlxsw_sp_rif *rif)
7505 {
7506 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
7507 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
7508 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
7509 }
7510 
7511 static bool
7512 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
7513 			   unsigned long event)
7514 {
7515 	struct inet6_dev *inet6_dev;
7516 	bool addr_list_empty = true;
7517 	struct in_device *idev;
7518 
7519 	switch (event) {
7520 	case NETDEV_UP:
7521 		return rif == NULL;
7522 	case NETDEV_DOWN:
7523 		rcu_read_lock();
7524 		idev = __in_dev_get_rcu(dev);
7525 		if (idev && idev->ifa_list)
7526 			addr_list_empty = false;
7527 
7528 		inet6_dev = __in6_dev_get(dev);
7529 		if (addr_list_empty && inet6_dev &&
7530 		    !list_empty(&inet6_dev->addr_list))
7531 			addr_list_empty = false;
7532 		rcu_read_unlock();
7533 
7534 		/* macvlans do not have a RIF, but rather piggy back on the
7535 		 * RIF of their lower device.
7536 		 */
7537 		if (netif_is_macvlan(dev) && addr_list_empty)
7538 			return true;
7539 
7540 		if (rif && addr_list_empty &&
7541 		    !netif_is_l3_slave(rif->dev))
7542 			return true;
7543 		/* It is possible we already removed the RIF ourselves
7544 		 * if it was assigned to a netdev that is now a bridge
7545 		 * or LAG slave.
7546 		 */
7547 		return false;
7548 	}
7549 
7550 	return false;
7551 }
7552 
7553 static enum mlxsw_sp_rif_type
7554 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
7555 		      const struct net_device *dev)
7556 {
7557 	enum mlxsw_sp_fid_type type;
7558 
7559 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
7560 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
7561 
7562 	/* Otherwise RIF type is derived from the type of the underlying FID. */
7563 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
7564 		type = MLXSW_SP_FID_TYPE_8021Q;
7565 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
7566 		type = MLXSW_SP_FID_TYPE_8021Q;
7567 	else if (netif_is_bridge_master(dev))
7568 		type = MLXSW_SP_FID_TYPE_8021D;
7569 	else
7570 		type = MLXSW_SP_FID_TYPE_RFID;
7571 
7572 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
7573 }
7574 
7575 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
7576 {
7577 	int i;
7578 
7579 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
7580 		if (!mlxsw_sp->router->rifs[i]) {
7581 			*p_rif_index = i;
7582 			return 0;
7583 		}
7584 	}
7585 
7586 	return -ENOBUFS;
7587 }
7588 
7589 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
7590 					       u16 vr_id,
7591 					       struct net_device *l3_dev)
7592 {
7593 	struct mlxsw_sp_rif *rif;
7594 
7595 	rif = kzalloc(rif_size, GFP_KERNEL);
7596 	if (!rif)
7597 		return NULL;
7598 
7599 	INIT_LIST_HEAD(&rif->nexthop_list);
7600 	INIT_LIST_HEAD(&rif->neigh_list);
7601 	if (l3_dev) {
7602 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
7603 		rif->mtu = l3_dev->mtu;
7604 		rif->dev = l3_dev;
7605 	}
7606 	rif->vr_id = vr_id;
7607 	rif->rif_index = rif_index;
7608 
7609 	return rif;
7610 }
7611 
7612 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
7613 					   u16 rif_index)
7614 {
7615 	return mlxsw_sp->router->rifs[rif_index];
7616 }
7617 
7618 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
7619 {
7620 	return rif->rif_index;
7621 }
7622 
7623 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7624 {
7625 	return lb_rif->common.rif_index;
7626 }
7627 
7628 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7629 {
7630 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
7631 	struct mlxsw_sp_vr *ul_vr;
7632 
7633 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
7634 	if (WARN_ON(IS_ERR(ul_vr)))
7635 		return 0;
7636 
7637 	return ul_vr->id;
7638 }
7639 
7640 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7641 {
7642 	return lb_rif->ul_rif_id;
7643 }
7644 
7645 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
7646 {
7647 	return rif->dev->ifindex;
7648 }
7649 
7650 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
7651 {
7652 	return rif->dev;
7653 }
7654 
7655 static struct mlxsw_sp_rif *
7656 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
7657 		    const struct mlxsw_sp_rif_params *params,
7658 		    struct netlink_ext_ack *extack)
7659 {
7660 	u32 tb_id = l3mdev_fib_table(params->dev);
7661 	const struct mlxsw_sp_rif_ops *ops;
7662 	struct mlxsw_sp_fid *fid = NULL;
7663 	enum mlxsw_sp_rif_type type;
7664 	struct mlxsw_sp_rif *rif;
7665 	struct mlxsw_sp_vr *vr;
7666 	u16 rif_index;
7667 	int i, err;
7668 
7669 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
7670 	ops = mlxsw_sp->rif_ops_arr[type];
7671 
7672 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
7673 	if (IS_ERR(vr))
7674 		return ERR_CAST(vr);
7675 	vr->rif_count++;
7676 
7677 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
7678 	if (err) {
7679 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
7680 		goto err_rif_index_alloc;
7681 	}
7682 
7683 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
7684 	if (!rif) {
7685 		err = -ENOMEM;
7686 		goto err_rif_alloc;
7687 	}
7688 	dev_hold(rif->dev);
7689 	mlxsw_sp->router->rifs[rif_index] = rif;
7690 	rif->mlxsw_sp = mlxsw_sp;
7691 	rif->ops = ops;
7692 
7693 	if (ops->fid_get) {
7694 		fid = ops->fid_get(rif, extack);
7695 		if (IS_ERR(fid)) {
7696 			err = PTR_ERR(fid);
7697 			goto err_fid_get;
7698 		}
7699 		rif->fid = fid;
7700 	}
7701 
7702 	if (ops->setup)
7703 		ops->setup(rif, params);
7704 
7705 	err = ops->configure(rif);
7706 	if (err)
7707 		goto err_configure;
7708 
7709 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
7710 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
7711 		if (err)
7712 			goto err_mr_rif_add;
7713 	}
7714 
7715 	mlxsw_sp_rif_counters_alloc(rif);
7716 
7717 	return rif;
7718 
7719 err_mr_rif_add:
7720 	for (i--; i >= 0; i--)
7721 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
7722 	ops->deconfigure(rif);
7723 err_configure:
7724 	if (fid)
7725 		mlxsw_sp_fid_put(fid);
7726 err_fid_get:
7727 	mlxsw_sp->router->rifs[rif_index] = NULL;
7728 	dev_put(rif->dev);
7729 	kfree(rif);
7730 err_rif_alloc:
7731 err_rif_index_alloc:
7732 	vr->rif_count--;
7733 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7734 	return ERR_PTR(err);
7735 }
7736 
7737 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
7738 {
7739 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
7740 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7741 	struct mlxsw_sp_fid *fid = rif->fid;
7742 	struct mlxsw_sp_vr *vr;
7743 	int i;
7744 
7745 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
7746 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
7747 
7748 	mlxsw_sp_rif_counters_free(rif);
7749 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
7750 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
7751 	ops->deconfigure(rif);
7752 	if (fid)
7753 		/* Loopback RIFs are not associated with a FID. */
7754 		mlxsw_sp_fid_put(fid);
7755 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
7756 	dev_put(rif->dev);
7757 	kfree(rif);
7758 	vr->rif_count--;
7759 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7760 }
7761 
7762 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
7763 				 struct net_device *dev)
7764 {
7765 	struct mlxsw_sp_rif *rif;
7766 
7767 	mutex_lock(&mlxsw_sp->router->lock);
7768 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7769 	if (!rif)
7770 		goto out;
7771 	mlxsw_sp_rif_destroy(rif);
7772 out:
7773 	mutex_unlock(&mlxsw_sp->router->lock);
7774 }
7775 
7776 static void
7777 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
7778 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
7779 {
7780 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
7781 
7782 	params->vid = mlxsw_sp_port_vlan->vid;
7783 	params->lag = mlxsw_sp_port->lagged;
7784 	if (params->lag)
7785 		params->lag_id = mlxsw_sp_port->lag_id;
7786 	else
7787 		params->system_port = mlxsw_sp_port->local_port;
7788 }
7789 
7790 static struct mlxsw_sp_rif_subport *
7791 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
7792 {
7793 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
7794 }
7795 
7796 static struct mlxsw_sp_rif *
7797 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
7798 			 const struct mlxsw_sp_rif_params *params,
7799 			 struct netlink_ext_ack *extack)
7800 {
7801 	struct mlxsw_sp_rif_subport *rif_subport;
7802 	struct mlxsw_sp_rif *rif;
7803 
7804 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
7805 	if (!rif)
7806 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
7807 
7808 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
7809 	refcount_inc(&rif_subport->ref_count);
7810 	return rif;
7811 }
7812 
7813 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
7814 {
7815 	struct mlxsw_sp_rif_subport *rif_subport;
7816 
7817 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
7818 	if (!refcount_dec_and_test(&rif_subport->ref_count))
7819 		return;
7820 
7821 	mlxsw_sp_rif_destroy(rif);
7822 }
7823 
7824 static int
7825 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
7826 				 struct net_device *l3_dev,
7827 				 struct netlink_ext_ack *extack)
7828 {
7829 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
7830 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
7831 	struct mlxsw_sp_rif_params params = {
7832 		.dev = l3_dev,
7833 	};
7834 	u16 vid = mlxsw_sp_port_vlan->vid;
7835 	struct mlxsw_sp_rif *rif;
7836 	struct mlxsw_sp_fid *fid;
7837 	int err;
7838 
7839 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
7840 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
7841 	if (IS_ERR(rif))
7842 		return PTR_ERR(rif);
7843 
7844 	/* FID was already created, just take a reference */
7845 	fid = rif->ops->fid_get(rif, extack);
7846 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
7847 	if (err)
7848 		goto err_fid_port_vid_map;
7849 
7850 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
7851 	if (err)
7852 		goto err_port_vid_learning_set;
7853 
7854 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
7855 					BR_STATE_FORWARDING);
7856 	if (err)
7857 		goto err_port_vid_stp_set;
7858 
7859 	mlxsw_sp_port_vlan->fid = fid;
7860 
7861 	return 0;
7862 
7863 err_port_vid_stp_set:
7864 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
7865 err_port_vid_learning_set:
7866 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
7867 err_fid_port_vid_map:
7868 	mlxsw_sp_fid_put(fid);
7869 	mlxsw_sp_rif_subport_put(rif);
7870 	return err;
7871 }
7872 
7873 static void
7874 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
7875 {
7876 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
7877 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
7878 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
7879 	u16 vid = mlxsw_sp_port_vlan->vid;
7880 
7881 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
7882 		return;
7883 
7884 	mlxsw_sp_port_vlan->fid = NULL;
7885 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
7886 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
7887 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
7888 	mlxsw_sp_fid_put(fid);
7889 	mlxsw_sp_rif_subport_put(rif);
7890 }
7891 
7892 int
7893 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
7894 			       struct net_device *l3_dev,
7895 			       struct netlink_ext_ack *extack)
7896 {
7897 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
7898 	struct mlxsw_sp_rif *rif;
7899 	int err = 0;
7900 
7901 	mutex_lock(&mlxsw_sp->router->lock);
7902 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7903 	if (!rif)
7904 		goto out;
7905 
7906 	err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
7907 					       extack);
7908 out:
7909 	mutex_unlock(&mlxsw_sp->router->lock);
7910 	return err;
7911 }
7912 
7913 void
7914 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
7915 {
7916 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
7917 
7918 	mutex_lock(&mlxsw_sp->router->lock);
7919 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
7920 	mutex_unlock(&mlxsw_sp->router->lock);
7921 }
7922 
7923 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
7924 					     struct net_device *port_dev,
7925 					     unsigned long event, u16 vid,
7926 					     struct netlink_ext_ack *extack)
7927 {
7928 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
7929 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
7930 
7931 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
7932 	if (WARN_ON(!mlxsw_sp_port_vlan))
7933 		return -EINVAL;
7934 
7935 	switch (event) {
7936 	case NETDEV_UP:
7937 		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
7938 							l3_dev, extack);
7939 	case NETDEV_DOWN:
7940 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
7941 		break;
7942 	}
7943 
7944 	return 0;
7945 }
7946 
7947 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
7948 					unsigned long event,
7949 					struct netlink_ext_ack *extack)
7950 {
7951 	if (netif_is_bridge_port(port_dev) ||
7952 	    netif_is_lag_port(port_dev) ||
7953 	    netif_is_ovs_port(port_dev))
7954 		return 0;
7955 
7956 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
7957 						 MLXSW_SP_DEFAULT_VID, extack);
7958 }
7959 
7960 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
7961 					 struct net_device *lag_dev,
7962 					 unsigned long event, u16 vid,
7963 					 struct netlink_ext_ack *extack)
7964 {
7965 	struct net_device *port_dev;
7966 	struct list_head *iter;
7967 	int err;
7968 
7969 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
7970 		if (mlxsw_sp_port_dev_check(port_dev)) {
7971 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
7972 								port_dev,
7973 								event, vid,
7974 								extack);
7975 			if (err)
7976 				return err;
7977 		}
7978 	}
7979 
7980 	return 0;
7981 }
7982 
7983 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
7984 				       unsigned long event,
7985 				       struct netlink_ext_ack *extack)
7986 {
7987 	if (netif_is_bridge_port(lag_dev))
7988 		return 0;
7989 
7990 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
7991 					     MLXSW_SP_DEFAULT_VID, extack);
7992 }
7993 
7994 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
7995 					  struct net_device *l3_dev,
7996 					  unsigned long event,
7997 					  struct netlink_ext_ack *extack)
7998 {
7999 	struct mlxsw_sp_rif_params params = {
8000 		.dev = l3_dev,
8001 	};
8002 	struct mlxsw_sp_rif *rif;
8003 
8004 	switch (event) {
8005 	case NETDEV_UP:
8006 		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
8007 			u16 proto;
8008 
8009 			br_vlan_get_proto(l3_dev, &proto);
8010 			if (proto == ETH_P_8021AD) {
8011 				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
8012 				return -EOPNOTSUPP;
8013 			}
8014 		}
8015 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8016 		if (IS_ERR(rif))
8017 			return PTR_ERR(rif);
8018 		break;
8019 	case NETDEV_DOWN:
8020 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8021 		mlxsw_sp_rif_destroy(rif);
8022 		break;
8023 	}
8024 
8025 	return 0;
8026 }
8027 
8028 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8029 					struct net_device *vlan_dev,
8030 					unsigned long event,
8031 					struct netlink_ext_ack *extack)
8032 {
8033 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8034 	u16 vid = vlan_dev_vlan_id(vlan_dev);
8035 
8036 	if (netif_is_bridge_port(vlan_dev))
8037 		return 0;
8038 
8039 	if (mlxsw_sp_port_dev_check(real_dev))
8040 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
8041 							 event, vid, extack);
8042 	else if (netif_is_lag_master(real_dev))
8043 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
8044 						     vid, extack);
8045 	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
8046 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
8047 						      extack);
8048 
8049 	return 0;
8050 }
8051 
8052 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
8053 {
8054 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
8055 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8056 
8057 	return ether_addr_equal_masked(mac, vrrp4, mask);
8058 }
8059 
8060 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
8061 {
8062 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
8063 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8064 
8065 	return ether_addr_equal_masked(mac, vrrp6, mask);
8066 }
8067 
8068 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8069 				const u8 *mac, bool adding)
8070 {
8071 	char ritr_pl[MLXSW_REG_RITR_LEN];
8072 	u8 vrrp_id = adding ? mac[5] : 0;
8073 	int err;
8074 
8075 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
8076 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
8077 		return 0;
8078 
8079 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8080 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8081 	if (err)
8082 		return err;
8083 
8084 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
8085 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
8086 	else
8087 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
8088 
8089 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8090 }
8091 
8092 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
8093 				    const struct net_device *macvlan_dev,
8094 				    struct netlink_ext_ack *extack)
8095 {
8096 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8097 	struct mlxsw_sp_rif *rif;
8098 	int err;
8099 
8100 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8101 	if (!rif) {
8102 		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
8103 		return -EOPNOTSUPP;
8104 	}
8105 
8106 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8107 				  mlxsw_sp_fid_index(rif->fid), true);
8108 	if (err)
8109 		return err;
8110 
8111 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
8112 				   macvlan_dev->dev_addr, true);
8113 	if (err)
8114 		goto err_rif_vrrp_add;
8115 
8116 	/* Make sure the bridge driver does not have this MAC pointing at
8117 	 * some other port.
8118 	 */
8119 	if (rif->ops->fdb_del)
8120 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
8121 
8122 	return 0;
8123 
8124 err_rif_vrrp_add:
8125 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8126 			    mlxsw_sp_fid_index(rif->fid), false);
8127 	return err;
8128 }
8129 
8130 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8131 				       const struct net_device *macvlan_dev)
8132 {
8133 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8134 	struct mlxsw_sp_rif *rif;
8135 
8136 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8137 	/* If we do not have a RIF, then we already took care of
8138 	 * removing the macvlan's MAC during RIF deletion.
8139 	 */
8140 	if (!rif)
8141 		return;
8142 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
8143 			     false);
8144 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8145 			    mlxsw_sp_fid_index(rif->fid), false);
8146 }
8147 
8148 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8149 			      const struct net_device *macvlan_dev)
8150 {
8151 	mutex_lock(&mlxsw_sp->router->lock);
8152 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8153 	mutex_unlock(&mlxsw_sp->router->lock);
8154 }
8155 
8156 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
8157 					   struct net_device *macvlan_dev,
8158 					   unsigned long event,
8159 					   struct netlink_ext_ack *extack)
8160 {
8161 	switch (event) {
8162 	case NETDEV_UP:
8163 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
8164 	case NETDEV_DOWN:
8165 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8166 		break;
8167 	}
8168 
8169 	return 0;
8170 }
8171 
8172 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
8173 					       struct net_device *dev,
8174 					       const unsigned char *dev_addr,
8175 					       struct netlink_ext_ack *extack)
8176 {
8177 	struct mlxsw_sp_rif *rif;
8178 	int i;
8179 
8180 	/* A RIF is not created for macvlan netdevs. Their MAC is used to
8181 	 * populate the FDB
8182 	 */
8183 	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
8184 		return 0;
8185 
8186 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
8187 		rif = mlxsw_sp->router->rifs[i];
8188 		if (rif && rif->ops &&
8189 		    rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
8190 			continue;
8191 		if (rif && rif->dev && rif->dev != dev &&
8192 		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
8193 					     mlxsw_sp->mac_mask)) {
8194 			NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
8195 			return -EINVAL;
8196 		}
8197 	}
8198 
8199 	return 0;
8200 }
8201 
8202 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
8203 				     struct net_device *dev,
8204 				     unsigned long event,
8205 				     struct netlink_ext_ack *extack)
8206 {
8207 	if (mlxsw_sp_port_dev_check(dev))
8208 		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
8209 	else if (netif_is_lag_master(dev))
8210 		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
8211 	else if (netif_is_bridge_master(dev))
8212 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
8213 						      extack);
8214 	else if (is_vlan_dev(dev))
8215 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
8216 						    extack);
8217 	else if (netif_is_macvlan(dev))
8218 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
8219 						       extack);
8220 	else
8221 		return 0;
8222 }
8223 
8224 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
8225 				   unsigned long event, void *ptr)
8226 {
8227 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
8228 	struct net_device *dev = ifa->ifa_dev->dev;
8229 	struct mlxsw_sp_router *router;
8230 	struct mlxsw_sp_rif *rif;
8231 	int err = 0;
8232 
8233 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
8234 	if (event == NETDEV_UP)
8235 		return NOTIFY_DONE;
8236 
8237 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
8238 	mutex_lock(&router->lock);
8239 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
8240 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8241 		goto out;
8242 
8243 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
8244 out:
8245 	mutex_unlock(&router->lock);
8246 	return notifier_from_errno(err);
8247 }
8248 
8249 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
8250 				  unsigned long event, void *ptr)
8251 {
8252 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
8253 	struct net_device *dev = ivi->ivi_dev->dev;
8254 	struct mlxsw_sp *mlxsw_sp;
8255 	struct mlxsw_sp_rif *rif;
8256 	int err = 0;
8257 
8258 	mlxsw_sp = mlxsw_sp_lower_get(dev);
8259 	if (!mlxsw_sp)
8260 		return NOTIFY_DONE;
8261 
8262 	mutex_lock(&mlxsw_sp->router->lock);
8263 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8264 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8265 		goto out;
8266 
8267 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
8268 						  ivi->extack);
8269 	if (err)
8270 		goto out;
8271 
8272 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
8273 out:
8274 	mutex_unlock(&mlxsw_sp->router->lock);
8275 	return notifier_from_errno(err);
8276 }
8277 
8278 struct mlxsw_sp_inet6addr_event_work {
8279 	struct work_struct work;
8280 	struct mlxsw_sp *mlxsw_sp;
8281 	struct net_device *dev;
8282 	unsigned long event;
8283 };
8284 
8285 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
8286 {
8287 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
8288 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
8289 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
8290 	struct net_device *dev = inet6addr_work->dev;
8291 	unsigned long event = inet6addr_work->event;
8292 	struct mlxsw_sp_rif *rif;
8293 
8294 	rtnl_lock();
8295 	mutex_lock(&mlxsw_sp->router->lock);
8296 
8297 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8298 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8299 		goto out;
8300 
8301 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
8302 out:
8303 	mutex_unlock(&mlxsw_sp->router->lock);
8304 	rtnl_unlock();
8305 	dev_put(dev);
8306 	kfree(inet6addr_work);
8307 }
8308 
8309 /* Called with rcu_read_lock() */
8310 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
8311 				    unsigned long event, void *ptr)
8312 {
8313 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
8314 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
8315 	struct net_device *dev = if6->idev->dev;
8316 	struct mlxsw_sp_router *router;
8317 
8318 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
8319 	if (event == NETDEV_UP)
8320 		return NOTIFY_DONE;
8321 
8322 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
8323 	if (!inet6addr_work)
8324 		return NOTIFY_BAD;
8325 
8326 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
8327 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
8328 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
8329 	inet6addr_work->dev = dev;
8330 	inet6addr_work->event = event;
8331 	dev_hold(dev);
8332 	mlxsw_core_schedule_work(&inet6addr_work->work);
8333 
8334 	return NOTIFY_DONE;
8335 }
8336 
8337 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
8338 				   unsigned long event, void *ptr)
8339 {
8340 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
8341 	struct net_device *dev = i6vi->i6vi_dev->dev;
8342 	struct mlxsw_sp *mlxsw_sp;
8343 	struct mlxsw_sp_rif *rif;
8344 	int err = 0;
8345 
8346 	mlxsw_sp = mlxsw_sp_lower_get(dev);
8347 	if (!mlxsw_sp)
8348 		return NOTIFY_DONE;
8349 
8350 	mutex_lock(&mlxsw_sp->router->lock);
8351 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8352 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8353 		goto out;
8354 
8355 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
8356 						  i6vi->extack);
8357 	if (err)
8358 		goto out;
8359 
8360 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
8361 out:
8362 	mutex_unlock(&mlxsw_sp->router->lock);
8363 	return notifier_from_errno(err);
8364 }
8365 
8366 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8367 			     const char *mac, int mtu)
8368 {
8369 	char ritr_pl[MLXSW_REG_RITR_LEN];
8370 	int err;
8371 
8372 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8373 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8374 	if (err)
8375 		return err;
8376 
8377 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
8378 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
8379 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
8380 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8381 }
8382 
8383 static int
8384 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
8385 				  struct mlxsw_sp_rif *rif)
8386 {
8387 	struct net_device *dev = rif->dev;
8388 	u16 fid_index;
8389 	int err;
8390 
8391 	fid_index = mlxsw_sp_fid_index(rif->fid);
8392 
8393 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
8394 	if (err)
8395 		return err;
8396 
8397 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
8398 				dev->mtu);
8399 	if (err)
8400 		goto err_rif_edit;
8401 
8402 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
8403 	if (err)
8404 		goto err_rif_fdb_op;
8405 
8406 	if (rif->mtu != dev->mtu) {
8407 		struct mlxsw_sp_vr *vr;
8408 		int i;
8409 
8410 		/* The RIF is relevant only to its mr_table instance, as unlike
8411 		 * unicast routing, in multicast routing a RIF cannot be shared
8412 		 * between several multicast routing tables.
8413 		 */
8414 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
8415 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8416 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
8417 						   rif, dev->mtu);
8418 	}
8419 
8420 	ether_addr_copy(rif->addr, dev->dev_addr);
8421 	rif->mtu = dev->mtu;
8422 
8423 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
8424 
8425 	return 0;
8426 
8427 err_rif_fdb_op:
8428 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
8429 err_rif_edit:
8430 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
8431 	return err;
8432 }
8433 
8434 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
8435 			    struct netdev_notifier_pre_changeaddr_info *info)
8436 {
8437 	struct netlink_ext_ack *extack;
8438 
8439 	extack = netdev_notifier_info_to_extack(&info->info);
8440 	return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
8441 						   info->dev_addr, extack);
8442 }
8443 
8444 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
8445 					 unsigned long event, void *ptr)
8446 {
8447 	struct mlxsw_sp *mlxsw_sp;
8448 	struct mlxsw_sp_rif *rif;
8449 	int err = 0;
8450 
8451 	mlxsw_sp = mlxsw_sp_lower_get(dev);
8452 	if (!mlxsw_sp)
8453 		return 0;
8454 
8455 	mutex_lock(&mlxsw_sp->router->lock);
8456 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8457 	if (!rif)
8458 		goto out;
8459 
8460 	switch (event) {
8461 	case NETDEV_CHANGEMTU:
8462 	case NETDEV_CHANGEADDR:
8463 		err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
8464 		break;
8465 	case NETDEV_PRE_CHANGEADDR:
8466 		err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
8467 		break;
8468 	}
8469 
8470 out:
8471 	mutex_unlock(&mlxsw_sp->router->lock);
8472 	return err;
8473 }
8474 
8475 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
8476 				  struct net_device *l3_dev,
8477 				  struct netlink_ext_ack *extack)
8478 {
8479 	struct mlxsw_sp_rif *rif;
8480 
8481 	/* If netdev is already associated with a RIF, then we need to
8482 	 * destroy it and create a new one with the new virtual router ID.
8483 	 */
8484 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8485 	if (rif)
8486 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
8487 					  extack);
8488 
8489 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
8490 }
8491 
8492 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
8493 				    struct net_device *l3_dev)
8494 {
8495 	struct mlxsw_sp_rif *rif;
8496 
8497 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8498 	if (!rif)
8499 		return;
8500 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
8501 }
8502 
8503 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
8504 				 struct netdev_notifier_changeupper_info *info)
8505 {
8506 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
8507 	int err = 0;
8508 
8509 	/* We do not create a RIF for a macvlan, but only use it to
8510 	 * direct more MAC addresses to the router.
8511 	 */
8512 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
8513 		return 0;
8514 
8515 	mutex_lock(&mlxsw_sp->router->lock);
8516 	switch (event) {
8517 	case NETDEV_PRECHANGEUPPER:
8518 		break;
8519 	case NETDEV_CHANGEUPPER:
8520 		if (info->linking) {
8521 			struct netlink_ext_ack *extack;
8522 
8523 			extack = netdev_notifier_info_to_extack(&info->info);
8524 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
8525 		} else {
8526 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
8527 		}
8528 		break;
8529 	}
8530 	mutex_unlock(&mlxsw_sp->router->lock);
8531 
8532 	return err;
8533 }
8534 
8535 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
8536 					struct netdev_nested_priv *priv)
8537 {
8538 	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
8539 
8540 	if (!netif_is_macvlan(dev))
8541 		return 0;
8542 
8543 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
8544 				   mlxsw_sp_fid_index(rif->fid), false);
8545 }
8546 
8547 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
8548 {
8549 	struct netdev_nested_priv priv = {
8550 		.data = (void *)rif,
8551 	};
8552 
8553 	if (!netif_is_macvlan_port(rif->dev))
8554 		return 0;
8555 
8556 	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
8557 	return netdev_walk_all_upper_dev_rcu(rif->dev,
8558 					     __mlxsw_sp_rif_macvlan_flush, &priv);
8559 }
8560 
8561 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
8562 				       const struct mlxsw_sp_rif_params *params)
8563 {
8564 	struct mlxsw_sp_rif_subport *rif_subport;
8565 
8566 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8567 	refcount_set(&rif_subport->ref_count, 1);
8568 	rif_subport->vid = params->vid;
8569 	rif_subport->lag = params->lag;
8570 	if (params->lag)
8571 		rif_subport->lag_id = params->lag_id;
8572 	else
8573 		rif_subport->system_port = params->system_port;
8574 }
8575 
8576 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
8577 {
8578 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8579 	struct mlxsw_sp_rif_subport *rif_subport;
8580 	char ritr_pl[MLXSW_REG_RITR_LEN];
8581 
8582 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8583 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
8584 			    rif->rif_index, rif->vr_id, rif->dev->mtu);
8585 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
8586 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
8587 				  rif_subport->lag ? rif_subport->lag_id :
8588 						     rif_subport->system_port,
8589 				  rif_subport->vid);
8590 
8591 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8592 }
8593 
8594 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
8595 {
8596 	int err;
8597 
8598 	err = mlxsw_sp_rif_subport_op(rif, true);
8599 	if (err)
8600 		return err;
8601 
8602 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
8603 				  mlxsw_sp_fid_index(rif->fid), true);
8604 	if (err)
8605 		goto err_rif_fdb_op;
8606 
8607 	mlxsw_sp_fid_rif_set(rif->fid, rif);
8608 	return 0;
8609 
8610 err_rif_fdb_op:
8611 	mlxsw_sp_rif_subport_op(rif, false);
8612 	return err;
8613 }
8614 
8615 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
8616 {
8617 	struct mlxsw_sp_fid *fid = rif->fid;
8618 
8619 	mlxsw_sp_fid_rif_set(fid, NULL);
8620 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
8621 			    mlxsw_sp_fid_index(fid), false);
8622 	mlxsw_sp_rif_macvlan_flush(rif);
8623 	mlxsw_sp_rif_subport_op(rif, false);
8624 }
8625 
8626 static struct mlxsw_sp_fid *
8627 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
8628 			     struct netlink_ext_ack *extack)
8629 {
8630 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
8631 }
8632 
8633 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
8634 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
8635 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
8636 	.setup			= mlxsw_sp_rif_subport_setup,
8637 	.configure		= mlxsw_sp_rif_subport_configure,
8638 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
8639 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
8640 };
8641 
8642 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
8643 				    enum mlxsw_reg_ritr_if_type type,
8644 				    u16 vid_fid, bool enable)
8645 {
8646 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8647 	char ritr_pl[MLXSW_REG_RITR_LEN];
8648 
8649 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
8650 			    rif->dev->mtu);
8651 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
8652 	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
8653 
8654 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8655 }
8656 
8657 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
8658 {
8659 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
8660 }
8661 
8662 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
8663 {
8664 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8665 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
8666 	int err;
8667 
8668 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
8669 				       true);
8670 	if (err)
8671 		return err;
8672 
8673 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
8674 				     mlxsw_sp_router_port(mlxsw_sp), true);
8675 	if (err)
8676 		goto err_fid_mc_flood_set;
8677 
8678 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
8679 				     mlxsw_sp_router_port(mlxsw_sp), true);
8680 	if (err)
8681 		goto err_fid_bc_flood_set;
8682 
8683 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
8684 				  mlxsw_sp_fid_index(rif->fid), true);
8685 	if (err)
8686 		goto err_rif_fdb_op;
8687 
8688 	mlxsw_sp_fid_rif_set(rif->fid, rif);
8689 	return 0;
8690 
8691 err_rif_fdb_op:
8692 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
8693 			       mlxsw_sp_router_port(mlxsw_sp), false);
8694 err_fid_bc_flood_set:
8695 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
8696 			       mlxsw_sp_router_port(mlxsw_sp), false);
8697 err_fid_mc_flood_set:
8698 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
8699 	return err;
8700 }
8701 
8702 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
8703 {
8704 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
8705 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8706 	struct mlxsw_sp_fid *fid = rif->fid;
8707 
8708 	mlxsw_sp_fid_rif_set(fid, NULL);
8709 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
8710 			    mlxsw_sp_fid_index(fid), false);
8711 	mlxsw_sp_rif_macvlan_flush(rif);
8712 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
8713 			       mlxsw_sp_router_port(mlxsw_sp), false);
8714 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
8715 			       mlxsw_sp_router_port(mlxsw_sp), false);
8716 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
8717 }
8718 
8719 static struct mlxsw_sp_fid *
8720 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
8721 			 struct netlink_ext_ack *extack)
8722 {
8723 	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
8724 }
8725 
8726 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
8727 {
8728 	struct switchdev_notifier_fdb_info info;
8729 	struct net_device *dev;
8730 
8731 	dev = br_fdb_find_port(rif->dev, mac, 0);
8732 	if (!dev)
8733 		return;
8734 
8735 	info.addr = mac;
8736 	info.vid = 0;
8737 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
8738 				 NULL);
8739 }
8740 
8741 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
8742 	.type			= MLXSW_SP_RIF_TYPE_FID,
8743 	.rif_size		= sizeof(struct mlxsw_sp_rif),
8744 	.configure		= mlxsw_sp_rif_fid_configure,
8745 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
8746 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
8747 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
8748 };
8749 
8750 static struct mlxsw_sp_fid *
8751 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
8752 			  struct netlink_ext_ack *extack)
8753 {
8754 	struct net_device *br_dev;
8755 	u16 vid;
8756 	int err;
8757 
8758 	if (is_vlan_dev(rif->dev)) {
8759 		vid = vlan_dev_vlan_id(rif->dev);
8760 		br_dev = vlan_dev_real_dev(rif->dev);
8761 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
8762 			return ERR_PTR(-EINVAL);
8763 	} else {
8764 		err = br_vlan_get_pvid(rif->dev, &vid);
8765 		if (err < 0 || !vid) {
8766 			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
8767 			return ERR_PTR(-EINVAL);
8768 		}
8769 	}
8770 
8771 	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
8772 }
8773 
8774 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
8775 {
8776 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
8777 	struct switchdev_notifier_fdb_info info;
8778 	struct net_device *br_dev;
8779 	struct net_device *dev;
8780 
8781 	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
8782 	dev = br_fdb_find_port(br_dev, mac, vid);
8783 	if (!dev)
8784 		return;
8785 
8786 	info.addr = mac;
8787 	info.vid = vid;
8788 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
8789 				 NULL);
8790 }
8791 
8792 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
8793 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
8794 	.rif_size		= sizeof(struct mlxsw_sp_rif),
8795 	.configure		= mlxsw_sp_rif_fid_configure,
8796 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
8797 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
8798 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
8799 };
8800 
8801 static struct mlxsw_sp_rif_ipip_lb *
8802 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
8803 {
8804 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
8805 }
8806 
8807 static void
8808 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
8809 			   const struct mlxsw_sp_rif_params *params)
8810 {
8811 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
8812 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
8813 
8814 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
8815 				 common);
8816 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
8817 	rif_lb->lb_config = params_lb->lb_config;
8818 }
8819 
8820 static int
8821 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
8822 {
8823 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
8824 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
8825 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8826 	struct mlxsw_sp_vr *ul_vr;
8827 	int err;
8828 
8829 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
8830 	if (IS_ERR(ul_vr))
8831 		return PTR_ERR(ul_vr);
8832 
8833 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
8834 	if (err)
8835 		goto err_loopback_op;
8836 
8837 	lb_rif->ul_vr_id = ul_vr->id;
8838 	lb_rif->ul_rif_id = 0;
8839 	++ul_vr->rif_count;
8840 	return 0;
8841 
8842 err_loopback_op:
8843 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
8844 	return err;
8845 }
8846 
8847 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
8848 {
8849 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
8850 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8851 	struct mlxsw_sp_vr *ul_vr;
8852 
8853 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
8854 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
8855 
8856 	--ul_vr->rif_count;
8857 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
8858 }
8859 
8860 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
8861 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
8862 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
8863 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
8864 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
8865 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
8866 };
8867 
8868 const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
8869 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
8870 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
8871 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
8872 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
8873 };
8874 
8875 static int
8876 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
8877 {
8878 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
8879 	char ritr_pl[MLXSW_REG_RITR_LEN];
8880 
8881 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
8882 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
8883 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
8884 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
8885 
8886 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8887 }
8888 
8889 static struct mlxsw_sp_rif *
8890 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
8891 		       struct netlink_ext_ack *extack)
8892 {
8893 	struct mlxsw_sp_rif *ul_rif;
8894 	u16 rif_index;
8895 	int err;
8896 
8897 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
8898 	if (err) {
8899 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8900 		return ERR_PTR(err);
8901 	}
8902 
8903 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
8904 	if (!ul_rif)
8905 		return ERR_PTR(-ENOMEM);
8906 
8907 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
8908 	ul_rif->mlxsw_sp = mlxsw_sp;
8909 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
8910 	if (err)
8911 		goto ul_rif_op_err;
8912 
8913 	return ul_rif;
8914 
8915 ul_rif_op_err:
8916 	mlxsw_sp->router->rifs[rif_index] = NULL;
8917 	kfree(ul_rif);
8918 	return ERR_PTR(err);
8919 }
8920 
8921 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
8922 {
8923 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
8924 
8925 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
8926 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
8927 	kfree(ul_rif);
8928 }
8929 
8930 static struct mlxsw_sp_rif *
8931 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
8932 		    struct netlink_ext_ack *extack)
8933 {
8934 	struct mlxsw_sp_vr *vr;
8935 	int err;
8936 
8937 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
8938 	if (IS_ERR(vr))
8939 		return ERR_CAST(vr);
8940 
8941 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
8942 		return vr->ul_rif;
8943 
8944 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
8945 	if (IS_ERR(vr->ul_rif)) {
8946 		err = PTR_ERR(vr->ul_rif);
8947 		goto err_ul_rif_create;
8948 	}
8949 
8950 	vr->rif_count++;
8951 	refcount_set(&vr->ul_rif_refcnt, 1);
8952 
8953 	return vr->ul_rif;
8954 
8955 err_ul_rif_create:
8956 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8957 	return ERR_PTR(err);
8958 }
8959 
8960 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
8961 {
8962 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
8963 	struct mlxsw_sp_vr *vr;
8964 
8965 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
8966 
8967 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
8968 		return;
8969 
8970 	vr->rif_count--;
8971 	mlxsw_sp_ul_rif_destroy(ul_rif);
8972 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8973 }
8974 
8975 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
8976 			       u16 *ul_rif_index)
8977 {
8978 	struct mlxsw_sp_rif *ul_rif;
8979 	int err = 0;
8980 
8981 	mutex_lock(&mlxsw_sp->router->lock);
8982 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
8983 	if (IS_ERR(ul_rif)) {
8984 		err = PTR_ERR(ul_rif);
8985 		goto out;
8986 	}
8987 	*ul_rif_index = ul_rif->rif_index;
8988 out:
8989 	mutex_unlock(&mlxsw_sp->router->lock);
8990 	return err;
8991 }
8992 
8993 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
8994 {
8995 	struct mlxsw_sp_rif *ul_rif;
8996 
8997 	mutex_lock(&mlxsw_sp->router->lock);
8998 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
8999 	if (WARN_ON(!ul_rif))
9000 		goto out;
9001 
9002 	mlxsw_sp_ul_rif_put(ul_rif);
9003 out:
9004 	mutex_unlock(&mlxsw_sp->router->lock);
9005 }
9006 
9007 static int
9008 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
9009 {
9010 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9011 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9012 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9013 	struct mlxsw_sp_rif *ul_rif;
9014 	int err;
9015 
9016 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
9017 	if (IS_ERR(ul_rif))
9018 		return PTR_ERR(ul_rif);
9019 
9020 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
9021 	if (err)
9022 		goto err_loopback_op;
9023 
9024 	lb_rif->ul_vr_id = 0;
9025 	lb_rif->ul_rif_id = ul_rif->rif_index;
9026 
9027 	return 0;
9028 
9029 err_loopback_op:
9030 	mlxsw_sp_ul_rif_put(ul_rif);
9031 	return err;
9032 }
9033 
9034 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9035 {
9036 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9037 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9038 	struct mlxsw_sp_rif *ul_rif;
9039 
9040 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
9041 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
9042 	mlxsw_sp_ul_rif_put(ul_rif);
9043 }
9044 
9045 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
9046 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
9047 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
9048 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
9049 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
9050 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
9051 };
9052 
9053 const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
9054 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
9055 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
9056 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
9057 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
9058 };
9059 
9060 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
9061 {
9062 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
9063 
9064 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
9065 					 sizeof(struct mlxsw_sp_rif *),
9066 					 GFP_KERNEL);
9067 	if (!mlxsw_sp->router->rifs)
9068 		return -ENOMEM;
9069 
9070 	return 0;
9071 }
9072 
9073 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
9074 {
9075 	int i;
9076 
9077 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
9078 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
9079 
9080 	kfree(mlxsw_sp->router->rifs);
9081 }
9082 
9083 static int
9084 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
9085 {
9086 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
9087 
9088 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
9089 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
9090 }
9091 
9092 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
9093 {
9094 	int err;
9095 
9096 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
9097 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
9098 
9099 	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
9100 	if (err)
9101 		return err;
9102 	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
9103 	if (err)
9104 		return err;
9105 
9106 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
9107 }
9108 
9109 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
9110 {
9111 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
9112 }
9113 
9114 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
9115 {
9116 	struct mlxsw_sp_router *router;
9117 
9118 	/* Flush pending FIB notifications and then flush the device's
9119 	 * table before requesting another dump. The FIB notification
9120 	 * block is unregistered, so no need to take RTNL.
9121 	 */
9122 	mlxsw_core_flush_owq();
9123 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
9124 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
9125 }
9126 
9127 #ifdef CONFIG_IP_ROUTE_MULTIPATH
9128 static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
9129 {
9130 	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
9131 }
9132 
9133 static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
9134 {
9135 	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
9136 }
9137 
9138 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
9139 {
9140 	struct net *net = mlxsw_sp_net(mlxsw_sp);
9141 	bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
9142 
9143 	mlxsw_sp_mp_hash_header_set(recr2_pl,
9144 				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
9145 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
9146 	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
9147 	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
9148 	if (only_l3)
9149 		return;
9150 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
9151 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
9152 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
9153 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
9154 }
9155 
9156 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
9157 {
9158 	bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
9159 
9160 	mlxsw_sp_mp_hash_header_set(recr2_pl,
9161 				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
9162 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
9163 	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
9164 	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
9165 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
9166 	if (only_l3) {
9167 		mlxsw_sp_mp_hash_field_set(recr2_pl,
9168 					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
9169 	} else {
9170 		mlxsw_sp_mp_hash_header_set(recr2_pl,
9171 					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
9172 		mlxsw_sp_mp_hash_field_set(recr2_pl,
9173 					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
9174 		mlxsw_sp_mp_hash_field_set(recr2_pl,
9175 					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
9176 	}
9177 }
9178 
9179 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
9180 {
9181 	char recr2_pl[MLXSW_REG_RECR2_LEN];
9182 	u32 seed;
9183 
9184 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
9185 	mlxsw_reg_recr2_pack(recr2_pl, seed);
9186 	mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
9187 	mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
9188 
9189 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
9190 }
9191 #else
9192 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
9193 {
9194 	return 0;
9195 }
9196 #endif
9197 
9198 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
9199 {
9200 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
9201 	unsigned int i;
9202 
9203 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
9204 
9205 	/* HW is determining switch priority based on DSCP-bits, but the
9206 	 * kernel is still doing that based on the ToS. Since there's a
9207 	 * mismatch in bits we need to make sure to translate the right
9208 	 * value ToS would observe, skipping the 2 least-significant ECN bits.
9209 	 */
9210 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
9211 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
9212 
9213 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
9214 }
9215 
9216 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
9217 {
9218 	struct net *net = mlxsw_sp_net(mlxsw_sp);
9219 	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
9220 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
9221 	u64 max_rifs;
9222 
9223 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
9224 		return -EIO;
9225 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
9226 
9227 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
9228 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
9229 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
9230 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
9231 }
9232 
9233 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
9234 {
9235 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
9236 
9237 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
9238 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
9239 }
9240 
9241 static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
9242 	.init = mlxsw_sp_router_ll_basic_init,
9243 	.ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
9244 	.ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
9245 	.raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
9246 	.fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
9247 	.fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
9248 	.fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
9249 	.fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
9250 	.fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
9251 	.fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
9252 	.fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
9253 	.fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
9254 };
9255 
9256 static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
9257 {
9258 	size_t max_size = 0;
9259 	int i;
9260 
9261 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
9262 		size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
9263 
9264 		if (size > max_size)
9265 			max_size = size;
9266 	}
9267 	router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
9268 				    GFP_KERNEL);
9269 	if (!router->ll_op_ctx)
9270 		return -ENOMEM;
9271 	INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
9272 	return 0;
9273 }
9274 
9275 static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
9276 {
9277 	WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
9278 	kfree(router->ll_op_ctx);
9279 }
9280 
9281 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
9282 {
9283 	u16 lb_rif_index;
9284 	int err;
9285 
9286 	/* Create a generic loopback RIF associated with the main table
9287 	 * (default VRF). Any table can be used, but the main table exists
9288 	 * anyway, so we do not waste resources.
9289 	 */
9290 	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
9291 					 &lb_rif_index);
9292 	if (err)
9293 		return err;
9294 
9295 	mlxsw_sp->router->lb_rif_index = lb_rif_index;
9296 
9297 	return 0;
9298 }
9299 
9300 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
9301 {
9302 	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
9303 }
9304 
9305 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
9306 			 struct netlink_ext_ack *extack)
9307 {
9308 	struct mlxsw_sp_router *router;
9309 	int err;
9310 
9311 	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
9312 	if (!router)
9313 		return -ENOMEM;
9314 	mutex_init(&router->lock);
9315 	mlxsw_sp->router = router;
9316 	router->mlxsw_sp = mlxsw_sp;
9317 
9318 	err = mlxsw_sp_router_xm_init(mlxsw_sp);
9319 	if (err)
9320 		goto err_xm_init;
9321 
9322 	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
9323 						       &mlxsw_sp_router_ll_xm_ops :
9324 						       &mlxsw_sp_router_ll_basic_ops;
9325 	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
9326 
9327 	err = mlxsw_sp_router_ll_op_ctx_init(router);
9328 	if (err)
9329 		goto err_ll_op_ctx_init;
9330 
9331 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
9332 	err = __mlxsw_sp_router_init(mlxsw_sp);
9333 	if (err)
9334 		goto err_router_init;
9335 
9336 	err = mlxsw_sp_rifs_init(mlxsw_sp);
9337 	if (err)
9338 		goto err_rifs_init;
9339 
9340 	err = mlxsw_sp_ipips_init(mlxsw_sp);
9341 	if (err)
9342 		goto err_ipips_init;
9343 
9344 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
9345 			      &mlxsw_sp_nexthop_ht_params);
9346 	if (err)
9347 		goto err_nexthop_ht_init;
9348 
9349 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
9350 			      &mlxsw_sp_nexthop_group_ht_params);
9351 	if (err)
9352 		goto err_nexthop_group_ht_init;
9353 
9354 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
9355 	err = mlxsw_sp_lpm_init(mlxsw_sp);
9356 	if (err)
9357 		goto err_lpm_init;
9358 
9359 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
9360 	if (err)
9361 		goto err_mr_init;
9362 
9363 	err = mlxsw_sp_vrs_init(mlxsw_sp);
9364 	if (err)
9365 		goto err_vrs_init;
9366 
9367 	err = mlxsw_sp_lb_rif_init(mlxsw_sp);
9368 	if (err)
9369 		goto err_lb_rif_init;
9370 
9371 	err = mlxsw_sp_neigh_init(mlxsw_sp);
9372 	if (err)
9373 		goto err_neigh_init;
9374 
9375 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
9376 	if (err)
9377 		goto err_mp_hash_init;
9378 
9379 	err = mlxsw_sp_dscp_init(mlxsw_sp);
9380 	if (err)
9381 		goto err_dscp_init;
9382 
9383 	INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
9384 	INIT_LIST_HEAD(&router->fib_event_queue);
9385 	spin_lock_init(&router->fib_event_queue_lock);
9386 
9387 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
9388 	err = register_inetaddr_notifier(&router->inetaddr_nb);
9389 	if (err)
9390 		goto err_register_inetaddr_notifier;
9391 
9392 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
9393 	err = register_inet6addr_notifier(&router->inet6addr_nb);
9394 	if (err)
9395 		goto err_register_inet6addr_notifier;
9396 
9397 	mlxsw_sp->router->netevent_nb.notifier_call =
9398 		mlxsw_sp_router_netevent_event;
9399 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
9400 	if (err)
9401 		goto err_register_netevent_notifier;
9402 
9403 	mlxsw_sp->router->nexthop_nb.notifier_call =
9404 		mlxsw_sp_nexthop_obj_event;
9405 	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
9406 					&mlxsw_sp->router->nexthop_nb,
9407 					extack);
9408 	if (err)
9409 		goto err_register_nexthop_notifier;
9410 
9411 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
9412 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
9413 				    &mlxsw_sp->router->fib_nb,
9414 				    mlxsw_sp_router_fib_dump_flush, extack);
9415 	if (err)
9416 		goto err_register_fib_notifier;
9417 
9418 	return 0;
9419 
9420 err_register_fib_notifier:
9421 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
9422 				    &mlxsw_sp->router->nexthop_nb);
9423 err_register_nexthop_notifier:
9424 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
9425 err_register_netevent_notifier:
9426 	unregister_inet6addr_notifier(&router->inet6addr_nb);
9427 err_register_inet6addr_notifier:
9428 	unregister_inetaddr_notifier(&router->inetaddr_nb);
9429 err_register_inetaddr_notifier:
9430 	mlxsw_core_flush_owq();
9431 	WARN_ON(!list_empty(&router->fib_event_queue));
9432 err_dscp_init:
9433 err_mp_hash_init:
9434 	mlxsw_sp_neigh_fini(mlxsw_sp);
9435 err_neigh_init:
9436 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
9437 err_lb_rif_init:
9438 	mlxsw_sp_vrs_fini(mlxsw_sp);
9439 err_vrs_init:
9440 	mlxsw_sp_mr_fini(mlxsw_sp);
9441 err_mr_init:
9442 	mlxsw_sp_lpm_fini(mlxsw_sp);
9443 err_lpm_init:
9444 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
9445 err_nexthop_group_ht_init:
9446 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
9447 err_nexthop_ht_init:
9448 	mlxsw_sp_ipips_fini(mlxsw_sp);
9449 err_ipips_init:
9450 	mlxsw_sp_rifs_fini(mlxsw_sp);
9451 err_rifs_init:
9452 	__mlxsw_sp_router_fini(mlxsw_sp);
9453 err_router_init:
9454 	mlxsw_sp_router_ll_op_ctx_fini(router);
9455 err_ll_op_ctx_init:
9456 	mlxsw_sp_router_xm_fini(mlxsw_sp);
9457 err_xm_init:
9458 	mutex_destroy(&mlxsw_sp->router->lock);
9459 	kfree(mlxsw_sp->router);
9460 	return err;
9461 }
9462 
9463 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
9464 {
9465 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
9466 				&mlxsw_sp->router->fib_nb);
9467 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
9468 				    &mlxsw_sp->router->nexthop_nb);
9469 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
9470 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
9471 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
9472 	mlxsw_core_flush_owq();
9473 	WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
9474 	mlxsw_sp_neigh_fini(mlxsw_sp);
9475 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
9476 	mlxsw_sp_vrs_fini(mlxsw_sp);
9477 	mlxsw_sp_mr_fini(mlxsw_sp);
9478 	mlxsw_sp_lpm_fini(mlxsw_sp);
9479 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
9480 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
9481 	mlxsw_sp_ipips_fini(mlxsw_sp);
9482 	mlxsw_sp_rifs_fini(mlxsw_sp);
9483 	__mlxsw_sp_router_fini(mlxsw_sp);
9484 	mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
9485 	mlxsw_sp_router_xm_fini(mlxsw_sp);
9486 	mutex_destroy(&mlxsw_sp->router->lock);
9487 	kfree(mlxsw_sp->router);
9488 }
9489