1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
8 #include <linux/in6.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <net/netevent.h>
21 #include <net/neighbour.h>
22 #include <net/arp.h>
23 #include <net/ip_fib.h>
24 #include <net/ip6_fib.h>
25 #include <net/nexthop.h>
26 #include <net/fib_rules.h>
27 #include <net/ip_tunnels.h>
28 #include <net/l3mdev.h>
29 #include <net/addrconf.h>
30 #include <net/ndisc.h>
31 #include <net/ipv6.h>
32 #include <net/fib_notifier.h>
33 #include <net/switchdev.h>
34 
35 #include "spectrum.h"
36 #include "core.h"
37 #include "reg.h"
38 #include "spectrum_cnt.h"
39 #include "spectrum_dpipe.h"
40 #include "spectrum_ipip.h"
41 #include "spectrum_mr.h"
42 #include "spectrum_mr_tcam.h"
43 #include "spectrum_router.h"
44 #include "spectrum_span.h"
45 
46 struct mlxsw_sp_fib;
47 struct mlxsw_sp_vr;
48 struct mlxsw_sp_lpm_tree;
49 struct mlxsw_sp_rif_ops;
50 
51 struct mlxsw_sp_router {
52 	struct mlxsw_sp *mlxsw_sp;
53 	struct mlxsw_sp_rif **rifs;
54 	struct mlxsw_sp_vr *vrs;
55 	struct rhashtable neigh_ht;
56 	struct rhashtable nexthop_group_ht;
57 	struct rhashtable nexthop_ht;
58 	struct list_head nexthop_list;
59 	struct {
60 		/* One tree for each protocol: IPv4 and IPv6 */
61 		struct mlxsw_sp_lpm_tree *proto_trees[2];
62 		struct mlxsw_sp_lpm_tree *trees;
63 		unsigned int tree_count;
64 	} lpm;
65 	struct {
66 		struct delayed_work dw;
67 		unsigned long interval;	/* ms */
68 	} neighs_update;
69 	struct delayed_work nexthop_probe_dw;
70 #define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
71 	struct list_head nexthop_neighs_list;
72 	struct list_head ipip_list;
73 	bool aborted;
74 	struct notifier_block fib_nb;
75 	struct notifier_block netevent_nb;
76 	struct notifier_block inetaddr_nb;
77 	struct notifier_block inet6addr_nb;
78 	const struct mlxsw_sp_rif_ops **rif_ops_arr;
79 	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
80 	u32 adj_discard_index;
81 	bool adj_discard_index_valid;
82 };
83 
84 struct mlxsw_sp_rif {
85 	struct list_head nexthop_list;
86 	struct list_head neigh_list;
87 	struct net_device *dev; /* NULL for underlay RIF */
88 	struct mlxsw_sp_fid *fid;
89 	unsigned char addr[ETH_ALEN];
90 	int mtu;
91 	u16 rif_index;
92 	u16 vr_id;
93 	const struct mlxsw_sp_rif_ops *ops;
94 	struct mlxsw_sp *mlxsw_sp;
95 
96 	unsigned int counter_ingress;
97 	bool counter_ingress_valid;
98 	unsigned int counter_egress;
99 	bool counter_egress_valid;
100 };
101 
102 struct mlxsw_sp_rif_params {
103 	struct net_device *dev;
104 	union {
105 		u16 system_port;
106 		u16 lag_id;
107 	};
108 	u16 vid;
109 	bool lag;
110 };
111 
112 struct mlxsw_sp_rif_subport {
113 	struct mlxsw_sp_rif common;
114 	refcount_t ref_count;
115 	union {
116 		u16 system_port;
117 		u16 lag_id;
118 	};
119 	u16 vid;
120 	bool lag;
121 };
122 
123 struct mlxsw_sp_rif_ipip_lb {
124 	struct mlxsw_sp_rif common;
125 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
126 	u16 ul_vr_id; /* Reserved for Spectrum-2. */
127 	u16 ul_rif_id; /* Reserved for Spectrum. */
128 };
129 
130 struct mlxsw_sp_rif_params_ipip_lb {
131 	struct mlxsw_sp_rif_params common;
132 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
133 };
134 
135 struct mlxsw_sp_rif_ops {
136 	enum mlxsw_sp_rif_type type;
137 	size_t rif_size;
138 
139 	void (*setup)(struct mlxsw_sp_rif *rif,
140 		      const struct mlxsw_sp_rif_params *params);
141 	int (*configure)(struct mlxsw_sp_rif *rif);
142 	void (*deconfigure)(struct mlxsw_sp_rif *rif);
143 	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
144 					 struct netlink_ext_ack *extack);
145 	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
146 };
147 
148 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
149 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
150 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
151 				  struct mlxsw_sp_lpm_tree *lpm_tree);
152 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
153 				     const struct mlxsw_sp_fib *fib,
154 				     u8 tree_id);
155 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
156 				       const struct mlxsw_sp_fib *fib);
157 
158 static unsigned int *
159 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
160 			   enum mlxsw_sp_rif_counter_dir dir)
161 {
162 	switch (dir) {
163 	case MLXSW_SP_RIF_COUNTER_EGRESS:
164 		return &rif->counter_egress;
165 	case MLXSW_SP_RIF_COUNTER_INGRESS:
166 		return &rif->counter_ingress;
167 	}
168 	return NULL;
169 }
170 
171 static bool
172 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
173 			       enum mlxsw_sp_rif_counter_dir dir)
174 {
175 	switch (dir) {
176 	case MLXSW_SP_RIF_COUNTER_EGRESS:
177 		return rif->counter_egress_valid;
178 	case MLXSW_SP_RIF_COUNTER_INGRESS:
179 		return rif->counter_ingress_valid;
180 	}
181 	return false;
182 }
183 
184 static void
185 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
186 			       enum mlxsw_sp_rif_counter_dir dir,
187 			       bool valid)
188 {
189 	switch (dir) {
190 	case MLXSW_SP_RIF_COUNTER_EGRESS:
191 		rif->counter_egress_valid = valid;
192 		break;
193 	case MLXSW_SP_RIF_COUNTER_INGRESS:
194 		rif->counter_ingress_valid = valid;
195 		break;
196 	}
197 }
198 
199 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
200 				     unsigned int counter_index, bool enable,
201 				     enum mlxsw_sp_rif_counter_dir dir)
202 {
203 	char ritr_pl[MLXSW_REG_RITR_LEN];
204 	bool is_egress = false;
205 	int err;
206 
207 	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
208 		is_egress = true;
209 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
210 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
211 	if (err)
212 		return err;
213 
214 	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
215 				    is_egress);
216 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
217 }
218 
219 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
220 				   struct mlxsw_sp_rif *rif,
221 				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
222 {
223 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
224 	unsigned int *p_counter_index;
225 	bool valid;
226 	int err;
227 
228 	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
229 	if (!valid)
230 		return -EINVAL;
231 
232 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
233 	if (!p_counter_index)
234 		return -EINVAL;
235 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
236 			     MLXSW_REG_RICNT_OPCODE_NOP);
237 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
238 	if (err)
239 		return err;
240 	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
241 	return 0;
242 }
243 
244 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
245 				      unsigned int counter_index)
246 {
247 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
248 
249 	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
250 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
251 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
252 }
253 
254 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
255 			       struct mlxsw_sp_rif *rif,
256 			       enum mlxsw_sp_rif_counter_dir dir)
257 {
258 	unsigned int *p_counter_index;
259 	int err;
260 
261 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
262 	if (!p_counter_index)
263 		return -EINVAL;
264 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
265 				     p_counter_index);
266 	if (err)
267 		return err;
268 
269 	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
270 	if (err)
271 		goto err_counter_clear;
272 
273 	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
274 					*p_counter_index, true, dir);
275 	if (err)
276 		goto err_counter_edit;
277 	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
278 	return 0;
279 
280 err_counter_edit:
281 err_counter_clear:
282 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
283 			      *p_counter_index);
284 	return err;
285 }
286 
287 void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
288 			       struct mlxsw_sp_rif *rif,
289 			       enum mlxsw_sp_rif_counter_dir dir)
290 {
291 	unsigned int *p_counter_index;
292 
293 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
294 		return;
295 
296 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
297 	if (WARN_ON(!p_counter_index))
298 		return;
299 	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
300 				  *p_counter_index, false, dir);
301 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
302 			      *p_counter_index);
303 	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
304 }
305 
306 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
307 {
308 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
309 	struct devlink *devlink;
310 
311 	devlink = priv_to_devlink(mlxsw_sp->core);
312 	if (!devlink_dpipe_table_counter_enabled(devlink,
313 						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
314 		return;
315 	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
316 }
317 
318 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
319 {
320 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
321 
322 	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
323 }
324 
325 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
326 
327 struct mlxsw_sp_prefix_usage {
328 	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
329 };
330 
331 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
332 	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
333 
334 static bool
335 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
336 			 struct mlxsw_sp_prefix_usage *prefix_usage2)
337 {
338 	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
339 }
340 
341 static void
342 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
343 			  struct mlxsw_sp_prefix_usage *prefix_usage2)
344 {
345 	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
346 }
347 
348 static void
349 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
350 			  unsigned char prefix_len)
351 {
352 	set_bit(prefix_len, prefix_usage->b);
353 }
354 
355 static void
356 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
357 			    unsigned char prefix_len)
358 {
359 	clear_bit(prefix_len, prefix_usage->b);
360 }
361 
362 struct mlxsw_sp_fib_key {
363 	unsigned char addr[sizeof(struct in6_addr)];
364 	unsigned char prefix_len;
365 };
366 
367 enum mlxsw_sp_fib_entry_type {
368 	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
369 	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
370 	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
371 	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
372 	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
373 
374 	/* This is a special case of local delivery, where a packet should be
375 	 * decapsulated on reception. Note that there is no corresponding ENCAP,
376 	 * because that's a type of next hop, not of FIB entry. (There can be
377 	 * several next hops in a REMOTE entry, and some of them may be
378 	 * encapsulating entries.)
379 	 */
380 	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
381 	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
382 };
383 
384 struct mlxsw_sp_nexthop_group;
385 
386 struct mlxsw_sp_fib_node {
387 	struct list_head entry_list;
388 	struct list_head list;
389 	struct rhash_head ht_node;
390 	struct mlxsw_sp_fib *fib;
391 	struct mlxsw_sp_fib_key key;
392 };
393 
394 struct mlxsw_sp_fib_entry_decap {
395 	struct mlxsw_sp_ipip_entry *ipip_entry;
396 	u32 tunnel_index;
397 };
398 
399 struct mlxsw_sp_fib_entry {
400 	struct list_head list;
401 	struct mlxsw_sp_fib_node *fib_node;
402 	enum mlxsw_sp_fib_entry_type type;
403 	struct list_head nexthop_group_node;
404 	struct mlxsw_sp_nexthop_group *nh_group;
405 	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
406 };
407 
408 struct mlxsw_sp_fib4_entry {
409 	struct mlxsw_sp_fib_entry common;
410 	u32 tb_id;
411 	u32 prio;
412 	u8 tos;
413 	u8 type;
414 };
415 
416 struct mlxsw_sp_fib6_entry {
417 	struct mlxsw_sp_fib_entry common;
418 	struct list_head rt6_list;
419 	unsigned int nrt6;
420 };
421 
422 struct mlxsw_sp_rt6 {
423 	struct list_head list;
424 	struct fib6_info *rt;
425 };
426 
427 struct mlxsw_sp_lpm_tree {
428 	u8 id; /* tree ID */
429 	unsigned int ref_count;
430 	enum mlxsw_sp_l3proto proto;
431 	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
432 	struct mlxsw_sp_prefix_usage prefix_usage;
433 };
434 
435 struct mlxsw_sp_fib {
436 	struct rhashtable ht;
437 	struct list_head node_list;
438 	struct mlxsw_sp_vr *vr;
439 	struct mlxsw_sp_lpm_tree *lpm_tree;
440 	enum mlxsw_sp_l3proto proto;
441 };
442 
443 struct mlxsw_sp_vr {
444 	u16 id; /* virtual router ID */
445 	u32 tb_id; /* kernel fib table id */
446 	unsigned int rif_count;
447 	struct mlxsw_sp_fib *fib4;
448 	struct mlxsw_sp_fib *fib6;
449 	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
450 	struct mlxsw_sp_rif *ul_rif;
451 	refcount_t ul_rif_refcnt;
452 };
453 
454 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
455 
456 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
457 						struct mlxsw_sp_vr *vr,
458 						enum mlxsw_sp_l3proto proto)
459 {
460 	struct mlxsw_sp_lpm_tree *lpm_tree;
461 	struct mlxsw_sp_fib *fib;
462 	int err;
463 
464 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
465 	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
466 	if (!fib)
467 		return ERR_PTR(-ENOMEM);
468 	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
469 	if (err)
470 		goto err_rhashtable_init;
471 	INIT_LIST_HEAD(&fib->node_list);
472 	fib->proto = proto;
473 	fib->vr = vr;
474 	fib->lpm_tree = lpm_tree;
475 	mlxsw_sp_lpm_tree_hold(lpm_tree);
476 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
477 	if (err)
478 		goto err_lpm_tree_bind;
479 	return fib;
480 
481 err_lpm_tree_bind:
482 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
483 err_rhashtable_init:
484 	kfree(fib);
485 	return ERR_PTR(err);
486 }
487 
488 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
489 				 struct mlxsw_sp_fib *fib)
490 {
491 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
492 	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
493 	WARN_ON(!list_empty(&fib->node_list));
494 	rhashtable_destroy(&fib->ht);
495 	kfree(fib);
496 }
497 
498 static struct mlxsw_sp_lpm_tree *
499 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
500 {
501 	static struct mlxsw_sp_lpm_tree *lpm_tree;
502 	int i;
503 
504 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
505 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
506 		if (lpm_tree->ref_count == 0)
507 			return lpm_tree;
508 	}
509 	return NULL;
510 }
511 
512 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
513 				   struct mlxsw_sp_lpm_tree *lpm_tree)
514 {
515 	char ralta_pl[MLXSW_REG_RALTA_LEN];
516 
517 	mlxsw_reg_ralta_pack(ralta_pl, true,
518 			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
519 			     lpm_tree->id);
520 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
521 }
522 
523 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
524 				   struct mlxsw_sp_lpm_tree *lpm_tree)
525 {
526 	char ralta_pl[MLXSW_REG_RALTA_LEN];
527 
528 	mlxsw_reg_ralta_pack(ralta_pl, false,
529 			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
530 			     lpm_tree->id);
531 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
532 }
533 
534 static int
535 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
536 				  struct mlxsw_sp_prefix_usage *prefix_usage,
537 				  struct mlxsw_sp_lpm_tree *lpm_tree)
538 {
539 	char ralst_pl[MLXSW_REG_RALST_LEN];
540 	u8 root_bin = 0;
541 	u8 prefix;
542 	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
543 
544 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
545 		root_bin = prefix;
546 
547 	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
548 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
549 		if (prefix == 0)
550 			continue;
551 		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
552 					 MLXSW_REG_RALST_BIN_NO_CHILD);
553 		last_prefix = prefix;
554 	}
555 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
556 }
557 
558 static struct mlxsw_sp_lpm_tree *
559 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
560 			 struct mlxsw_sp_prefix_usage *prefix_usage,
561 			 enum mlxsw_sp_l3proto proto)
562 {
563 	struct mlxsw_sp_lpm_tree *lpm_tree;
564 	int err;
565 
566 	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
567 	if (!lpm_tree)
568 		return ERR_PTR(-EBUSY);
569 	lpm_tree->proto = proto;
570 	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
571 	if (err)
572 		return ERR_PTR(err);
573 
574 	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
575 						lpm_tree);
576 	if (err)
577 		goto err_left_struct_set;
578 	memcpy(&lpm_tree->prefix_usage, prefix_usage,
579 	       sizeof(lpm_tree->prefix_usage));
580 	memset(&lpm_tree->prefix_ref_count, 0,
581 	       sizeof(lpm_tree->prefix_ref_count));
582 	lpm_tree->ref_count = 1;
583 	return lpm_tree;
584 
585 err_left_struct_set:
586 	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
587 	return ERR_PTR(err);
588 }
589 
590 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
591 				      struct mlxsw_sp_lpm_tree *lpm_tree)
592 {
593 	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
594 }
595 
596 static struct mlxsw_sp_lpm_tree *
597 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
598 		      struct mlxsw_sp_prefix_usage *prefix_usage,
599 		      enum mlxsw_sp_l3proto proto)
600 {
601 	struct mlxsw_sp_lpm_tree *lpm_tree;
602 	int i;
603 
604 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
605 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
606 		if (lpm_tree->ref_count != 0 &&
607 		    lpm_tree->proto == proto &&
608 		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
609 					     prefix_usage)) {
610 			mlxsw_sp_lpm_tree_hold(lpm_tree);
611 			return lpm_tree;
612 		}
613 	}
614 	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
615 }
616 
617 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
618 {
619 	lpm_tree->ref_count++;
620 }
621 
622 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
623 				  struct mlxsw_sp_lpm_tree *lpm_tree)
624 {
625 	if (--lpm_tree->ref_count == 0)
626 		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
627 }
628 
629 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
630 
631 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
632 {
633 	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
634 	struct mlxsw_sp_lpm_tree *lpm_tree;
635 	u64 max_trees;
636 	int err, i;
637 
638 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
639 		return -EIO;
640 
641 	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
642 	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
643 	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
644 					     sizeof(struct mlxsw_sp_lpm_tree),
645 					     GFP_KERNEL);
646 	if (!mlxsw_sp->router->lpm.trees)
647 		return -ENOMEM;
648 
649 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
650 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
651 		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
652 	}
653 
654 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
655 					 MLXSW_SP_L3_PROTO_IPV4);
656 	if (IS_ERR(lpm_tree)) {
657 		err = PTR_ERR(lpm_tree);
658 		goto err_ipv4_tree_get;
659 	}
660 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
661 
662 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
663 					 MLXSW_SP_L3_PROTO_IPV6);
664 	if (IS_ERR(lpm_tree)) {
665 		err = PTR_ERR(lpm_tree);
666 		goto err_ipv6_tree_get;
667 	}
668 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
669 
670 	return 0;
671 
672 err_ipv6_tree_get:
673 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
674 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
675 err_ipv4_tree_get:
676 	kfree(mlxsw_sp->router->lpm.trees);
677 	return err;
678 }
679 
680 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
681 {
682 	struct mlxsw_sp_lpm_tree *lpm_tree;
683 
684 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
685 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
686 
687 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
688 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
689 
690 	kfree(mlxsw_sp->router->lpm.trees);
691 }
692 
693 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
694 {
695 	return !!vr->fib4 || !!vr->fib6 ||
696 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
697 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
698 }
699 
700 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
701 {
702 	struct mlxsw_sp_vr *vr;
703 	int i;
704 
705 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
706 		vr = &mlxsw_sp->router->vrs[i];
707 		if (!mlxsw_sp_vr_is_used(vr))
708 			return vr;
709 	}
710 	return NULL;
711 }
712 
713 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
714 				     const struct mlxsw_sp_fib *fib, u8 tree_id)
715 {
716 	char raltb_pl[MLXSW_REG_RALTB_LEN];
717 
718 	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
719 			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
720 			     tree_id);
721 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
722 }
723 
724 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
725 				       const struct mlxsw_sp_fib *fib)
726 {
727 	char raltb_pl[MLXSW_REG_RALTB_LEN];
728 
729 	/* Bind to tree 0 which is default */
730 	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
731 			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
732 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
733 }
734 
735 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
736 {
737 	/* For our purpose, squash main, default and local tables into one */
738 	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
739 		tb_id = RT_TABLE_MAIN;
740 	return tb_id;
741 }
742 
743 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
744 					    u32 tb_id)
745 {
746 	struct mlxsw_sp_vr *vr;
747 	int i;
748 
749 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
750 
751 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
752 		vr = &mlxsw_sp->router->vrs[i];
753 		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
754 			return vr;
755 	}
756 	return NULL;
757 }
758 
759 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
760 				u16 *vr_id)
761 {
762 	struct mlxsw_sp_vr *vr;
763 
764 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
765 	if (!vr)
766 		return -ESRCH;
767 	*vr_id = vr->id;
768 
769 	return 0;
770 }
771 
772 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
773 					    enum mlxsw_sp_l3proto proto)
774 {
775 	switch (proto) {
776 	case MLXSW_SP_L3_PROTO_IPV4:
777 		return vr->fib4;
778 	case MLXSW_SP_L3_PROTO_IPV6:
779 		return vr->fib6;
780 	}
781 	return NULL;
782 }
783 
784 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
785 					      u32 tb_id,
786 					      struct netlink_ext_ack *extack)
787 {
788 	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
789 	struct mlxsw_sp_fib *fib4;
790 	struct mlxsw_sp_fib *fib6;
791 	struct mlxsw_sp_vr *vr;
792 	int err;
793 
794 	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
795 	if (!vr) {
796 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
797 		return ERR_PTR(-EBUSY);
798 	}
799 	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
800 	if (IS_ERR(fib4))
801 		return ERR_CAST(fib4);
802 	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
803 	if (IS_ERR(fib6)) {
804 		err = PTR_ERR(fib6);
805 		goto err_fib6_create;
806 	}
807 	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
808 					     MLXSW_SP_L3_PROTO_IPV4);
809 	if (IS_ERR(mr4_table)) {
810 		err = PTR_ERR(mr4_table);
811 		goto err_mr4_table_create;
812 	}
813 	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
814 					     MLXSW_SP_L3_PROTO_IPV6);
815 	if (IS_ERR(mr6_table)) {
816 		err = PTR_ERR(mr6_table);
817 		goto err_mr6_table_create;
818 	}
819 
820 	vr->fib4 = fib4;
821 	vr->fib6 = fib6;
822 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
823 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
824 	vr->tb_id = tb_id;
825 	return vr;
826 
827 err_mr6_table_create:
828 	mlxsw_sp_mr_table_destroy(mr4_table);
829 err_mr4_table_create:
830 	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
831 err_fib6_create:
832 	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
833 	return ERR_PTR(err);
834 }
835 
836 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
837 				struct mlxsw_sp_vr *vr)
838 {
839 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
840 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
841 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
842 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
843 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
844 	vr->fib6 = NULL;
845 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
846 	vr->fib4 = NULL;
847 }
848 
849 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
850 					   struct netlink_ext_ack *extack)
851 {
852 	struct mlxsw_sp_vr *vr;
853 
854 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
855 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
856 	if (!vr)
857 		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
858 	return vr;
859 }
860 
861 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
862 {
863 	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
864 	    list_empty(&vr->fib6->node_list) &&
865 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
866 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
867 		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
868 }
869 
870 static bool
871 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
872 				    enum mlxsw_sp_l3proto proto, u8 tree_id)
873 {
874 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
875 
876 	if (!mlxsw_sp_vr_is_used(vr))
877 		return false;
878 	if (fib->lpm_tree->id == tree_id)
879 		return true;
880 	return false;
881 }
882 
883 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
884 					struct mlxsw_sp_fib *fib,
885 					struct mlxsw_sp_lpm_tree *new_tree)
886 {
887 	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
888 	int err;
889 
890 	fib->lpm_tree = new_tree;
891 	mlxsw_sp_lpm_tree_hold(new_tree);
892 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
893 	if (err)
894 		goto err_tree_bind;
895 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
896 	return 0;
897 
898 err_tree_bind:
899 	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
900 	fib->lpm_tree = old_tree;
901 	return err;
902 }
903 
904 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
905 					 struct mlxsw_sp_fib *fib,
906 					 struct mlxsw_sp_lpm_tree *new_tree)
907 {
908 	enum mlxsw_sp_l3proto proto = fib->proto;
909 	struct mlxsw_sp_lpm_tree *old_tree;
910 	u8 old_id, new_id = new_tree->id;
911 	struct mlxsw_sp_vr *vr;
912 	int i, err;
913 
914 	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
915 	old_id = old_tree->id;
916 
917 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
918 		vr = &mlxsw_sp->router->vrs[i];
919 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
920 			continue;
921 		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
922 						   mlxsw_sp_vr_fib(vr, proto),
923 						   new_tree);
924 		if (err)
925 			goto err_tree_replace;
926 	}
927 
928 	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
929 	       sizeof(new_tree->prefix_ref_count));
930 	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
931 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
932 
933 	return 0;
934 
935 err_tree_replace:
936 	for (i--; i >= 0; i--) {
937 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
938 			continue;
939 		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
940 					     mlxsw_sp_vr_fib(vr, proto),
941 					     old_tree);
942 	}
943 	return err;
944 }
945 
946 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
947 {
948 	struct mlxsw_sp_vr *vr;
949 	u64 max_vrs;
950 	int i;
951 
952 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
953 		return -EIO;
954 
955 	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
956 	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
957 					GFP_KERNEL);
958 	if (!mlxsw_sp->router->vrs)
959 		return -ENOMEM;
960 
961 	for (i = 0; i < max_vrs; i++) {
962 		vr = &mlxsw_sp->router->vrs[i];
963 		vr->id = i;
964 	}
965 
966 	return 0;
967 }
968 
969 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
970 
971 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
972 {
973 	/* At this stage we're guaranteed not to have new incoming
974 	 * FIB notifications and the work queue is free from FIBs
975 	 * sitting on top of mlxsw netdevs. However, we can still
976 	 * have other FIBs queued. Flush the queue before flushing
977 	 * the device's tables. No need for locks, as we're the only
978 	 * writer.
979 	 */
980 	mlxsw_core_flush_owq();
981 	mlxsw_sp_router_fib_flush(mlxsw_sp);
982 	kfree(mlxsw_sp->router->vrs);
983 }
984 
985 static struct net_device *
986 __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
987 {
988 	struct ip_tunnel *tun = netdev_priv(ol_dev);
989 	struct net *net = dev_net(ol_dev);
990 
991 	return __dev_get_by_index(net, tun->parms.link);
992 }
993 
994 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
995 {
996 	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
997 
998 	if (d)
999 		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1000 	else
1001 		return RT_TABLE_MAIN;
1002 }
1003 
1004 static struct mlxsw_sp_rif *
1005 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1006 		    const struct mlxsw_sp_rif_params *params,
1007 		    struct netlink_ext_ack *extack);
1008 
1009 static struct mlxsw_sp_rif_ipip_lb *
1010 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1011 				enum mlxsw_sp_ipip_type ipipt,
1012 				struct net_device *ol_dev,
1013 				struct netlink_ext_ack *extack)
1014 {
1015 	struct mlxsw_sp_rif_params_ipip_lb lb_params;
1016 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1017 	struct mlxsw_sp_rif *rif;
1018 
1019 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1020 	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1021 		.common.dev = ol_dev,
1022 		.common.lag = false,
1023 		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1024 	};
1025 
1026 	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1027 	if (IS_ERR(rif))
1028 		return ERR_CAST(rif);
1029 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1030 }
1031 
1032 static struct mlxsw_sp_ipip_entry *
1033 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1034 			  enum mlxsw_sp_ipip_type ipipt,
1035 			  struct net_device *ol_dev)
1036 {
1037 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1038 	struct mlxsw_sp_ipip_entry *ipip_entry;
1039 	struct mlxsw_sp_ipip_entry *ret = NULL;
1040 
1041 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1042 	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1043 	if (!ipip_entry)
1044 		return ERR_PTR(-ENOMEM);
1045 
1046 	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1047 							    ol_dev, NULL);
1048 	if (IS_ERR(ipip_entry->ol_lb)) {
1049 		ret = ERR_CAST(ipip_entry->ol_lb);
1050 		goto err_ol_ipip_lb_create;
1051 	}
1052 
1053 	ipip_entry->ipipt = ipipt;
1054 	ipip_entry->ol_dev = ol_dev;
1055 
1056 	switch (ipip_ops->ul_proto) {
1057 	case MLXSW_SP_L3_PROTO_IPV4:
1058 		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
1059 		break;
1060 	case MLXSW_SP_L3_PROTO_IPV6:
1061 		WARN_ON(1);
1062 		break;
1063 	}
1064 
1065 	return ipip_entry;
1066 
1067 err_ol_ipip_lb_create:
1068 	kfree(ipip_entry);
1069 	return ret;
1070 }
1071 
1072 static void
1073 mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
1074 {
1075 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1076 	kfree(ipip_entry);
1077 }
1078 
1079 static bool
1080 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1081 				  const enum mlxsw_sp_l3proto ul_proto,
1082 				  union mlxsw_sp_l3addr saddr,
1083 				  u32 ul_tb_id,
1084 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1085 {
1086 	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1087 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1088 	union mlxsw_sp_l3addr tun_saddr;
1089 
1090 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1091 		return false;
1092 
1093 	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1094 	return tun_ul_tb_id == ul_tb_id &&
1095 	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1096 }
1097 
1098 static int
1099 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1100 			      struct mlxsw_sp_fib_entry *fib_entry,
1101 			      struct mlxsw_sp_ipip_entry *ipip_entry)
1102 {
1103 	u32 tunnel_index;
1104 	int err;
1105 
1106 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1107 				  1, &tunnel_index);
1108 	if (err)
1109 		return err;
1110 
1111 	ipip_entry->decap_fib_entry = fib_entry;
1112 	fib_entry->decap.ipip_entry = ipip_entry;
1113 	fib_entry->decap.tunnel_index = tunnel_index;
1114 	return 0;
1115 }
1116 
1117 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1118 					  struct mlxsw_sp_fib_entry *fib_entry)
1119 {
1120 	/* Unlink this node from the IPIP entry that it's the decap entry of. */
1121 	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1122 	fib_entry->decap.ipip_entry = NULL;
1123 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1124 			   1, fib_entry->decap.tunnel_index);
1125 }
1126 
1127 static struct mlxsw_sp_fib_node *
1128 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1129 			 size_t addr_len, unsigned char prefix_len);
1130 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1131 				     struct mlxsw_sp_fib_entry *fib_entry);
1132 
1133 static void
1134 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1135 				 struct mlxsw_sp_ipip_entry *ipip_entry)
1136 {
1137 	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1138 
1139 	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1140 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1141 
1142 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1143 }
1144 
1145 static void
1146 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1147 				  struct mlxsw_sp_ipip_entry *ipip_entry,
1148 				  struct mlxsw_sp_fib_entry *decap_fib_entry)
1149 {
1150 	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1151 					  ipip_entry))
1152 		return;
1153 	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1154 
1155 	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1156 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1157 }
1158 
1159 static struct mlxsw_sp_fib_entry *
1160 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1161 				     enum mlxsw_sp_l3proto proto,
1162 				     const union mlxsw_sp_l3addr *addr,
1163 				     enum mlxsw_sp_fib_entry_type type)
1164 {
1165 	struct mlxsw_sp_fib_entry *fib_entry;
1166 	struct mlxsw_sp_fib_node *fib_node;
1167 	unsigned char addr_prefix_len;
1168 	struct mlxsw_sp_fib *fib;
1169 	struct mlxsw_sp_vr *vr;
1170 	const void *addrp;
1171 	size_t addr_len;
1172 	u32 addr4;
1173 
1174 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1175 	if (!vr)
1176 		return NULL;
1177 	fib = mlxsw_sp_vr_fib(vr, proto);
1178 
1179 	switch (proto) {
1180 	case MLXSW_SP_L3_PROTO_IPV4:
1181 		addr4 = be32_to_cpu(addr->addr4);
1182 		addrp = &addr4;
1183 		addr_len = 4;
1184 		addr_prefix_len = 32;
1185 		break;
1186 	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
1187 	default:
1188 		WARN_ON(1);
1189 		return NULL;
1190 	}
1191 
1192 	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1193 					    addr_prefix_len);
1194 	if (!fib_node || list_empty(&fib_node->entry_list))
1195 		return NULL;
1196 
1197 	fib_entry = list_first_entry(&fib_node->entry_list,
1198 				     struct mlxsw_sp_fib_entry, list);
1199 	if (fib_entry->type != type)
1200 		return NULL;
1201 
1202 	return fib_entry;
1203 }
1204 
1205 /* Given an IPIP entry, find the corresponding decap route. */
1206 static struct mlxsw_sp_fib_entry *
1207 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1208 			       struct mlxsw_sp_ipip_entry *ipip_entry)
1209 {
1210 	static struct mlxsw_sp_fib_node *fib_node;
1211 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1212 	struct mlxsw_sp_fib_entry *fib_entry;
1213 	unsigned char saddr_prefix_len;
1214 	union mlxsw_sp_l3addr saddr;
1215 	struct mlxsw_sp_fib *ul_fib;
1216 	struct mlxsw_sp_vr *ul_vr;
1217 	const void *saddrp;
1218 	size_t saddr_len;
1219 	u32 ul_tb_id;
1220 	u32 saddr4;
1221 
1222 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1223 
1224 	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1225 	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1226 	if (!ul_vr)
1227 		return NULL;
1228 
1229 	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1230 	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1231 					   ipip_entry->ol_dev);
1232 
1233 	switch (ipip_ops->ul_proto) {
1234 	case MLXSW_SP_L3_PROTO_IPV4:
1235 		saddr4 = be32_to_cpu(saddr.addr4);
1236 		saddrp = &saddr4;
1237 		saddr_len = 4;
1238 		saddr_prefix_len = 32;
1239 		break;
1240 	case MLXSW_SP_L3_PROTO_IPV6:
1241 		WARN_ON(1);
1242 		return NULL;
1243 	}
1244 
1245 	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1246 					    saddr_prefix_len);
1247 	if (!fib_node || list_empty(&fib_node->entry_list))
1248 		return NULL;
1249 
1250 	fib_entry = list_first_entry(&fib_node->entry_list,
1251 				     struct mlxsw_sp_fib_entry, list);
1252 	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1253 		return NULL;
1254 
1255 	return fib_entry;
1256 }
1257 
1258 static struct mlxsw_sp_ipip_entry *
1259 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1260 			   enum mlxsw_sp_ipip_type ipipt,
1261 			   struct net_device *ol_dev)
1262 {
1263 	struct mlxsw_sp_ipip_entry *ipip_entry;
1264 
1265 	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1266 	if (IS_ERR(ipip_entry))
1267 		return ipip_entry;
1268 
1269 	list_add_tail(&ipip_entry->ipip_list_node,
1270 		      &mlxsw_sp->router->ipip_list);
1271 
1272 	return ipip_entry;
1273 }
1274 
1275 static void
1276 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1277 			    struct mlxsw_sp_ipip_entry *ipip_entry)
1278 {
1279 	list_del(&ipip_entry->ipip_list_node);
1280 	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
1281 }
1282 
1283 static bool
1284 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1285 				  const struct net_device *ul_dev,
1286 				  enum mlxsw_sp_l3proto ul_proto,
1287 				  union mlxsw_sp_l3addr ul_dip,
1288 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1289 {
1290 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1291 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1292 
1293 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1294 		return false;
1295 
1296 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1297 						 ul_tb_id, ipip_entry);
1298 }
1299 
1300 /* Given decap parameters, find the corresponding IPIP entry. */
1301 static struct mlxsw_sp_ipip_entry *
1302 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1303 				  const struct net_device *ul_dev,
1304 				  enum mlxsw_sp_l3proto ul_proto,
1305 				  union mlxsw_sp_l3addr ul_dip)
1306 {
1307 	struct mlxsw_sp_ipip_entry *ipip_entry;
1308 
1309 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1310 			    ipip_list_node)
1311 		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1312 						      ul_proto, ul_dip,
1313 						      ipip_entry))
1314 			return ipip_entry;
1315 
1316 	return NULL;
1317 }
1318 
1319 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1320 				      const struct net_device *dev,
1321 				      enum mlxsw_sp_ipip_type *p_type)
1322 {
1323 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1324 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1325 	enum mlxsw_sp_ipip_type ipipt;
1326 
1327 	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1328 		ipip_ops = router->ipip_ops_arr[ipipt];
1329 		if (dev->type == ipip_ops->dev_type) {
1330 			if (p_type)
1331 				*p_type = ipipt;
1332 			return true;
1333 		}
1334 	}
1335 	return false;
1336 }
1337 
1338 bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1339 				const struct net_device *dev)
1340 {
1341 	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1342 }
1343 
1344 static struct mlxsw_sp_ipip_entry *
1345 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1346 				   const struct net_device *ol_dev)
1347 {
1348 	struct mlxsw_sp_ipip_entry *ipip_entry;
1349 
1350 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1351 			    ipip_list_node)
1352 		if (ipip_entry->ol_dev == ol_dev)
1353 			return ipip_entry;
1354 
1355 	return NULL;
1356 }
1357 
1358 static struct mlxsw_sp_ipip_entry *
1359 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1360 				   const struct net_device *ul_dev,
1361 				   struct mlxsw_sp_ipip_entry *start)
1362 {
1363 	struct mlxsw_sp_ipip_entry *ipip_entry;
1364 
1365 	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1366 					ipip_list_node);
1367 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1368 				     ipip_list_node) {
1369 		struct net_device *ipip_ul_dev =
1370 			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1371 
1372 		if (ipip_ul_dev == ul_dev)
1373 			return ipip_entry;
1374 	}
1375 
1376 	return NULL;
1377 }
1378 
1379 bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
1380 				const struct net_device *dev)
1381 {
1382 	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1383 }
1384 
1385 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1386 						const struct net_device *ol_dev,
1387 						enum mlxsw_sp_ipip_type ipipt)
1388 {
1389 	const struct mlxsw_sp_ipip_ops *ops
1390 		= mlxsw_sp->router->ipip_ops_arr[ipipt];
1391 
1392 	/* For deciding whether decap should be offloaded, we don't care about
1393 	 * overlay protocol, so ask whether either one is supported.
1394 	 */
1395 	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
1396 	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
1397 }
1398 
1399 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1400 						struct net_device *ol_dev)
1401 {
1402 	struct mlxsw_sp_ipip_entry *ipip_entry;
1403 	enum mlxsw_sp_l3proto ul_proto;
1404 	enum mlxsw_sp_ipip_type ipipt;
1405 	union mlxsw_sp_l3addr saddr;
1406 	u32 ul_tb_id;
1407 
1408 	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1409 	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1410 		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1411 		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1412 		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1413 		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1414 							  saddr, ul_tb_id,
1415 							  NULL)) {
1416 			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1417 								ol_dev);
1418 			if (IS_ERR(ipip_entry))
1419 				return PTR_ERR(ipip_entry);
1420 		}
1421 	}
1422 
1423 	return 0;
1424 }
1425 
1426 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1427 						   struct net_device *ol_dev)
1428 {
1429 	struct mlxsw_sp_ipip_entry *ipip_entry;
1430 
1431 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1432 	if (ipip_entry)
1433 		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1434 }
1435 
1436 static void
1437 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1438 				struct mlxsw_sp_ipip_entry *ipip_entry)
1439 {
1440 	struct mlxsw_sp_fib_entry *decap_fib_entry;
1441 
1442 	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1443 	if (decap_fib_entry)
1444 		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1445 						  decap_fib_entry);
1446 }
1447 
1448 static int
1449 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1450 			u16 ul_rif_id, bool enable)
1451 {
1452 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1453 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1454 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1455 	char ritr_pl[MLXSW_REG_RITR_LEN];
1456 	u32 saddr4;
1457 
1458 	switch (lb_cf.ul_protocol) {
1459 	case MLXSW_SP_L3_PROTO_IPV4:
1460 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1461 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1462 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1463 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1464 			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1465 			    ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
1466 		break;
1467 
1468 	case MLXSW_SP_L3_PROTO_IPV6:
1469 		return -EAFNOSUPPORT;
1470 	}
1471 
1472 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1473 }
1474 
1475 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1476 						 struct net_device *ol_dev)
1477 {
1478 	struct mlxsw_sp_ipip_entry *ipip_entry;
1479 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1480 	int err = 0;
1481 
1482 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1483 	if (ipip_entry) {
1484 		lb_rif = ipip_entry->ol_lb;
1485 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1486 					      lb_rif->ul_rif_id, true);
1487 		if (err)
1488 			goto out;
1489 		lb_rif->common.mtu = ol_dev->mtu;
1490 	}
1491 
1492 out:
1493 	return err;
1494 }
1495 
1496 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1497 						struct net_device *ol_dev)
1498 {
1499 	struct mlxsw_sp_ipip_entry *ipip_entry;
1500 
1501 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1502 	if (ipip_entry)
1503 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1504 }
1505 
1506 static void
1507 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1508 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1509 {
1510 	if (ipip_entry->decap_fib_entry)
1511 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1512 }
1513 
1514 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1515 						  struct net_device *ol_dev)
1516 {
1517 	struct mlxsw_sp_ipip_entry *ipip_entry;
1518 
1519 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1520 	if (ipip_entry)
1521 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1522 }
1523 
1524 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1525 					 struct mlxsw_sp_rif *old_rif,
1526 					 struct mlxsw_sp_rif *new_rif);
1527 static int
1528 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1529 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1530 				 bool keep_encap,
1531 				 struct netlink_ext_ack *extack)
1532 {
1533 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1534 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1535 
1536 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1537 						     ipip_entry->ipipt,
1538 						     ipip_entry->ol_dev,
1539 						     extack);
1540 	if (IS_ERR(new_lb_rif))
1541 		return PTR_ERR(new_lb_rif);
1542 	ipip_entry->ol_lb = new_lb_rif;
1543 
1544 	if (keep_encap)
1545 		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1546 					     &new_lb_rif->common);
1547 
1548 	mlxsw_sp_rif_destroy(&old_lb_rif->common);
1549 
1550 	return 0;
1551 }
1552 
1553 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1554 					struct mlxsw_sp_rif *rif);
1555 
1556 /**
1557  * Update the offload related to an IPIP entry. This always updates decap, and
1558  * in addition to that it also:
1559  * @recreate_loopback: recreates the associated loopback RIF
1560  * @keep_encap: updates next hops that use the tunnel netdevice. This is only
1561  *              relevant when recreate_loopback is true.
1562  * @update_nexthops: updates next hops, keeping the current loopback RIF. This
1563  *                   is only relevant when recreate_loopback is false.
1564  */
1565 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1566 					struct mlxsw_sp_ipip_entry *ipip_entry,
1567 					bool recreate_loopback,
1568 					bool keep_encap,
1569 					bool update_nexthops,
1570 					struct netlink_ext_ack *extack)
1571 {
1572 	int err;
1573 
1574 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1575 	 * recreate it. That creates a window of opportunity where RALUE and
1576 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1577 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1578 	 * of RALUE, demote the decap route back.
1579 	 */
1580 	if (ipip_entry->decap_fib_entry)
1581 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1582 
1583 	if (recreate_loopback) {
1584 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1585 						       keep_encap, extack);
1586 		if (err)
1587 			return err;
1588 	} else if (update_nexthops) {
1589 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1590 					    &ipip_entry->ol_lb->common);
1591 	}
1592 
1593 	if (ipip_entry->ol_dev->flags & IFF_UP)
1594 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1595 
1596 	return 0;
1597 }
1598 
1599 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1600 						struct net_device *ol_dev,
1601 						struct netlink_ext_ack *extack)
1602 {
1603 	struct mlxsw_sp_ipip_entry *ipip_entry =
1604 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1605 
1606 	if (!ipip_entry)
1607 		return 0;
1608 
1609 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1610 						   true, false, false, extack);
1611 }
1612 
1613 static int
1614 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1615 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1616 				     struct net_device *ul_dev,
1617 				     bool *demote_this,
1618 				     struct netlink_ext_ack *extack)
1619 {
1620 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1621 	enum mlxsw_sp_l3proto ul_proto;
1622 	union mlxsw_sp_l3addr saddr;
1623 
1624 	/* Moving underlay to a different VRF might cause local address
1625 	 * conflict, and the conflicting tunnels need to be demoted.
1626 	 */
1627 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1628 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1629 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1630 						 saddr, ul_tb_id,
1631 						 ipip_entry)) {
1632 		*demote_this = true;
1633 		return 0;
1634 	}
1635 
1636 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1637 						   true, true, false, extack);
1638 }
1639 
1640 static int
1641 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1642 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1643 				    struct net_device *ul_dev)
1644 {
1645 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1646 						   false, false, true, NULL);
1647 }
1648 
1649 static int
1650 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1651 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1652 				      struct net_device *ul_dev)
1653 {
1654 	/* A down underlay device causes encapsulated packets to not be
1655 	 * forwarded, but decap still works. So refresh next hops without
1656 	 * touching anything else.
1657 	 */
1658 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1659 						   false, false, true, NULL);
1660 }
1661 
1662 static int
1663 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1664 					struct net_device *ol_dev,
1665 					struct netlink_ext_ack *extack)
1666 {
1667 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1668 	struct mlxsw_sp_ipip_entry *ipip_entry;
1669 	int err;
1670 
1671 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1672 	if (!ipip_entry)
1673 		/* A change might make a tunnel eligible for offloading, but
1674 		 * that is currently not implemented. What falls to slow path
1675 		 * stays there.
1676 		 */
1677 		return 0;
1678 
1679 	/* A change might make a tunnel not eligible for offloading. */
1680 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1681 						 ipip_entry->ipipt)) {
1682 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1683 		return 0;
1684 	}
1685 
1686 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1687 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1688 	return err;
1689 }
1690 
1691 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1692 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1693 {
1694 	struct net_device *ol_dev = ipip_entry->ol_dev;
1695 
1696 	if (ol_dev->flags & IFF_UP)
1697 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1698 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1699 }
1700 
1701 /* The configuration where several tunnels have the same local address in the
1702  * same underlay table needs special treatment in the HW. That is currently not
1703  * implemented in the driver. This function finds and demotes the first tunnel
1704  * with a given source address, except the one passed in in the argument
1705  * `except'.
1706  */
1707 bool
1708 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1709 				     enum mlxsw_sp_l3proto ul_proto,
1710 				     union mlxsw_sp_l3addr saddr,
1711 				     u32 ul_tb_id,
1712 				     const struct mlxsw_sp_ipip_entry *except)
1713 {
1714 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1715 
1716 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1717 				 ipip_list_node) {
1718 		if (ipip_entry != except &&
1719 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1720 						      ul_tb_id, ipip_entry)) {
1721 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1722 			return true;
1723 		}
1724 	}
1725 
1726 	return false;
1727 }
1728 
1729 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1730 						     struct net_device *ul_dev)
1731 {
1732 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1733 
1734 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1735 				 ipip_list_node) {
1736 		struct net_device *ipip_ul_dev =
1737 			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1738 
1739 		if (ipip_ul_dev == ul_dev)
1740 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1741 	}
1742 }
1743 
1744 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1745 				     struct net_device *ol_dev,
1746 				     unsigned long event,
1747 				     struct netdev_notifier_info *info)
1748 {
1749 	struct netdev_notifier_changeupper_info *chup;
1750 	struct netlink_ext_ack *extack;
1751 
1752 	switch (event) {
1753 	case NETDEV_REGISTER:
1754 		return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1755 	case NETDEV_UNREGISTER:
1756 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1757 		return 0;
1758 	case NETDEV_UP:
1759 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1760 		return 0;
1761 	case NETDEV_DOWN:
1762 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1763 		return 0;
1764 	case NETDEV_CHANGEUPPER:
1765 		chup = container_of(info, typeof(*chup), info);
1766 		extack = info->extack;
1767 		if (netif_is_l3_master(chup->upper_dev))
1768 			return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1769 								    ol_dev,
1770 								    extack);
1771 		return 0;
1772 	case NETDEV_CHANGE:
1773 		extack = info->extack;
1774 		return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1775 							       ol_dev, extack);
1776 	case NETDEV_CHANGEMTU:
1777 		return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1778 	}
1779 	return 0;
1780 }
1781 
1782 static int
1783 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1784 				   struct mlxsw_sp_ipip_entry *ipip_entry,
1785 				   struct net_device *ul_dev,
1786 				   bool *demote_this,
1787 				   unsigned long event,
1788 				   struct netdev_notifier_info *info)
1789 {
1790 	struct netdev_notifier_changeupper_info *chup;
1791 	struct netlink_ext_ack *extack;
1792 
1793 	switch (event) {
1794 	case NETDEV_CHANGEUPPER:
1795 		chup = container_of(info, typeof(*chup), info);
1796 		extack = info->extack;
1797 		if (netif_is_l3_master(chup->upper_dev))
1798 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1799 								    ipip_entry,
1800 								    ul_dev,
1801 								    demote_this,
1802 								    extack);
1803 		break;
1804 
1805 	case NETDEV_UP:
1806 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1807 							   ul_dev);
1808 	case NETDEV_DOWN:
1809 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1810 							     ipip_entry,
1811 							     ul_dev);
1812 	}
1813 	return 0;
1814 }
1815 
1816 int
1817 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1818 				 struct net_device *ul_dev,
1819 				 unsigned long event,
1820 				 struct netdev_notifier_info *info)
1821 {
1822 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1823 	int err;
1824 
1825 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1826 								ul_dev,
1827 								ipip_entry))) {
1828 		struct mlxsw_sp_ipip_entry *prev;
1829 		bool demote_this = false;
1830 
1831 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1832 							 ul_dev, &demote_this,
1833 							 event, info);
1834 		if (err) {
1835 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1836 								 ul_dev);
1837 			return err;
1838 		}
1839 
1840 		if (demote_this) {
1841 			if (list_is_first(&ipip_entry->ipip_list_node,
1842 					  &mlxsw_sp->router->ipip_list))
1843 				prev = NULL;
1844 			else
1845 				/* This can't be cached from previous iteration,
1846 				 * because that entry could be gone now.
1847 				 */
1848 				prev = list_prev_entry(ipip_entry,
1849 						       ipip_list_node);
1850 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1851 			ipip_entry = prev;
1852 		}
1853 	}
1854 
1855 	return 0;
1856 }
1857 
1858 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1859 				      enum mlxsw_sp_l3proto ul_proto,
1860 				      const union mlxsw_sp_l3addr *ul_sip,
1861 				      u32 tunnel_index)
1862 {
1863 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1864 	struct mlxsw_sp_fib_entry *fib_entry;
1865 	int err;
1866 
1867 	/* It is valid to create a tunnel with a local IP and only later
1868 	 * assign this IP address to a local interface
1869 	 */
1870 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1871 							 ul_proto, ul_sip,
1872 							 type);
1873 	if (!fib_entry)
1874 		return 0;
1875 
1876 	fib_entry->decap.tunnel_index = tunnel_index;
1877 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1878 
1879 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1880 	if (err)
1881 		goto err_fib_entry_update;
1882 
1883 	return 0;
1884 
1885 err_fib_entry_update:
1886 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1887 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1888 	return err;
1889 }
1890 
1891 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1892 				      enum mlxsw_sp_l3proto ul_proto,
1893 				      const union mlxsw_sp_l3addr *ul_sip)
1894 {
1895 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1896 	struct mlxsw_sp_fib_entry *fib_entry;
1897 
1898 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1899 							 ul_proto, ul_sip,
1900 							 type);
1901 	if (!fib_entry)
1902 		return;
1903 
1904 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1905 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1906 }
1907 
1908 struct mlxsw_sp_neigh_key {
1909 	struct neighbour *n;
1910 };
1911 
1912 struct mlxsw_sp_neigh_entry {
1913 	struct list_head rif_list_node;
1914 	struct rhash_head ht_node;
1915 	struct mlxsw_sp_neigh_key key;
1916 	u16 rif;
1917 	bool connected;
1918 	unsigned char ha[ETH_ALEN];
1919 	struct list_head nexthop_list; /* list of nexthops using
1920 					* this neigh entry
1921 					*/
1922 	struct list_head nexthop_neighs_list_node;
1923 	unsigned int counter_index;
1924 	bool counter_valid;
1925 };
1926 
1927 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1928 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1929 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1930 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
1931 };
1932 
1933 struct mlxsw_sp_neigh_entry *
1934 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1935 			struct mlxsw_sp_neigh_entry *neigh_entry)
1936 {
1937 	if (!neigh_entry) {
1938 		if (list_empty(&rif->neigh_list))
1939 			return NULL;
1940 		else
1941 			return list_first_entry(&rif->neigh_list,
1942 						typeof(*neigh_entry),
1943 						rif_list_node);
1944 	}
1945 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
1946 		return NULL;
1947 	return list_next_entry(neigh_entry, rif_list_node);
1948 }
1949 
1950 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1951 {
1952 	return neigh_entry->key.n->tbl->family;
1953 }
1954 
1955 unsigned char *
1956 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1957 {
1958 	return neigh_entry->ha;
1959 }
1960 
1961 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1962 {
1963 	struct neighbour *n;
1964 
1965 	n = neigh_entry->key.n;
1966 	return ntohl(*((__be32 *) n->primary_key));
1967 }
1968 
1969 struct in6_addr *
1970 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1971 {
1972 	struct neighbour *n;
1973 
1974 	n = neigh_entry->key.n;
1975 	return (struct in6_addr *) &n->primary_key;
1976 }
1977 
1978 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1979 			       struct mlxsw_sp_neigh_entry *neigh_entry,
1980 			       u64 *p_counter)
1981 {
1982 	if (!neigh_entry->counter_valid)
1983 		return -EINVAL;
1984 
1985 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1986 					 p_counter, NULL);
1987 }
1988 
1989 static struct mlxsw_sp_neigh_entry *
1990 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1991 			   u16 rif)
1992 {
1993 	struct mlxsw_sp_neigh_entry *neigh_entry;
1994 
1995 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1996 	if (!neigh_entry)
1997 		return NULL;
1998 
1999 	neigh_entry->key.n = n;
2000 	neigh_entry->rif = rif;
2001 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2002 
2003 	return neigh_entry;
2004 }
2005 
2006 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2007 {
2008 	kfree(neigh_entry);
2009 }
2010 
2011 static int
2012 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2013 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2014 {
2015 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2016 				      &neigh_entry->ht_node,
2017 				      mlxsw_sp_neigh_ht_params);
2018 }
2019 
2020 static void
2021 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2022 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2023 {
2024 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2025 			       &neigh_entry->ht_node,
2026 			       mlxsw_sp_neigh_ht_params);
2027 }
2028 
2029 static bool
2030 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2031 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2032 {
2033 	struct devlink *devlink;
2034 	const char *table_name;
2035 
2036 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2037 	case AF_INET:
2038 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2039 		break;
2040 	case AF_INET6:
2041 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2042 		break;
2043 	default:
2044 		WARN_ON(1);
2045 		return false;
2046 	}
2047 
2048 	devlink = priv_to_devlink(mlxsw_sp->core);
2049 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2050 }
2051 
2052 static void
2053 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2054 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2055 {
2056 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2057 		return;
2058 
2059 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2060 		return;
2061 
2062 	neigh_entry->counter_valid = true;
2063 }
2064 
2065 static void
2066 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2067 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2068 {
2069 	if (!neigh_entry->counter_valid)
2070 		return;
2071 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2072 				   neigh_entry->counter_index);
2073 	neigh_entry->counter_valid = false;
2074 }
2075 
2076 static struct mlxsw_sp_neigh_entry *
2077 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2078 {
2079 	struct mlxsw_sp_neigh_entry *neigh_entry;
2080 	struct mlxsw_sp_rif *rif;
2081 	int err;
2082 
2083 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2084 	if (!rif)
2085 		return ERR_PTR(-EINVAL);
2086 
2087 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2088 	if (!neigh_entry)
2089 		return ERR_PTR(-ENOMEM);
2090 
2091 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2092 	if (err)
2093 		goto err_neigh_entry_insert;
2094 
2095 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2096 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2097 
2098 	return neigh_entry;
2099 
2100 err_neigh_entry_insert:
2101 	mlxsw_sp_neigh_entry_free(neigh_entry);
2102 	return ERR_PTR(err);
2103 }
2104 
2105 static void
2106 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2107 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2108 {
2109 	list_del(&neigh_entry->rif_list_node);
2110 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2111 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2112 	mlxsw_sp_neigh_entry_free(neigh_entry);
2113 }
2114 
2115 static struct mlxsw_sp_neigh_entry *
2116 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2117 {
2118 	struct mlxsw_sp_neigh_key key;
2119 
2120 	key.n = n;
2121 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2122 				      &key, mlxsw_sp_neigh_ht_params);
2123 }
2124 
2125 static void
2126 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2127 {
2128 	unsigned long interval;
2129 
2130 #if IS_ENABLED(CONFIG_IPV6)
2131 	interval = min_t(unsigned long,
2132 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2133 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2134 #else
2135 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2136 #endif
2137 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2138 }
2139 
2140 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2141 						   char *rauhtd_pl,
2142 						   int ent_index)
2143 {
2144 	struct net_device *dev;
2145 	struct neighbour *n;
2146 	__be32 dipn;
2147 	u32 dip;
2148 	u16 rif;
2149 
2150 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2151 
2152 	if (!mlxsw_sp->router->rifs[rif]) {
2153 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2154 		return;
2155 	}
2156 
2157 	dipn = htonl(dip);
2158 	dev = mlxsw_sp->router->rifs[rif]->dev;
2159 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2160 	if (!n)
2161 		return;
2162 
2163 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2164 	neigh_event_send(n, NULL);
2165 	neigh_release(n);
2166 }
2167 
2168 #if IS_ENABLED(CONFIG_IPV6)
2169 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2170 						   char *rauhtd_pl,
2171 						   int rec_index)
2172 {
2173 	struct net_device *dev;
2174 	struct neighbour *n;
2175 	struct in6_addr dip;
2176 	u16 rif;
2177 
2178 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2179 					 (char *) &dip);
2180 
2181 	if (!mlxsw_sp->router->rifs[rif]) {
2182 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2183 		return;
2184 	}
2185 
2186 	dev = mlxsw_sp->router->rifs[rif]->dev;
2187 	n = neigh_lookup(&nd_tbl, &dip, dev);
2188 	if (!n)
2189 		return;
2190 
2191 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2192 	neigh_event_send(n, NULL);
2193 	neigh_release(n);
2194 }
2195 #else
2196 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2197 						   char *rauhtd_pl,
2198 						   int rec_index)
2199 {
2200 }
2201 #endif
2202 
2203 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2204 						   char *rauhtd_pl,
2205 						   int rec_index)
2206 {
2207 	u8 num_entries;
2208 	int i;
2209 
2210 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2211 								rec_index);
2212 	/* Hardware starts counting at 0, so add 1. */
2213 	num_entries++;
2214 
2215 	/* Each record consists of several neighbour entries. */
2216 	for (i = 0; i < num_entries; i++) {
2217 		int ent_index;
2218 
2219 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2220 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2221 						       ent_index);
2222 	}
2223 
2224 }
2225 
2226 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2227 						   char *rauhtd_pl,
2228 						   int rec_index)
2229 {
2230 	/* One record contains one entry. */
2231 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2232 					       rec_index);
2233 }
2234 
2235 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2236 					      char *rauhtd_pl, int rec_index)
2237 {
2238 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2239 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2240 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2241 						       rec_index);
2242 		break;
2243 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2244 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2245 						       rec_index);
2246 		break;
2247 	}
2248 }
2249 
2250 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2251 {
2252 	u8 num_rec, last_rec_index, num_entries;
2253 
2254 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2255 	last_rec_index = num_rec - 1;
2256 
2257 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2258 		return false;
2259 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2260 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2261 		return true;
2262 
2263 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2264 								last_rec_index);
2265 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2266 		return true;
2267 	return false;
2268 }
2269 
2270 static int
2271 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2272 				       char *rauhtd_pl,
2273 				       enum mlxsw_reg_rauhtd_type type)
2274 {
2275 	int i, num_rec;
2276 	int err;
2277 
2278 	/* Make sure the neighbour's netdev isn't removed in the
2279 	 * process.
2280 	 */
2281 	rtnl_lock();
2282 	do {
2283 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2284 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2285 				      rauhtd_pl);
2286 		if (err) {
2287 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2288 			break;
2289 		}
2290 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2291 		for (i = 0; i < num_rec; i++)
2292 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2293 							  i);
2294 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2295 	rtnl_unlock();
2296 
2297 	return err;
2298 }
2299 
2300 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2301 {
2302 	enum mlxsw_reg_rauhtd_type type;
2303 	char *rauhtd_pl;
2304 	int err;
2305 
2306 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2307 	if (!rauhtd_pl)
2308 		return -ENOMEM;
2309 
2310 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2311 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2312 	if (err)
2313 		goto out;
2314 
2315 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2316 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2317 out:
2318 	kfree(rauhtd_pl);
2319 	return err;
2320 }
2321 
2322 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2323 {
2324 	struct mlxsw_sp_neigh_entry *neigh_entry;
2325 
2326 	/* Take RTNL mutex here to prevent lists from changes */
2327 	rtnl_lock();
2328 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2329 			    nexthop_neighs_list_node)
2330 		/* If this neigh have nexthops, make the kernel think this neigh
2331 		 * is active regardless of the traffic.
2332 		 */
2333 		neigh_event_send(neigh_entry->key.n, NULL);
2334 	rtnl_unlock();
2335 }
2336 
2337 static void
2338 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2339 {
2340 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2341 
2342 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2343 			       msecs_to_jiffies(interval));
2344 }
2345 
2346 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2347 {
2348 	struct mlxsw_sp_router *router;
2349 	int err;
2350 
2351 	router = container_of(work, struct mlxsw_sp_router,
2352 			      neighs_update.dw.work);
2353 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2354 	if (err)
2355 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2356 
2357 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2358 
2359 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2360 }
2361 
2362 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2363 {
2364 	struct mlxsw_sp_neigh_entry *neigh_entry;
2365 	struct mlxsw_sp_router *router;
2366 
2367 	router = container_of(work, struct mlxsw_sp_router,
2368 			      nexthop_probe_dw.work);
2369 	/* Iterate over nexthop neighbours, find those who are unresolved and
2370 	 * send arp on them. This solves the chicken-egg problem when
2371 	 * the nexthop wouldn't get offloaded until the neighbor is resolved
2372 	 * but it wouldn't get resolved ever in case traffic is flowing in HW
2373 	 * using different nexthop.
2374 	 *
2375 	 * Take RTNL mutex here to prevent lists from changes.
2376 	 */
2377 	rtnl_lock();
2378 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2379 			    nexthop_neighs_list_node)
2380 		if (!neigh_entry->connected)
2381 			neigh_event_send(neigh_entry->key.n, NULL);
2382 	rtnl_unlock();
2383 
2384 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2385 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2386 }
2387 
2388 static void
2389 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2390 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2391 			      bool removing, bool dead);
2392 
2393 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2394 {
2395 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2396 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2397 }
2398 
2399 static int
2400 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2401 				struct mlxsw_sp_neigh_entry *neigh_entry,
2402 				enum mlxsw_reg_rauht_op op)
2403 {
2404 	struct neighbour *n = neigh_entry->key.n;
2405 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2406 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2407 
2408 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2409 			      dip);
2410 	if (neigh_entry->counter_valid)
2411 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2412 					     neigh_entry->counter_index);
2413 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2414 }
2415 
2416 static int
2417 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2418 				struct mlxsw_sp_neigh_entry *neigh_entry,
2419 				enum mlxsw_reg_rauht_op op)
2420 {
2421 	struct neighbour *n = neigh_entry->key.n;
2422 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2423 	const char *dip = n->primary_key;
2424 
2425 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2426 			      dip);
2427 	if (neigh_entry->counter_valid)
2428 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2429 					     neigh_entry->counter_index);
2430 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2431 }
2432 
2433 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2434 {
2435 	struct neighbour *n = neigh_entry->key.n;
2436 
2437 	/* Packets with a link-local destination address are trapped
2438 	 * after LPM lookup and never reach the neighbour table, so
2439 	 * there is no need to program such neighbours to the device.
2440 	 */
2441 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2442 	    IPV6_ADDR_LINKLOCAL)
2443 		return true;
2444 	return false;
2445 }
2446 
2447 static void
2448 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2449 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2450 			    bool adding)
2451 {
2452 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2453 	int err;
2454 
2455 	if (!adding && !neigh_entry->connected)
2456 		return;
2457 	neigh_entry->connected = adding;
2458 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2459 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2460 						      op);
2461 		if (err)
2462 			return;
2463 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2464 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2465 			return;
2466 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2467 						      op);
2468 		if (err)
2469 			return;
2470 	} else {
2471 		WARN_ON_ONCE(1);
2472 		return;
2473 	}
2474 
2475 	if (adding)
2476 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2477 	else
2478 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2479 }
2480 
2481 void
2482 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2483 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2484 				    bool adding)
2485 {
2486 	if (adding)
2487 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2488 	else
2489 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2490 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2491 }
2492 
2493 struct mlxsw_sp_netevent_work {
2494 	struct work_struct work;
2495 	struct mlxsw_sp *mlxsw_sp;
2496 	struct neighbour *n;
2497 };
2498 
2499 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2500 {
2501 	struct mlxsw_sp_netevent_work *net_work =
2502 		container_of(work, struct mlxsw_sp_netevent_work, work);
2503 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2504 	struct mlxsw_sp_neigh_entry *neigh_entry;
2505 	struct neighbour *n = net_work->n;
2506 	unsigned char ha[ETH_ALEN];
2507 	bool entry_connected;
2508 	u8 nud_state, dead;
2509 
2510 	/* If these parameters are changed after we release the lock,
2511 	 * then we are guaranteed to receive another event letting us
2512 	 * know about it.
2513 	 */
2514 	read_lock_bh(&n->lock);
2515 	memcpy(ha, n->ha, ETH_ALEN);
2516 	nud_state = n->nud_state;
2517 	dead = n->dead;
2518 	read_unlock_bh(&n->lock);
2519 
2520 	rtnl_lock();
2521 	mlxsw_sp_span_respin(mlxsw_sp);
2522 
2523 	entry_connected = nud_state & NUD_VALID && !dead;
2524 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2525 	if (!entry_connected && !neigh_entry)
2526 		goto out;
2527 	if (!neigh_entry) {
2528 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2529 		if (IS_ERR(neigh_entry))
2530 			goto out;
2531 	}
2532 
2533 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2534 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2535 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2536 				      dead);
2537 
2538 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2539 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2540 
2541 out:
2542 	rtnl_unlock();
2543 	neigh_release(n);
2544 	kfree(net_work);
2545 }
2546 
2547 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2548 
2549 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2550 {
2551 	struct mlxsw_sp_netevent_work *net_work =
2552 		container_of(work, struct mlxsw_sp_netevent_work, work);
2553 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2554 
2555 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2556 	kfree(net_work);
2557 }
2558 
2559 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2560 
2561 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2562 {
2563 	struct mlxsw_sp_netevent_work *net_work =
2564 		container_of(work, struct mlxsw_sp_netevent_work, work);
2565 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2566 
2567 	__mlxsw_sp_router_init(mlxsw_sp);
2568 	kfree(net_work);
2569 }
2570 
2571 static int mlxsw_sp_router_schedule_work(struct net *net,
2572 					 struct notifier_block *nb,
2573 					 void (*cb)(struct work_struct *))
2574 {
2575 	struct mlxsw_sp_netevent_work *net_work;
2576 	struct mlxsw_sp_router *router;
2577 
2578 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2579 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2580 		return NOTIFY_DONE;
2581 
2582 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2583 	if (!net_work)
2584 		return NOTIFY_BAD;
2585 
2586 	INIT_WORK(&net_work->work, cb);
2587 	net_work->mlxsw_sp = router->mlxsw_sp;
2588 	mlxsw_core_schedule_work(&net_work->work);
2589 	return NOTIFY_DONE;
2590 }
2591 
2592 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2593 					  unsigned long event, void *ptr)
2594 {
2595 	struct mlxsw_sp_netevent_work *net_work;
2596 	struct mlxsw_sp_port *mlxsw_sp_port;
2597 	struct mlxsw_sp *mlxsw_sp;
2598 	unsigned long interval;
2599 	struct neigh_parms *p;
2600 	struct neighbour *n;
2601 
2602 	switch (event) {
2603 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2604 		p = ptr;
2605 
2606 		/* We don't care about changes in the default table. */
2607 		if (!p->dev || (p->tbl->family != AF_INET &&
2608 				p->tbl->family != AF_INET6))
2609 			return NOTIFY_DONE;
2610 
2611 		/* We are in atomic context and can't take RTNL mutex,
2612 		 * so use RCU variant to walk the device chain.
2613 		 */
2614 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2615 		if (!mlxsw_sp_port)
2616 			return NOTIFY_DONE;
2617 
2618 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2619 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2620 		mlxsw_sp->router->neighs_update.interval = interval;
2621 
2622 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2623 		break;
2624 	case NETEVENT_NEIGH_UPDATE:
2625 		n = ptr;
2626 
2627 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2628 			return NOTIFY_DONE;
2629 
2630 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2631 		if (!mlxsw_sp_port)
2632 			return NOTIFY_DONE;
2633 
2634 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2635 		if (!net_work) {
2636 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
2637 			return NOTIFY_BAD;
2638 		}
2639 
2640 		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2641 		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2642 		net_work->n = n;
2643 
2644 		/* Take a reference to ensure the neighbour won't be
2645 		 * destructed until we drop the reference in delayed
2646 		 * work.
2647 		 */
2648 		neigh_clone(n);
2649 		mlxsw_core_schedule_work(&net_work->work);
2650 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2651 		break;
2652 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2653 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2654 		return mlxsw_sp_router_schedule_work(ptr, nb,
2655 				mlxsw_sp_router_mp_hash_event_work);
2656 
2657 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2658 		return mlxsw_sp_router_schedule_work(ptr, nb,
2659 				mlxsw_sp_router_update_priority_work);
2660 	}
2661 
2662 	return NOTIFY_DONE;
2663 }
2664 
2665 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2666 {
2667 	int err;
2668 
2669 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2670 			      &mlxsw_sp_neigh_ht_params);
2671 	if (err)
2672 		return err;
2673 
2674 	/* Initialize the polling interval according to the default
2675 	 * table.
2676 	 */
2677 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2678 
2679 	/* Create the delayed works for the activity_update */
2680 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2681 			  mlxsw_sp_router_neighs_update_work);
2682 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2683 			  mlxsw_sp_router_probe_unresolved_nexthops);
2684 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2685 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2686 	return 0;
2687 }
2688 
2689 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2690 {
2691 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2692 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2693 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2694 }
2695 
2696 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2697 					 struct mlxsw_sp_rif *rif)
2698 {
2699 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2700 
2701 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2702 				 rif_list_node) {
2703 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2704 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2705 	}
2706 }
2707 
2708 enum mlxsw_sp_nexthop_type {
2709 	MLXSW_SP_NEXTHOP_TYPE_ETH,
2710 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
2711 };
2712 
2713 struct mlxsw_sp_nexthop_key {
2714 	struct fib_nh *fib_nh;
2715 };
2716 
2717 struct mlxsw_sp_nexthop {
2718 	struct list_head neigh_list_node; /* member of neigh entry list */
2719 	struct list_head rif_list_node;
2720 	struct list_head router_list_node;
2721 	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2722 						* this belongs to
2723 						*/
2724 	struct rhash_head ht_node;
2725 	struct mlxsw_sp_nexthop_key key;
2726 	unsigned char gw_addr[sizeof(struct in6_addr)];
2727 	int ifindex;
2728 	int nh_weight;
2729 	int norm_nh_weight;
2730 	int num_adj_entries;
2731 	struct mlxsw_sp_rif *rif;
2732 	u8 should_offload:1, /* set indicates this neigh is connected and
2733 			      * should be put to KVD linear area of this group.
2734 			      */
2735 	   offloaded:1, /* set in case the neigh is actually put into
2736 			 * KVD linear area of this group.
2737 			 */
2738 	   update:1; /* set indicates that MAC of this neigh should be
2739 		      * updated in HW
2740 		      */
2741 	enum mlxsw_sp_nexthop_type type;
2742 	union {
2743 		struct mlxsw_sp_neigh_entry *neigh_entry;
2744 		struct mlxsw_sp_ipip_entry *ipip_entry;
2745 	};
2746 	unsigned int counter_index;
2747 	bool counter_valid;
2748 };
2749 
2750 struct mlxsw_sp_nexthop_group {
2751 	void *priv;
2752 	struct rhash_head ht_node;
2753 	struct list_head fib_list; /* list of fib entries that use this group */
2754 	struct neigh_table *neigh_tbl;
2755 	u8 adj_index_valid:1,
2756 	   gateway:1; /* routes using the group use a gateway */
2757 	u32 adj_index;
2758 	u16 ecmp_size;
2759 	u16 count;
2760 	int sum_norm_weight;
2761 	struct mlxsw_sp_nexthop nexthops[0];
2762 #define nh_rif	nexthops[0].rif
2763 };
2764 
2765 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2766 				    struct mlxsw_sp_nexthop *nh)
2767 {
2768 	struct devlink *devlink;
2769 
2770 	devlink = priv_to_devlink(mlxsw_sp->core);
2771 	if (!devlink_dpipe_table_counter_enabled(devlink,
2772 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2773 		return;
2774 
2775 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2776 		return;
2777 
2778 	nh->counter_valid = true;
2779 }
2780 
2781 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2782 				   struct mlxsw_sp_nexthop *nh)
2783 {
2784 	if (!nh->counter_valid)
2785 		return;
2786 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2787 	nh->counter_valid = false;
2788 }
2789 
2790 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2791 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2792 {
2793 	if (!nh->counter_valid)
2794 		return -EINVAL;
2795 
2796 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2797 					 p_counter, NULL);
2798 }
2799 
2800 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2801 					       struct mlxsw_sp_nexthop *nh)
2802 {
2803 	if (!nh) {
2804 		if (list_empty(&router->nexthop_list))
2805 			return NULL;
2806 		else
2807 			return list_first_entry(&router->nexthop_list,
2808 						typeof(*nh), router_list_node);
2809 	}
2810 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2811 		return NULL;
2812 	return list_next_entry(nh, router_list_node);
2813 }
2814 
2815 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2816 {
2817 	return nh->offloaded;
2818 }
2819 
2820 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2821 {
2822 	if (!nh->offloaded)
2823 		return NULL;
2824 	return nh->neigh_entry->ha;
2825 }
2826 
2827 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
2828 			     u32 *p_adj_size, u32 *p_adj_hash_index)
2829 {
2830 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2831 	u32 adj_hash_index = 0;
2832 	int i;
2833 
2834 	if (!nh->offloaded || !nh_grp->adj_index_valid)
2835 		return -EINVAL;
2836 
2837 	*p_adj_index = nh_grp->adj_index;
2838 	*p_adj_size = nh_grp->ecmp_size;
2839 
2840 	for (i = 0; i < nh_grp->count; i++) {
2841 		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2842 
2843 		if (nh_iter == nh)
2844 			break;
2845 		if (nh_iter->offloaded)
2846 			adj_hash_index += nh_iter->num_adj_entries;
2847 	}
2848 
2849 	*p_adj_hash_index = adj_hash_index;
2850 	return 0;
2851 }
2852 
2853 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2854 {
2855 	return nh->rif;
2856 }
2857 
2858 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2859 {
2860 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2861 	int i;
2862 
2863 	for (i = 0; i < nh_grp->count; i++) {
2864 		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2865 
2866 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2867 			return true;
2868 	}
2869 	return false;
2870 }
2871 
2872 static struct fib_info *
2873 mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2874 {
2875 	return nh_grp->priv;
2876 }
2877 
2878 struct mlxsw_sp_nexthop_group_cmp_arg {
2879 	enum mlxsw_sp_l3proto proto;
2880 	union {
2881 		struct fib_info *fi;
2882 		struct mlxsw_sp_fib6_entry *fib6_entry;
2883 	};
2884 };
2885 
2886 static bool
2887 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2888 				    const struct in6_addr *gw, int ifindex,
2889 				    int weight)
2890 {
2891 	int i;
2892 
2893 	for (i = 0; i < nh_grp->count; i++) {
2894 		const struct mlxsw_sp_nexthop *nh;
2895 
2896 		nh = &nh_grp->nexthops[i];
2897 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
2898 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2899 			return true;
2900 	}
2901 
2902 	return false;
2903 }
2904 
2905 static bool
2906 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2907 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
2908 {
2909 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2910 
2911 	if (nh_grp->count != fib6_entry->nrt6)
2912 		return false;
2913 
2914 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2915 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
2916 		struct in6_addr *gw;
2917 		int ifindex, weight;
2918 
2919 		ifindex = fib6_nh->fib_nh_dev->ifindex;
2920 		weight = fib6_nh->fib_nh_weight;
2921 		gw = &fib6_nh->fib_nh_gw6;
2922 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2923 							 weight))
2924 			return false;
2925 	}
2926 
2927 	return true;
2928 }
2929 
2930 static int
2931 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2932 {
2933 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2934 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2935 
2936 	switch (cmp_arg->proto) {
2937 	case MLXSW_SP_L3_PROTO_IPV4:
2938 		return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2939 	case MLXSW_SP_L3_PROTO_IPV6:
2940 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2941 						    cmp_arg->fib6_entry);
2942 	default:
2943 		WARN_ON(1);
2944 		return 1;
2945 	}
2946 }
2947 
2948 static int
2949 mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2950 {
2951 	return nh_grp->neigh_tbl->family;
2952 }
2953 
2954 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2955 {
2956 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
2957 	const struct mlxsw_sp_nexthop *nh;
2958 	struct fib_info *fi;
2959 	unsigned int val;
2960 	int i;
2961 
2962 	switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2963 	case AF_INET:
2964 		fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2965 		return jhash(&fi, sizeof(fi), seed);
2966 	case AF_INET6:
2967 		val = nh_grp->count;
2968 		for (i = 0; i < nh_grp->count; i++) {
2969 			nh = &nh_grp->nexthops[i];
2970 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
2971 		}
2972 		return jhash(&val, sizeof(val), seed);
2973 	default:
2974 		WARN_ON(1);
2975 		return 0;
2976 	}
2977 }
2978 
2979 static u32
2980 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2981 {
2982 	unsigned int val = fib6_entry->nrt6;
2983 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2984 	struct net_device *dev;
2985 
2986 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2987 		dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
2988 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
2989 	}
2990 
2991 	return jhash(&val, sizeof(val), seed);
2992 }
2993 
2994 static u32
2995 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2996 {
2997 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2998 
2999 	switch (cmp_arg->proto) {
3000 	case MLXSW_SP_L3_PROTO_IPV4:
3001 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3002 	case MLXSW_SP_L3_PROTO_IPV6:
3003 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3004 	default:
3005 		WARN_ON(1);
3006 		return 0;
3007 	}
3008 }
3009 
3010 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3011 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3012 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3013 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3014 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3015 };
3016 
3017 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3018 					 struct mlxsw_sp_nexthop_group *nh_grp)
3019 {
3020 	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
3021 	    !nh_grp->gateway)
3022 		return 0;
3023 
3024 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3025 				      &nh_grp->ht_node,
3026 				      mlxsw_sp_nexthop_group_ht_params);
3027 }
3028 
3029 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3030 					  struct mlxsw_sp_nexthop_group *nh_grp)
3031 {
3032 	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
3033 	    !nh_grp->gateway)
3034 		return;
3035 
3036 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3037 			       &nh_grp->ht_node,
3038 			       mlxsw_sp_nexthop_group_ht_params);
3039 }
3040 
3041 static struct mlxsw_sp_nexthop_group *
3042 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3043 			       struct fib_info *fi)
3044 {
3045 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3046 
3047 	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
3048 	cmp_arg.fi = fi;
3049 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3050 				      &cmp_arg,
3051 				      mlxsw_sp_nexthop_group_ht_params);
3052 }
3053 
3054 static struct mlxsw_sp_nexthop_group *
3055 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3056 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3057 {
3058 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3059 
3060 	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
3061 	cmp_arg.fib6_entry = fib6_entry;
3062 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3063 				      &cmp_arg,
3064 				      mlxsw_sp_nexthop_group_ht_params);
3065 }
3066 
3067 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3068 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3069 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3070 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3071 };
3072 
3073 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3074 				   struct mlxsw_sp_nexthop *nh)
3075 {
3076 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3077 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3078 }
3079 
3080 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3081 				    struct mlxsw_sp_nexthop *nh)
3082 {
3083 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3084 			       mlxsw_sp_nexthop_ht_params);
3085 }
3086 
3087 static struct mlxsw_sp_nexthop *
3088 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3089 			struct mlxsw_sp_nexthop_key key)
3090 {
3091 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3092 				      mlxsw_sp_nexthop_ht_params);
3093 }
3094 
3095 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3096 					     const struct mlxsw_sp_fib *fib,
3097 					     u32 adj_index, u16 ecmp_size,
3098 					     u32 new_adj_index,
3099 					     u16 new_ecmp_size)
3100 {
3101 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3102 
3103 	mlxsw_reg_raleu_pack(raleu_pl,
3104 			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
3105 			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
3106 			     new_ecmp_size);
3107 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3108 }
3109 
3110 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3111 					  struct mlxsw_sp_nexthop_group *nh_grp,
3112 					  u32 old_adj_index, u16 old_ecmp_size)
3113 {
3114 	struct mlxsw_sp_fib_entry *fib_entry;
3115 	struct mlxsw_sp_fib *fib = NULL;
3116 	int err;
3117 
3118 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3119 		if (fib == fib_entry->fib_node->fib)
3120 			continue;
3121 		fib = fib_entry->fib_node->fib;
3122 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
3123 							old_adj_index,
3124 							old_ecmp_size,
3125 							nh_grp->adj_index,
3126 							nh_grp->ecmp_size);
3127 		if (err)
3128 			return err;
3129 	}
3130 	return 0;
3131 }
3132 
3133 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3134 				     struct mlxsw_sp_nexthop *nh)
3135 {
3136 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3137 	char ratr_pl[MLXSW_REG_RATR_LEN];
3138 
3139 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
3140 			    true, MLXSW_REG_RATR_TYPE_ETHERNET,
3141 			    adj_index, neigh_entry->rif);
3142 	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3143 	if (nh->counter_valid)
3144 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3145 	else
3146 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3147 
3148 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3149 }
3150 
3151 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3152 			    struct mlxsw_sp_nexthop *nh)
3153 {
3154 	int i;
3155 
3156 	for (i = 0; i < nh->num_adj_entries; i++) {
3157 		int err;
3158 
3159 		err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3160 		if (err)
3161 			return err;
3162 	}
3163 
3164 	return 0;
3165 }
3166 
3167 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3168 					  u32 adj_index,
3169 					  struct mlxsw_sp_nexthop *nh)
3170 {
3171 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3172 
3173 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3174 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3175 }
3176 
3177 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3178 					u32 adj_index,
3179 					struct mlxsw_sp_nexthop *nh)
3180 {
3181 	int i;
3182 
3183 	for (i = 0; i < nh->num_adj_entries; i++) {
3184 		int err;
3185 
3186 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3187 						     nh);
3188 		if (err)
3189 			return err;
3190 	}
3191 
3192 	return 0;
3193 }
3194 
3195 static int
3196 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3197 			      struct mlxsw_sp_nexthop_group *nh_grp,
3198 			      bool reallocate)
3199 {
3200 	u32 adj_index = nh_grp->adj_index; /* base */
3201 	struct mlxsw_sp_nexthop *nh;
3202 	int i;
3203 	int err;
3204 
3205 	for (i = 0; i < nh_grp->count; i++) {
3206 		nh = &nh_grp->nexthops[i];
3207 
3208 		if (!nh->should_offload) {
3209 			nh->offloaded = 0;
3210 			continue;
3211 		}
3212 
3213 		if (nh->update || reallocate) {
3214 			switch (nh->type) {
3215 			case MLXSW_SP_NEXTHOP_TYPE_ETH:
3216 				err = mlxsw_sp_nexthop_update
3217 					    (mlxsw_sp, adj_index, nh);
3218 				break;
3219 			case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3220 				err = mlxsw_sp_nexthop_ipip_update
3221 					    (mlxsw_sp, adj_index, nh);
3222 				break;
3223 			}
3224 			if (err)
3225 				return err;
3226 			nh->update = 0;
3227 			nh->offloaded = 1;
3228 		}
3229 		adj_index += nh->num_adj_entries;
3230 	}
3231 	return 0;
3232 }
3233 
3234 static bool
3235 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3236 				 const struct mlxsw_sp_fib_entry *fib_entry);
3237 
3238 static int
3239 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3240 				    struct mlxsw_sp_nexthop_group *nh_grp)
3241 {
3242 	struct mlxsw_sp_fib_entry *fib_entry;
3243 	int err;
3244 
3245 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3246 		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3247 						      fib_entry))
3248 			continue;
3249 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3250 		if (err)
3251 			return err;
3252 	}
3253 	return 0;
3254 }
3255 
3256 static void
3257 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3258 				   enum mlxsw_reg_ralue_op op, int err);
3259 
3260 static void
3261 mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3262 {
3263 	enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3264 	struct mlxsw_sp_fib_entry *fib_entry;
3265 
3266 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3267 		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3268 						      fib_entry))
3269 			continue;
3270 		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3271 	}
3272 }
3273 
3274 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3275 {
3276 	/* Valid sizes for an adjacency group are:
3277 	 * 1-64, 512, 1024, 2048 and 4096.
3278 	 */
3279 	if (*p_adj_grp_size <= 64)
3280 		return;
3281 	else if (*p_adj_grp_size <= 512)
3282 		*p_adj_grp_size = 512;
3283 	else if (*p_adj_grp_size <= 1024)
3284 		*p_adj_grp_size = 1024;
3285 	else if (*p_adj_grp_size <= 2048)
3286 		*p_adj_grp_size = 2048;
3287 	else
3288 		*p_adj_grp_size = 4096;
3289 }
3290 
3291 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3292 					     unsigned int alloc_size)
3293 {
3294 	if (alloc_size >= 4096)
3295 		*p_adj_grp_size = 4096;
3296 	else if (alloc_size >= 2048)
3297 		*p_adj_grp_size = 2048;
3298 	else if (alloc_size >= 1024)
3299 		*p_adj_grp_size = 1024;
3300 	else if (alloc_size >= 512)
3301 		*p_adj_grp_size = 512;
3302 }
3303 
3304 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3305 				     u16 *p_adj_grp_size)
3306 {
3307 	unsigned int alloc_size;
3308 	int err;
3309 
3310 	/* Round up the requested group size to the next size supported
3311 	 * by the device and make sure the request can be satisfied.
3312 	 */
3313 	mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3314 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3315 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3316 					      *p_adj_grp_size, &alloc_size);
3317 	if (err)
3318 		return err;
3319 	/* It is possible the allocation results in more allocated
3320 	 * entries than requested. Try to use as much of them as
3321 	 * possible.
3322 	 */
3323 	mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3324 
3325 	return 0;
3326 }
3327 
3328 static void
3329 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3330 {
3331 	int i, g = 0, sum_norm_weight = 0;
3332 	struct mlxsw_sp_nexthop *nh;
3333 
3334 	for (i = 0; i < nh_grp->count; i++) {
3335 		nh = &nh_grp->nexthops[i];
3336 
3337 		if (!nh->should_offload)
3338 			continue;
3339 		if (g > 0)
3340 			g = gcd(nh->nh_weight, g);
3341 		else
3342 			g = nh->nh_weight;
3343 	}
3344 
3345 	for (i = 0; i < nh_grp->count; i++) {
3346 		nh = &nh_grp->nexthops[i];
3347 
3348 		if (!nh->should_offload)
3349 			continue;
3350 		nh->norm_nh_weight = nh->nh_weight / g;
3351 		sum_norm_weight += nh->norm_nh_weight;
3352 	}
3353 
3354 	nh_grp->sum_norm_weight = sum_norm_weight;
3355 }
3356 
3357 static void
3358 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3359 {
3360 	int total = nh_grp->sum_norm_weight;
3361 	u16 ecmp_size = nh_grp->ecmp_size;
3362 	int i, weight = 0, lower_bound = 0;
3363 
3364 	for (i = 0; i < nh_grp->count; i++) {
3365 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3366 		int upper_bound;
3367 
3368 		if (!nh->should_offload)
3369 			continue;
3370 		weight += nh->norm_nh_weight;
3371 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3372 		nh->num_adj_entries = upper_bound - lower_bound;
3373 		lower_bound = upper_bound;
3374 	}
3375 }
3376 
3377 static void
3378 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3379 			       struct mlxsw_sp_nexthop_group *nh_grp)
3380 {
3381 	u16 ecmp_size, old_ecmp_size;
3382 	struct mlxsw_sp_nexthop *nh;
3383 	bool offload_change = false;
3384 	u32 adj_index;
3385 	bool old_adj_index_valid;
3386 	u32 old_adj_index;
3387 	int i;
3388 	int err;
3389 
3390 	if (!nh_grp->gateway) {
3391 		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3392 		return;
3393 	}
3394 
3395 	for (i = 0; i < nh_grp->count; i++) {
3396 		nh = &nh_grp->nexthops[i];
3397 
3398 		if (nh->should_offload != nh->offloaded) {
3399 			offload_change = true;
3400 			if (nh->should_offload)
3401 				nh->update = 1;
3402 		}
3403 	}
3404 	if (!offload_change) {
3405 		/* Nothing was added or removed, so no need to reallocate. Just
3406 		 * update MAC on existing adjacency indexes.
3407 		 */
3408 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
3409 		if (err) {
3410 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3411 			goto set_trap;
3412 		}
3413 		return;
3414 	}
3415 	mlxsw_sp_nexthop_group_normalize(nh_grp);
3416 	if (!nh_grp->sum_norm_weight)
3417 		/* No neigh of this group is connected so we just set
3418 		 * the trap and let everthing flow through kernel.
3419 		 */
3420 		goto set_trap;
3421 
3422 	ecmp_size = nh_grp->sum_norm_weight;
3423 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3424 	if (err)
3425 		/* No valid allocation size available. */
3426 		goto set_trap;
3427 
3428 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3429 				  ecmp_size, &adj_index);
3430 	if (err) {
3431 		/* We ran out of KVD linear space, just set the
3432 		 * trap and let everything flow through kernel.
3433 		 */
3434 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3435 		goto set_trap;
3436 	}
3437 	old_adj_index_valid = nh_grp->adj_index_valid;
3438 	old_adj_index = nh_grp->adj_index;
3439 	old_ecmp_size = nh_grp->ecmp_size;
3440 	nh_grp->adj_index_valid = 1;
3441 	nh_grp->adj_index = adj_index;
3442 	nh_grp->ecmp_size = ecmp_size;
3443 	mlxsw_sp_nexthop_group_rebalance(nh_grp);
3444 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
3445 	if (err) {
3446 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3447 		goto set_trap;
3448 	}
3449 
3450 	if (!old_adj_index_valid) {
3451 		/* The trap was set for fib entries, so we have to call
3452 		 * fib entry update to unset it and use adjacency index.
3453 		 */
3454 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3455 		if (err) {
3456 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3457 			goto set_trap;
3458 		}
3459 		return;
3460 	}
3461 
3462 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3463 					     old_adj_index, old_ecmp_size);
3464 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3465 			   old_ecmp_size, old_adj_index);
3466 	if (err) {
3467 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3468 		goto set_trap;
3469 	}
3470 
3471 	/* Offload state within the group changed, so update the flags. */
3472 	mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3473 
3474 	return;
3475 
3476 set_trap:
3477 	old_adj_index_valid = nh_grp->adj_index_valid;
3478 	nh_grp->adj_index_valid = 0;
3479 	for (i = 0; i < nh_grp->count; i++) {
3480 		nh = &nh_grp->nexthops[i];
3481 		nh->offloaded = 0;
3482 	}
3483 	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3484 	if (err)
3485 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3486 	if (old_adj_index_valid)
3487 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3488 				   nh_grp->ecmp_size, nh_grp->adj_index);
3489 }
3490 
3491 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3492 					    bool removing)
3493 {
3494 	if (!removing)
3495 		nh->should_offload = 1;
3496 	else
3497 		nh->should_offload = 0;
3498 	nh->update = 1;
3499 }
3500 
3501 static int
3502 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3503 				    struct mlxsw_sp_neigh_entry *neigh_entry)
3504 {
3505 	struct neighbour *n, *old_n = neigh_entry->key.n;
3506 	struct mlxsw_sp_nexthop *nh;
3507 	bool entry_connected;
3508 	u8 nud_state, dead;
3509 	int err;
3510 
3511 	nh = list_first_entry(&neigh_entry->nexthop_list,
3512 			      struct mlxsw_sp_nexthop, neigh_list_node);
3513 
3514 	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3515 	if (!n) {
3516 		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3517 				 nh->rif->dev);
3518 		if (IS_ERR(n))
3519 			return PTR_ERR(n);
3520 		neigh_event_send(n, NULL);
3521 	}
3522 
3523 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3524 	neigh_entry->key.n = n;
3525 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3526 	if (err)
3527 		goto err_neigh_entry_insert;
3528 
3529 	read_lock_bh(&n->lock);
3530 	nud_state = n->nud_state;
3531 	dead = n->dead;
3532 	read_unlock_bh(&n->lock);
3533 	entry_connected = nud_state & NUD_VALID && !dead;
3534 
3535 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
3536 			    neigh_list_node) {
3537 		neigh_release(old_n);
3538 		neigh_clone(n);
3539 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
3540 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3541 	}
3542 
3543 	neigh_release(n);
3544 
3545 	return 0;
3546 
3547 err_neigh_entry_insert:
3548 	neigh_entry->key.n = old_n;
3549 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3550 	neigh_release(n);
3551 	return err;
3552 }
3553 
3554 static void
3555 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3556 			      struct mlxsw_sp_neigh_entry *neigh_entry,
3557 			      bool removing, bool dead)
3558 {
3559 	struct mlxsw_sp_nexthop *nh;
3560 
3561 	if (list_empty(&neigh_entry->nexthop_list))
3562 		return;
3563 
3564 	if (dead) {
3565 		int err;
3566 
3567 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
3568 							  neigh_entry);
3569 		if (err)
3570 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
3571 		return;
3572 	}
3573 
3574 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
3575 			    neigh_list_node) {
3576 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
3577 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3578 	}
3579 }
3580 
3581 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3582 				      struct mlxsw_sp_rif *rif)
3583 {
3584 	if (nh->rif)
3585 		return;
3586 
3587 	nh->rif = rif;
3588 	list_add(&nh->rif_list_node, &rif->nexthop_list);
3589 }
3590 
3591 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3592 {
3593 	if (!nh->rif)
3594 		return;
3595 
3596 	list_del(&nh->rif_list_node);
3597 	nh->rif = NULL;
3598 }
3599 
3600 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3601 				       struct mlxsw_sp_nexthop *nh)
3602 {
3603 	struct mlxsw_sp_neigh_entry *neigh_entry;
3604 	struct neighbour *n;
3605 	u8 nud_state, dead;
3606 	int err;
3607 
3608 	if (!nh->nh_grp->gateway || nh->neigh_entry)
3609 		return 0;
3610 
3611 	/* Take a reference of neigh here ensuring that neigh would
3612 	 * not be destructed before the nexthop entry is finished.
3613 	 * The reference is taken either in neigh_lookup() or
3614 	 * in neigh_create() in case n is not found.
3615 	 */
3616 	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3617 	if (!n) {
3618 		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3619 				 nh->rif->dev);
3620 		if (IS_ERR(n))
3621 			return PTR_ERR(n);
3622 		neigh_event_send(n, NULL);
3623 	}
3624 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3625 	if (!neigh_entry) {
3626 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3627 		if (IS_ERR(neigh_entry)) {
3628 			err = -EINVAL;
3629 			goto err_neigh_entry_create;
3630 		}
3631 	}
3632 
3633 	/* If that is the first nexthop connected to that neigh, add to
3634 	 * nexthop_neighs_list
3635 	 */
3636 	if (list_empty(&neigh_entry->nexthop_list))
3637 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
3638 			      &mlxsw_sp->router->nexthop_neighs_list);
3639 
3640 	nh->neigh_entry = neigh_entry;
3641 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3642 	read_lock_bh(&n->lock);
3643 	nud_state = n->nud_state;
3644 	dead = n->dead;
3645 	read_unlock_bh(&n->lock);
3646 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
3647 
3648 	return 0;
3649 
3650 err_neigh_entry_create:
3651 	neigh_release(n);
3652 	return err;
3653 }
3654 
3655 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3656 					struct mlxsw_sp_nexthop *nh)
3657 {
3658 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3659 	struct neighbour *n;
3660 
3661 	if (!neigh_entry)
3662 		return;
3663 	n = neigh_entry->key.n;
3664 
3665 	__mlxsw_sp_nexthop_neigh_update(nh, true);
3666 	list_del(&nh->neigh_list_node);
3667 	nh->neigh_entry = NULL;
3668 
3669 	/* If that is the last nexthop connected to that neigh, remove from
3670 	 * nexthop_neighs_list
3671 	 */
3672 	if (list_empty(&neigh_entry->nexthop_list))
3673 		list_del(&neigh_entry->nexthop_neighs_list_node);
3674 
3675 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3676 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3677 
3678 	neigh_release(n);
3679 }
3680 
3681 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3682 {
3683 	struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3684 
3685 	return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3686 }
3687 
3688 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3689 				       struct mlxsw_sp_nexthop *nh,
3690 				       struct mlxsw_sp_ipip_entry *ipip_entry)
3691 {
3692 	bool removing;
3693 
3694 	if (!nh->nh_grp->gateway || nh->ipip_entry)
3695 		return;
3696 
3697 	nh->ipip_entry = ipip_entry;
3698 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
3699 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
3700 	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
3701 }
3702 
3703 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3704 				       struct mlxsw_sp_nexthop *nh)
3705 {
3706 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3707 
3708 	if (!ipip_entry)
3709 		return;
3710 
3711 	__mlxsw_sp_nexthop_neigh_update(nh, true);
3712 	nh->ipip_entry = NULL;
3713 }
3714 
3715 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3716 					const struct fib_nh *fib_nh,
3717 					enum mlxsw_sp_ipip_type *p_ipipt)
3718 {
3719 	struct net_device *dev = fib_nh->fib_nh_dev;
3720 
3721 	return dev &&
3722 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3723 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3724 }
3725 
3726 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3727 				       struct mlxsw_sp_nexthop *nh)
3728 {
3729 	switch (nh->type) {
3730 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
3731 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3732 		mlxsw_sp_nexthop_rif_fini(nh);
3733 		break;
3734 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3735 		mlxsw_sp_nexthop_rif_fini(nh);
3736 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3737 		break;
3738 	}
3739 }
3740 
3741 static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3742 				       struct mlxsw_sp_nexthop *nh,
3743 				       struct fib_nh *fib_nh)
3744 {
3745 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3746 	struct net_device *dev = fib_nh->fib_nh_dev;
3747 	struct mlxsw_sp_ipip_entry *ipip_entry;
3748 	struct mlxsw_sp_rif *rif;
3749 	int err;
3750 
3751 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3752 	if (ipip_entry) {
3753 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3754 		if (ipip_ops->can_offload(mlxsw_sp, dev,
3755 					  MLXSW_SP_L3_PROTO_IPV4)) {
3756 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3757 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3758 			return 0;
3759 		}
3760 	}
3761 
3762 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3763 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3764 	if (!rif)
3765 		return 0;
3766 
3767 	mlxsw_sp_nexthop_rif_init(nh, rif);
3768 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3769 	if (err)
3770 		goto err_neigh_init;
3771 
3772 	return 0;
3773 
3774 err_neigh_init:
3775 	mlxsw_sp_nexthop_rif_fini(nh);
3776 	return err;
3777 }
3778 
3779 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3780 					struct mlxsw_sp_nexthop *nh)
3781 {
3782 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3783 }
3784 
3785 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3786 				  struct mlxsw_sp_nexthop_group *nh_grp,
3787 				  struct mlxsw_sp_nexthop *nh,
3788 				  struct fib_nh *fib_nh)
3789 {
3790 	struct net_device *dev = fib_nh->fib_nh_dev;
3791 	struct in_device *in_dev;
3792 	int err;
3793 
3794 	nh->nh_grp = nh_grp;
3795 	nh->key.fib_nh = fib_nh;
3796 #ifdef CONFIG_IP_ROUTE_MULTIPATH
3797 	nh->nh_weight = fib_nh->fib_nh_weight;
3798 #else
3799 	nh->nh_weight = 1;
3800 #endif
3801 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
3802 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3803 	if (err)
3804 		return err;
3805 
3806 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
3807 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3808 
3809 	if (!dev)
3810 		return 0;
3811 
3812 	in_dev = __in_dev_get_rtnl(dev);
3813 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3814 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN)
3815 		return 0;
3816 
3817 	err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3818 	if (err)
3819 		goto err_nexthop_neigh_init;
3820 
3821 	return 0;
3822 
3823 err_nexthop_neigh_init:
3824 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3825 	return err;
3826 }
3827 
3828 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3829 				   struct mlxsw_sp_nexthop *nh)
3830 {
3831 	mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3832 	list_del(&nh->router_list_node);
3833 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
3834 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3835 }
3836 
3837 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3838 				    unsigned long event, struct fib_nh *fib_nh)
3839 {
3840 	struct mlxsw_sp_nexthop_key key;
3841 	struct mlxsw_sp_nexthop *nh;
3842 
3843 	if (mlxsw_sp->router->aborted)
3844 		return;
3845 
3846 	key.fib_nh = fib_nh;
3847 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3848 	if (WARN_ON_ONCE(!nh))
3849 		return;
3850 
3851 	switch (event) {
3852 	case FIB_EVENT_NH_ADD:
3853 		mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3854 		break;
3855 	case FIB_EVENT_NH_DEL:
3856 		mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3857 		break;
3858 	}
3859 
3860 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3861 }
3862 
3863 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3864 					struct mlxsw_sp_rif *rif)
3865 {
3866 	struct mlxsw_sp_nexthop *nh;
3867 	bool removing;
3868 
3869 	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3870 		switch (nh->type) {
3871 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
3872 			removing = false;
3873 			break;
3874 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3875 			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3876 			break;
3877 		default:
3878 			WARN_ON(1);
3879 			continue;
3880 		}
3881 
3882 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
3883 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3884 	}
3885 }
3886 
3887 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3888 					 struct mlxsw_sp_rif *old_rif,
3889 					 struct mlxsw_sp_rif *new_rif)
3890 {
3891 	struct mlxsw_sp_nexthop *nh;
3892 
3893 	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3894 	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3895 		nh->rif = new_rif;
3896 	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3897 }
3898 
3899 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
3900 					   struct mlxsw_sp_rif *rif)
3901 {
3902 	struct mlxsw_sp_nexthop *nh, *tmp;
3903 
3904 	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
3905 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3906 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3907 	}
3908 }
3909 
3910 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3911 				   struct fib_info *fi)
3912 {
3913 	const struct fib_nh *nh = fib_info_nh(fi, 0);
3914 
3915 	return nh->fib_nh_scope == RT_SCOPE_LINK ||
3916 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
3917 }
3918 
3919 static struct mlxsw_sp_nexthop_group *
3920 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
3921 {
3922 	unsigned int nhs = fib_info_num_path(fi);
3923 	struct mlxsw_sp_nexthop_group *nh_grp;
3924 	struct mlxsw_sp_nexthop *nh;
3925 	struct fib_nh *fib_nh;
3926 	int i;
3927 	int err;
3928 
3929 	nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
3930 	if (!nh_grp)
3931 		return ERR_PTR(-ENOMEM);
3932 	nh_grp->priv = fi;
3933 	INIT_LIST_HEAD(&nh_grp->fib_list);
3934 	nh_grp->neigh_tbl = &arp_tbl;
3935 
3936 	nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
3937 	nh_grp->count = nhs;
3938 	fib_info_hold(fi);
3939 	for (i = 0; i < nh_grp->count; i++) {
3940 		nh = &nh_grp->nexthops[i];
3941 		fib_nh = fib_info_nh(fi, i);
3942 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
3943 		if (err)
3944 			goto err_nexthop4_init;
3945 	}
3946 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3947 	if (err)
3948 		goto err_nexthop_group_insert;
3949 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3950 	return nh_grp;
3951 
3952 err_nexthop_group_insert:
3953 err_nexthop4_init:
3954 	for (i--; i >= 0; i--) {
3955 		nh = &nh_grp->nexthops[i];
3956 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
3957 	}
3958 	fib_info_put(fi);
3959 	kfree(nh_grp);
3960 	return ERR_PTR(err);
3961 }
3962 
3963 static void
3964 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3965 				struct mlxsw_sp_nexthop_group *nh_grp)
3966 {
3967 	struct mlxsw_sp_nexthop *nh;
3968 	int i;
3969 
3970 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
3971 	for (i = 0; i < nh_grp->count; i++) {
3972 		nh = &nh_grp->nexthops[i];
3973 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
3974 	}
3975 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3976 	WARN_ON_ONCE(nh_grp->adj_index_valid);
3977 	fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
3978 	kfree(nh_grp);
3979 }
3980 
3981 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3982 				       struct mlxsw_sp_fib_entry *fib_entry,
3983 				       struct fib_info *fi)
3984 {
3985 	struct mlxsw_sp_nexthop_group *nh_grp;
3986 
3987 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
3988 	if (!nh_grp) {
3989 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
3990 		if (IS_ERR(nh_grp))
3991 			return PTR_ERR(nh_grp);
3992 	}
3993 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3994 	fib_entry->nh_group = nh_grp;
3995 	return 0;
3996 }
3997 
3998 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3999 					struct mlxsw_sp_fib_entry *fib_entry)
4000 {
4001 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4002 
4003 	list_del(&fib_entry->nexthop_group_node);
4004 	if (!list_empty(&nh_grp->fib_list))
4005 		return;
4006 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
4007 }
4008 
4009 static bool
4010 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4011 {
4012 	struct mlxsw_sp_fib4_entry *fib4_entry;
4013 
4014 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4015 				  common);
4016 	return !fib4_entry->tos;
4017 }
4018 
4019 static bool
4020 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4021 {
4022 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4023 
4024 	switch (fib_entry->fib_node->fib->proto) {
4025 	case MLXSW_SP_L3_PROTO_IPV4:
4026 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
4027 			return false;
4028 		break;
4029 	case MLXSW_SP_L3_PROTO_IPV6:
4030 		break;
4031 	}
4032 
4033 	switch (fib_entry->type) {
4034 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4035 		return !!nh_group->adj_index_valid;
4036 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4037 		return !!nh_group->nh_rif;
4038 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4039 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4040 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4041 		return true;
4042 	default:
4043 		return false;
4044 	}
4045 }
4046 
4047 static struct mlxsw_sp_nexthop *
4048 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4049 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4050 {
4051 	int i;
4052 
4053 	for (i = 0; i < nh_grp->count; i++) {
4054 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4055 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
4056 
4057 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
4058 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
4059 				    &rt->fib6_nh->fib_nh_gw6))
4060 			return nh;
4061 		continue;
4062 	}
4063 
4064 	return NULL;
4065 }
4066 
4067 static void
4068 mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
4069 {
4070 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4071 	int i;
4072 
4073 	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
4074 	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
4075 	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
4076 	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
4077 		nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4078 		return;
4079 	}
4080 
4081 	for (i = 0; i < nh_grp->count; i++) {
4082 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4083 
4084 		if (nh->offloaded)
4085 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4086 		else
4087 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4088 	}
4089 }
4090 
4091 static void
4092 mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
4093 {
4094 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4095 	int i;
4096 
4097 	if (!list_is_singular(&nh_grp->fib_list))
4098 		return;
4099 
4100 	for (i = 0; i < nh_grp->count; i++) {
4101 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4102 
4103 		nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4104 	}
4105 }
4106 
4107 static void
4108 mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
4109 {
4110 	struct mlxsw_sp_fib6_entry *fib6_entry;
4111 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4112 
4113 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4114 				  common);
4115 
4116 	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
4117 	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
4118 		list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4119 				 list)->rt->fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4120 		return;
4121 	}
4122 
4123 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4124 		struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4125 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
4126 		struct mlxsw_sp_nexthop *nh;
4127 
4128 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
4129 		if (nh && nh->offloaded)
4130 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4131 		else
4132 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4133 	}
4134 }
4135 
4136 static void
4137 mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
4138 {
4139 	struct mlxsw_sp_fib6_entry *fib6_entry;
4140 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4141 
4142 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4143 				  common);
4144 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4145 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
4146 
4147 		rt->fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4148 	}
4149 }
4150 
4151 static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
4152 {
4153 	switch (fib_entry->fib_node->fib->proto) {
4154 	case MLXSW_SP_L3_PROTO_IPV4:
4155 		mlxsw_sp_fib4_entry_offload_set(fib_entry);
4156 		break;
4157 	case MLXSW_SP_L3_PROTO_IPV6:
4158 		mlxsw_sp_fib6_entry_offload_set(fib_entry);
4159 		break;
4160 	}
4161 }
4162 
4163 static void
4164 mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
4165 {
4166 	switch (fib_entry->fib_node->fib->proto) {
4167 	case MLXSW_SP_L3_PROTO_IPV4:
4168 		mlxsw_sp_fib4_entry_offload_unset(fib_entry);
4169 		break;
4170 	case MLXSW_SP_L3_PROTO_IPV6:
4171 		mlxsw_sp_fib6_entry_offload_unset(fib_entry);
4172 		break;
4173 	}
4174 }
4175 
4176 static void
4177 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
4178 				   enum mlxsw_reg_ralue_op op, int err)
4179 {
4180 	switch (op) {
4181 	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
4182 		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
4183 	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
4184 		if (err)
4185 			return;
4186 		if (mlxsw_sp_fib_entry_should_offload(fib_entry))
4187 			mlxsw_sp_fib_entry_offload_set(fib_entry);
4188 		else
4189 			mlxsw_sp_fib_entry_offload_unset(fib_entry);
4190 		return;
4191 	default:
4192 		return;
4193 	}
4194 }
4195 
4196 static void
4197 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
4198 			      const struct mlxsw_sp_fib_entry *fib_entry,
4199 			      enum mlxsw_reg_ralue_op op)
4200 {
4201 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
4202 	enum mlxsw_reg_ralxx_protocol proto;
4203 	u32 *p_dip;
4204 
4205 	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
4206 
4207 	switch (fib->proto) {
4208 	case MLXSW_SP_L3_PROTO_IPV4:
4209 		p_dip = (u32 *) fib_entry->fib_node->key.addr;
4210 		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
4211 				      fib_entry->fib_node->key.prefix_len,
4212 				      *p_dip);
4213 		break;
4214 	case MLXSW_SP_L3_PROTO_IPV6:
4215 		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
4216 				      fib_entry->fib_node->key.prefix_len,
4217 				      fib_entry->fib_node->key.addr);
4218 		break;
4219 	}
4220 }
4221 
4222 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
4223 {
4224 	enum mlxsw_reg_ratr_trap_action trap_action;
4225 	char ratr_pl[MLXSW_REG_RATR_LEN];
4226 	int err;
4227 
4228 	if (mlxsw_sp->router->adj_discard_index_valid)
4229 		return 0;
4230 
4231 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4232 				  &mlxsw_sp->router->adj_discard_index);
4233 	if (err)
4234 		return err;
4235 
4236 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
4237 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4238 			    MLXSW_REG_RATR_TYPE_ETHERNET,
4239 			    mlxsw_sp->router->adj_discard_index, rif_index);
4240 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4241 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4242 	if (err)
4243 		goto err_ratr_write;
4244 
4245 	mlxsw_sp->router->adj_discard_index_valid = true;
4246 
4247 	return 0;
4248 
4249 err_ratr_write:
4250 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4251 			   mlxsw_sp->router->adj_discard_index);
4252 	return err;
4253 }
4254 
4255 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4256 					struct mlxsw_sp_fib_entry *fib_entry,
4257 					enum mlxsw_reg_ralue_op op)
4258 {
4259 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4260 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4261 	enum mlxsw_reg_ralue_trap_action trap_action;
4262 	u16 trap_id = 0;
4263 	u32 adjacency_index = 0;
4264 	u16 ecmp_size = 0;
4265 	int err;
4266 
4267 	/* In case the nexthop group adjacency index is valid, use it
4268 	 * with provided ECMP size. Otherwise, setup trap and pass
4269 	 * traffic to kernel.
4270 	 */
4271 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4272 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4273 		adjacency_index = fib_entry->nh_group->adj_index;
4274 		ecmp_size = fib_entry->nh_group->ecmp_size;
4275 	} else if (!nh_group->adj_index_valid && nh_group->count &&
4276 		   nh_group->nh_rif) {
4277 		err = mlxsw_sp_adj_discard_write(mlxsw_sp,
4278 						 nh_group->nh_rif->rif_index);
4279 		if (err)
4280 			return err;
4281 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4282 		adjacency_index = mlxsw_sp->router->adj_discard_index;
4283 		ecmp_size = 1;
4284 	} else {
4285 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4286 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4287 	}
4288 
4289 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4290 	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
4291 					adjacency_index, ecmp_size);
4292 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4293 }
4294 
4295 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4296 				       struct mlxsw_sp_fib_entry *fib_entry,
4297 				       enum mlxsw_reg_ralue_op op)
4298 {
4299 	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
4300 	enum mlxsw_reg_ralue_trap_action trap_action;
4301 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4302 	u16 trap_id = 0;
4303 	u16 rif_index = 0;
4304 
4305 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4306 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4307 		rif_index = rif->rif_index;
4308 	} else {
4309 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4310 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4311 	}
4312 
4313 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4314 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
4315 				       rif_index);
4316 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4317 }
4318 
4319 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4320 				      struct mlxsw_sp_fib_entry *fib_entry,
4321 				      enum mlxsw_reg_ralue_op op)
4322 {
4323 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4324 
4325 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4326 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
4327 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4328 }
4329 
4330 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
4331 					   struct mlxsw_sp_fib_entry *fib_entry,
4332 					   enum mlxsw_reg_ralue_op op)
4333 {
4334 	enum mlxsw_reg_ralue_trap_action trap_action;
4335 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4336 
4337 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
4338 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4339 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
4340 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4341 }
4342 
4343 static int
4344 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
4345 				  struct mlxsw_sp_fib_entry *fib_entry,
4346 				  enum mlxsw_reg_ralue_op op)
4347 {
4348 	enum mlxsw_reg_ralue_trap_action trap_action;
4349 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4350 	u16 trap_id;
4351 
4352 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4353 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
4354 
4355 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4356 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
4357 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4358 }
4359 
4360 static int
4361 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4362 				 struct mlxsw_sp_fib_entry *fib_entry,
4363 				 enum mlxsw_reg_ralue_op op)
4364 {
4365 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4366 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4367 
4368 	if (WARN_ON(!ipip_entry))
4369 		return -EINVAL;
4370 
4371 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4372 	return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4373 				      fib_entry->decap.tunnel_index);
4374 }
4375 
4376 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
4377 					   struct mlxsw_sp_fib_entry *fib_entry,
4378 					   enum mlxsw_reg_ralue_op op)
4379 {
4380 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4381 
4382 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4383 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
4384 					   fib_entry->decap.tunnel_index);
4385 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4386 }
4387 
4388 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4389 				   struct mlxsw_sp_fib_entry *fib_entry,
4390 				   enum mlxsw_reg_ralue_op op)
4391 {
4392 	switch (fib_entry->type) {
4393 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4394 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
4395 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4396 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
4397 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
4398 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
4399 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4400 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
4401 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
4402 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
4403 							 op);
4404 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4405 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4406 							fib_entry, op);
4407 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4408 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
4409 	}
4410 	return -EINVAL;
4411 }
4412 
4413 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4414 				 struct mlxsw_sp_fib_entry *fib_entry,
4415 				 enum mlxsw_reg_ralue_op op)
4416 {
4417 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
4418 
4419 	mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
4420 
4421 	return err;
4422 }
4423 
4424 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4425 				     struct mlxsw_sp_fib_entry *fib_entry)
4426 {
4427 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4428 				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
4429 }
4430 
4431 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4432 				  struct mlxsw_sp_fib_entry *fib_entry)
4433 {
4434 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4435 				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
4436 }
4437 
4438 static int
4439 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4440 			     const struct fib_entry_notifier_info *fen_info,
4441 			     struct mlxsw_sp_fib_entry *fib_entry)
4442 {
4443 	struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
4444 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4445 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
4446 	struct mlxsw_sp_ipip_entry *ipip_entry;
4447 	struct fib_info *fi = fen_info->fi;
4448 
4449 	switch (fen_info->type) {
4450 	case RTN_LOCAL:
4451 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4452 						 MLXSW_SP_L3_PROTO_IPV4, dip);
4453 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
4454 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4455 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4456 							     fib_entry,
4457 							     ipip_entry);
4458 		}
4459 		if (mlxsw_sp_nve_ipv4_route_is_decap(mlxsw_sp, tb_id,
4460 						     dip.addr4)) {
4461 			u32 t_index;
4462 
4463 			t_index = mlxsw_sp_nve_decap_tunnel_index_get(mlxsw_sp);
4464 			fib_entry->decap.tunnel_index = t_index;
4465 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
4466 			return 0;
4467 		}
4468 		/* fall through */
4469 	case RTN_BROADCAST:
4470 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4471 		return 0;
4472 	case RTN_BLACKHOLE:
4473 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
4474 		return 0;
4475 	case RTN_UNREACHABLE: /* fall through */
4476 	case RTN_PROHIBIT:
4477 		/* Packets hitting these routes need to be trapped, but
4478 		 * can do so with a lower priority than packets directed
4479 		 * at the host, so use action type local instead of trap.
4480 		 */
4481 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
4482 		return 0;
4483 	case RTN_UNICAST:
4484 		if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
4485 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4486 		else
4487 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4488 		return 0;
4489 	default:
4490 		return -EINVAL;
4491 	}
4492 }
4493 
4494 static struct mlxsw_sp_fib4_entry *
4495 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4496 			   struct mlxsw_sp_fib_node *fib_node,
4497 			   const struct fib_entry_notifier_info *fen_info)
4498 {
4499 	struct mlxsw_sp_fib4_entry *fib4_entry;
4500 	struct mlxsw_sp_fib_entry *fib_entry;
4501 	int err;
4502 
4503 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4504 	if (!fib4_entry)
4505 		return ERR_PTR(-ENOMEM);
4506 	fib_entry = &fib4_entry->common;
4507 
4508 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4509 	if (err)
4510 		goto err_fib4_entry_type_set;
4511 
4512 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
4513 	if (err)
4514 		goto err_nexthop4_group_get;
4515 
4516 	fib4_entry->prio = fen_info->fi->fib_priority;
4517 	fib4_entry->tb_id = fen_info->tb_id;
4518 	fib4_entry->type = fen_info->type;
4519 	fib4_entry->tos = fen_info->tos;
4520 
4521 	fib_entry->fib_node = fib_node;
4522 
4523 	return fib4_entry;
4524 
4525 err_nexthop4_group_get:
4526 err_fib4_entry_type_set:
4527 	kfree(fib4_entry);
4528 	return ERR_PTR(err);
4529 }
4530 
4531 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4532 					struct mlxsw_sp_fib4_entry *fib4_entry)
4533 {
4534 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
4535 	kfree(fib4_entry);
4536 }
4537 
4538 static struct mlxsw_sp_fib4_entry *
4539 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4540 			   const struct fib_entry_notifier_info *fen_info)
4541 {
4542 	struct mlxsw_sp_fib4_entry *fib4_entry;
4543 	struct mlxsw_sp_fib_node *fib_node;
4544 	struct mlxsw_sp_fib *fib;
4545 	struct mlxsw_sp_vr *vr;
4546 
4547 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4548 	if (!vr)
4549 		return NULL;
4550 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4551 
4552 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4553 					    sizeof(fen_info->dst),
4554 					    fen_info->dst_len);
4555 	if (!fib_node)
4556 		return NULL;
4557 
4558 	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4559 		if (fib4_entry->tb_id == fen_info->tb_id &&
4560 		    fib4_entry->tos == fen_info->tos &&
4561 		    fib4_entry->type == fen_info->type &&
4562 		    mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4563 		    fen_info->fi) {
4564 			return fib4_entry;
4565 		}
4566 	}
4567 
4568 	return NULL;
4569 }
4570 
4571 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4572 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4573 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4574 	.key_len = sizeof(struct mlxsw_sp_fib_key),
4575 	.automatic_shrinking = true,
4576 };
4577 
4578 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4579 				    struct mlxsw_sp_fib_node *fib_node)
4580 {
4581 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4582 				      mlxsw_sp_fib_ht_params);
4583 }
4584 
4585 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4586 				     struct mlxsw_sp_fib_node *fib_node)
4587 {
4588 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4589 			       mlxsw_sp_fib_ht_params);
4590 }
4591 
4592 static struct mlxsw_sp_fib_node *
4593 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4594 			 size_t addr_len, unsigned char prefix_len)
4595 {
4596 	struct mlxsw_sp_fib_key key;
4597 
4598 	memset(&key, 0, sizeof(key));
4599 	memcpy(key.addr, addr, addr_len);
4600 	key.prefix_len = prefix_len;
4601 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4602 }
4603 
4604 static struct mlxsw_sp_fib_node *
4605 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
4606 			 size_t addr_len, unsigned char prefix_len)
4607 {
4608 	struct mlxsw_sp_fib_node *fib_node;
4609 
4610 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4611 	if (!fib_node)
4612 		return NULL;
4613 
4614 	INIT_LIST_HEAD(&fib_node->entry_list);
4615 	list_add(&fib_node->list, &fib->node_list);
4616 	memcpy(fib_node->key.addr, addr, addr_len);
4617 	fib_node->key.prefix_len = prefix_len;
4618 
4619 	return fib_node;
4620 }
4621 
4622 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4623 {
4624 	list_del(&fib_node->list);
4625 	WARN_ON(!list_empty(&fib_node->entry_list));
4626 	kfree(fib_node);
4627 }
4628 
4629 static bool
4630 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4631 				 const struct mlxsw_sp_fib_entry *fib_entry)
4632 {
4633 	return list_first_entry(&fib_node->entry_list,
4634 				struct mlxsw_sp_fib_entry, list) == fib_entry;
4635 }
4636 
4637 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4638 				      struct mlxsw_sp_fib_node *fib_node)
4639 {
4640 	struct mlxsw_sp_prefix_usage req_prefix_usage;
4641 	struct mlxsw_sp_fib *fib = fib_node->fib;
4642 	struct mlxsw_sp_lpm_tree *lpm_tree;
4643 	int err;
4644 
4645 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4646 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4647 		goto out;
4648 
4649 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4650 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4651 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4652 					 fib->proto);
4653 	if (IS_ERR(lpm_tree))
4654 		return PTR_ERR(lpm_tree);
4655 
4656 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4657 	if (err)
4658 		goto err_lpm_tree_replace;
4659 
4660 out:
4661 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
4662 	return 0;
4663 
4664 err_lpm_tree_replace:
4665 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4666 	return err;
4667 }
4668 
4669 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4670 					 struct mlxsw_sp_fib_node *fib_node)
4671 {
4672 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4673 	struct mlxsw_sp_prefix_usage req_prefix_usage;
4674 	struct mlxsw_sp_fib *fib = fib_node->fib;
4675 	int err;
4676 
4677 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4678 		return;
4679 	/* Try to construct a new LPM tree from the current prefix usage
4680 	 * minus the unused one. If we fail, continue using the old one.
4681 	 */
4682 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4683 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4684 				    fib_node->key.prefix_len);
4685 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4686 					 fib->proto);
4687 	if (IS_ERR(lpm_tree))
4688 		return;
4689 
4690 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4691 	if (err)
4692 		goto err_lpm_tree_replace;
4693 
4694 	return;
4695 
4696 err_lpm_tree_replace:
4697 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4698 }
4699 
4700 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4701 				  struct mlxsw_sp_fib_node *fib_node,
4702 				  struct mlxsw_sp_fib *fib)
4703 {
4704 	int err;
4705 
4706 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
4707 	if (err)
4708 		return err;
4709 	fib_node->fib = fib;
4710 
4711 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
4712 	if (err)
4713 		goto err_fib_lpm_tree_link;
4714 
4715 	return 0;
4716 
4717 err_fib_lpm_tree_link:
4718 	fib_node->fib = NULL;
4719 	mlxsw_sp_fib_node_remove(fib, fib_node);
4720 	return err;
4721 }
4722 
4723 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4724 				   struct mlxsw_sp_fib_node *fib_node)
4725 {
4726 	struct mlxsw_sp_fib *fib = fib_node->fib;
4727 
4728 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
4729 	fib_node->fib = NULL;
4730 	mlxsw_sp_fib_node_remove(fib, fib_node);
4731 }
4732 
4733 static struct mlxsw_sp_fib_node *
4734 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4735 		      size_t addr_len, unsigned char prefix_len,
4736 		      enum mlxsw_sp_l3proto proto)
4737 {
4738 	struct mlxsw_sp_fib_node *fib_node;
4739 	struct mlxsw_sp_fib *fib;
4740 	struct mlxsw_sp_vr *vr;
4741 	int err;
4742 
4743 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
4744 	if (IS_ERR(vr))
4745 		return ERR_CAST(vr);
4746 	fib = mlxsw_sp_vr_fib(vr, proto);
4747 
4748 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
4749 	if (fib_node)
4750 		return fib_node;
4751 
4752 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
4753 	if (!fib_node) {
4754 		err = -ENOMEM;
4755 		goto err_fib_node_create;
4756 	}
4757 
4758 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4759 	if (err)
4760 		goto err_fib_node_init;
4761 
4762 	return fib_node;
4763 
4764 err_fib_node_init:
4765 	mlxsw_sp_fib_node_destroy(fib_node);
4766 err_fib_node_create:
4767 	mlxsw_sp_vr_put(mlxsw_sp, vr);
4768 	return ERR_PTR(err);
4769 }
4770 
4771 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4772 				  struct mlxsw_sp_fib_node *fib_node)
4773 {
4774 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
4775 
4776 	if (!list_empty(&fib_node->entry_list))
4777 		return;
4778 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
4779 	mlxsw_sp_fib_node_destroy(fib_node);
4780 	mlxsw_sp_vr_put(mlxsw_sp, vr);
4781 }
4782 
4783 static struct mlxsw_sp_fib4_entry *
4784 mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
4785 			      const struct mlxsw_sp_fib4_entry *new4_entry)
4786 {
4787 	struct mlxsw_sp_fib4_entry *fib4_entry;
4788 
4789 	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4790 		if (fib4_entry->tb_id > new4_entry->tb_id)
4791 			continue;
4792 		if (fib4_entry->tb_id != new4_entry->tb_id)
4793 			break;
4794 		if (fib4_entry->tos > new4_entry->tos)
4795 			continue;
4796 		if (fib4_entry->prio >= new4_entry->prio ||
4797 		    fib4_entry->tos < new4_entry->tos)
4798 			return fib4_entry;
4799 	}
4800 
4801 	return NULL;
4802 }
4803 
4804 static int
4805 mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4806 			       struct mlxsw_sp_fib4_entry *new4_entry)
4807 {
4808 	struct mlxsw_sp_fib_node *fib_node;
4809 
4810 	if (WARN_ON(!fib4_entry))
4811 		return -EINVAL;
4812 
4813 	fib_node = fib4_entry->common.fib_node;
4814 	list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4815 				 common.list) {
4816 		if (fib4_entry->tb_id != new4_entry->tb_id ||
4817 		    fib4_entry->tos != new4_entry->tos ||
4818 		    fib4_entry->prio != new4_entry->prio)
4819 			break;
4820 	}
4821 
4822 	list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
4823 	return 0;
4824 }
4825 
4826 static int
4827 mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
4828 			       bool replace, bool append)
4829 {
4830 	struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
4831 	struct mlxsw_sp_fib4_entry *fib4_entry;
4832 
4833 	fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
4834 
4835 	if (append)
4836 		return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4837 	if (replace && WARN_ON(!fib4_entry))
4838 		return -EINVAL;
4839 
4840 	/* Insert new entry before replaced one, so that we can later
4841 	 * remove the second.
4842 	 */
4843 	if (fib4_entry) {
4844 		list_add_tail(&new4_entry->common.list,
4845 			      &fib4_entry->common.list);
4846 	} else {
4847 		struct mlxsw_sp_fib4_entry *last;
4848 
4849 		list_for_each_entry(last, &fib_node->entry_list, common.list) {
4850 			if (new4_entry->tb_id > last->tb_id)
4851 				break;
4852 			fib4_entry = last;
4853 		}
4854 
4855 		if (fib4_entry)
4856 			list_add(&new4_entry->common.list,
4857 				 &fib4_entry->common.list);
4858 		else
4859 			list_add(&new4_entry->common.list,
4860 				 &fib_node->entry_list);
4861 	}
4862 
4863 	return 0;
4864 }
4865 
4866 static void
4867 mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
4868 {
4869 	list_del(&fib4_entry->common.list);
4870 }
4871 
4872 static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4873 				       struct mlxsw_sp_fib_entry *fib_entry)
4874 {
4875 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4876 
4877 	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4878 		return 0;
4879 
4880 	/* To prevent packet loss, overwrite the previously offloaded
4881 	 * entry.
4882 	 */
4883 	if (!list_is_singular(&fib_node->entry_list)) {
4884 		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4885 		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4886 
4887 		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4888 	}
4889 
4890 	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4891 }
4892 
4893 static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4894 					struct mlxsw_sp_fib_entry *fib_entry)
4895 {
4896 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4897 
4898 	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4899 		return;
4900 
4901 	/* Promote the next entry by overwriting the deleted entry */
4902 	if (!list_is_singular(&fib_node->entry_list)) {
4903 		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4904 		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4905 
4906 		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4907 		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4908 		return;
4909 	}
4910 
4911 	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4912 }
4913 
4914 static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
4915 					 struct mlxsw_sp_fib4_entry *fib4_entry,
4916 					 bool replace, bool append)
4917 {
4918 	int err;
4919 
4920 	err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
4921 	if (err)
4922 		return err;
4923 
4924 	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
4925 	if (err)
4926 		goto err_fib_node_entry_add;
4927 
4928 	return 0;
4929 
4930 err_fib_node_entry_add:
4931 	mlxsw_sp_fib4_node_list_remove(fib4_entry);
4932 	return err;
4933 }
4934 
4935 static void
4936 mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4937 				struct mlxsw_sp_fib4_entry *fib4_entry)
4938 {
4939 	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
4940 	mlxsw_sp_fib4_node_list_remove(fib4_entry);
4941 
4942 	if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4943 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
4944 }
4945 
4946 static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
4947 					struct mlxsw_sp_fib4_entry *fib4_entry,
4948 					bool replace)
4949 {
4950 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4951 	struct mlxsw_sp_fib4_entry *replaced;
4952 
4953 	if (!replace)
4954 		return;
4955 
4956 	/* We inserted the new entry before replaced one */
4957 	replaced = list_next_entry(fib4_entry, common.list);
4958 
4959 	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4960 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
4961 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4962 }
4963 
4964 static int
4965 mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
4966 			 const struct fib_entry_notifier_info *fen_info,
4967 			 bool replace, bool append)
4968 {
4969 	struct mlxsw_sp_fib4_entry *fib4_entry;
4970 	struct mlxsw_sp_fib_node *fib_node;
4971 	int err;
4972 
4973 	if (mlxsw_sp->router->aborted)
4974 		return 0;
4975 
4976 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4977 					 &fen_info->dst, sizeof(fen_info->dst),
4978 					 fen_info->dst_len,
4979 					 MLXSW_SP_L3_PROTO_IPV4);
4980 	if (IS_ERR(fib_node)) {
4981 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4982 		return PTR_ERR(fib_node);
4983 	}
4984 
4985 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4986 	if (IS_ERR(fib4_entry)) {
4987 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
4988 		err = PTR_ERR(fib4_entry);
4989 		goto err_fib4_entry_create;
4990 	}
4991 
4992 	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
4993 					    append);
4994 	if (err) {
4995 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4996 		goto err_fib4_node_entry_link;
4997 	}
4998 
4999 	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
5000 
5001 	return 0;
5002 
5003 err_fib4_node_entry_link:
5004 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5005 err_fib4_entry_create:
5006 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5007 	return err;
5008 }
5009 
5010 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
5011 				     struct fib_entry_notifier_info *fen_info)
5012 {
5013 	struct mlxsw_sp_fib4_entry *fib4_entry;
5014 	struct mlxsw_sp_fib_node *fib_node;
5015 
5016 	if (mlxsw_sp->router->aborted)
5017 		return;
5018 
5019 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
5020 	if (WARN_ON(!fib4_entry))
5021 		return;
5022 	fib_node = fib4_entry->common.fib_node;
5023 
5024 	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5025 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5026 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5027 }
5028 
5029 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
5030 {
5031 	/* Packets with link-local destination IP arriving to the router
5032 	 * are trapped to the CPU, so no need to program specific routes
5033 	 * for them.
5034 	 */
5035 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
5036 		return true;
5037 
5038 	/* Multicast routes aren't supported, so ignore them. Neighbour
5039 	 * Discovery packets are specifically trapped.
5040 	 */
5041 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
5042 		return true;
5043 
5044 	/* Cloned routes are irrelevant in the forwarding path. */
5045 	if (rt->fib6_flags & RTF_CACHE)
5046 		return true;
5047 
5048 	return false;
5049 }
5050 
5051 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
5052 {
5053 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5054 
5055 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
5056 	if (!mlxsw_sp_rt6)
5057 		return ERR_PTR(-ENOMEM);
5058 
5059 	/* In case of route replace, replaced route is deleted with
5060 	 * no notification. Take reference to prevent accessing freed
5061 	 * memory.
5062 	 */
5063 	mlxsw_sp_rt6->rt = rt;
5064 	fib6_info_hold(rt);
5065 
5066 	return mlxsw_sp_rt6;
5067 }
5068 
5069 #if IS_ENABLED(CONFIG_IPV6)
5070 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5071 {
5072 	fib6_info_release(rt);
5073 }
5074 #else
5075 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5076 {
5077 }
5078 #endif
5079 
5080 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5081 {
5082 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
5083 	kfree(mlxsw_sp_rt6);
5084 }
5085 
5086 static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
5087 {
5088 	/* RTF_CACHE routes are ignored */
5089 	return !(rt->fib6_flags & RTF_ADDRCONF) &&
5090 		rt->fib6_nh->fib_nh_gw_family;
5091 }
5092 
5093 static struct fib6_info *
5094 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
5095 {
5096 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
5097 				list)->rt;
5098 }
5099 
5100 static struct mlxsw_sp_fib6_entry *
5101 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
5102 				 const struct fib6_info *nrt, bool replace)
5103 {
5104 	struct mlxsw_sp_fib6_entry *fib6_entry;
5105 
5106 	if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
5107 		return NULL;
5108 
5109 	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5110 		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5111 
5112 		/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
5113 		 * virtual router.
5114 		 */
5115 		if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
5116 			continue;
5117 		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
5118 			break;
5119 		if (rt->fib6_metric < nrt->fib6_metric)
5120 			continue;
5121 		if (rt->fib6_metric == nrt->fib6_metric &&
5122 		    mlxsw_sp_fib6_rt_can_mp(rt))
5123 			return fib6_entry;
5124 		if (rt->fib6_metric > nrt->fib6_metric)
5125 			break;
5126 	}
5127 
5128 	return NULL;
5129 }
5130 
5131 static struct mlxsw_sp_rt6 *
5132 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
5133 			    const struct fib6_info *rt)
5134 {
5135 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5136 
5137 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
5138 		if (mlxsw_sp_rt6->rt == rt)
5139 			return mlxsw_sp_rt6;
5140 	}
5141 
5142 	return NULL;
5143 }
5144 
5145 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
5146 					const struct fib6_info *rt,
5147 					enum mlxsw_sp_ipip_type *ret)
5148 {
5149 	return rt->fib6_nh->fib_nh_dev &&
5150 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
5151 }
5152 
5153 static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
5154 				       struct mlxsw_sp_nexthop_group *nh_grp,
5155 				       struct mlxsw_sp_nexthop *nh,
5156 				       const struct fib6_info *rt)
5157 {
5158 	const struct mlxsw_sp_ipip_ops *ipip_ops;
5159 	struct mlxsw_sp_ipip_entry *ipip_entry;
5160 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5161 	struct mlxsw_sp_rif *rif;
5162 	int err;
5163 
5164 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
5165 	if (ipip_entry) {
5166 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5167 		if (ipip_ops->can_offload(mlxsw_sp, dev,
5168 					  MLXSW_SP_L3_PROTO_IPV6)) {
5169 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
5170 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
5171 			return 0;
5172 		}
5173 	}
5174 
5175 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
5176 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
5177 	if (!rif)
5178 		return 0;
5179 	mlxsw_sp_nexthop_rif_init(nh, rif);
5180 
5181 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
5182 	if (err)
5183 		goto err_nexthop_neigh_init;
5184 
5185 	return 0;
5186 
5187 err_nexthop_neigh_init:
5188 	mlxsw_sp_nexthop_rif_fini(nh);
5189 	return err;
5190 }
5191 
5192 static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
5193 					struct mlxsw_sp_nexthop *nh)
5194 {
5195 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5196 }
5197 
5198 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
5199 				  struct mlxsw_sp_nexthop_group *nh_grp,
5200 				  struct mlxsw_sp_nexthop *nh,
5201 				  const struct fib6_info *rt)
5202 {
5203 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5204 
5205 	nh->nh_grp = nh_grp;
5206 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
5207 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
5208 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5209 
5210 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5211 
5212 	if (!dev)
5213 		return 0;
5214 	nh->ifindex = dev->ifindex;
5215 
5216 	return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
5217 }
5218 
5219 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
5220 				   struct mlxsw_sp_nexthop *nh)
5221 {
5222 	mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
5223 	list_del(&nh->router_list_node);
5224 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5225 }
5226 
5227 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5228 				    const struct fib6_info *rt)
5229 {
5230 	return rt->fib6_nh->fib_nh_gw_family ||
5231 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
5232 }
5233 
5234 static struct mlxsw_sp_nexthop_group *
5235 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
5236 			       struct mlxsw_sp_fib6_entry *fib6_entry)
5237 {
5238 	struct mlxsw_sp_nexthop_group *nh_grp;
5239 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5240 	struct mlxsw_sp_nexthop *nh;
5241 	int i = 0;
5242 	int err;
5243 
5244 	nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
5245 			 GFP_KERNEL);
5246 	if (!nh_grp)
5247 		return ERR_PTR(-ENOMEM);
5248 	INIT_LIST_HEAD(&nh_grp->fib_list);
5249 #if IS_ENABLED(CONFIG_IPV6)
5250 	nh_grp->neigh_tbl = &nd_tbl;
5251 #endif
5252 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
5253 					struct mlxsw_sp_rt6, list);
5254 	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
5255 	nh_grp->count = fib6_entry->nrt6;
5256 	for (i = 0; i < nh_grp->count; i++) {
5257 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5258 
5259 		nh = &nh_grp->nexthops[i];
5260 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
5261 		if (err)
5262 			goto err_nexthop6_init;
5263 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
5264 	}
5265 
5266 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5267 	if (err)
5268 		goto err_nexthop_group_insert;
5269 
5270 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5271 	return nh_grp;
5272 
5273 err_nexthop_group_insert:
5274 err_nexthop6_init:
5275 	for (i--; i >= 0; i--) {
5276 		nh = &nh_grp->nexthops[i];
5277 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5278 	}
5279 	kfree(nh_grp);
5280 	return ERR_PTR(err);
5281 }
5282 
5283 static void
5284 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
5285 				struct mlxsw_sp_nexthop_group *nh_grp)
5286 {
5287 	struct mlxsw_sp_nexthop *nh;
5288 	int i = nh_grp->count;
5289 
5290 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5291 	for (i--; i >= 0; i--) {
5292 		nh = &nh_grp->nexthops[i];
5293 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5294 	}
5295 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5296 	WARN_ON(nh_grp->adj_index_valid);
5297 	kfree(nh_grp);
5298 }
5299 
5300 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
5301 				       struct mlxsw_sp_fib6_entry *fib6_entry)
5302 {
5303 	struct mlxsw_sp_nexthop_group *nh_grp;
5304 
5305 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
5306 	if (!nh_grp) {
5307 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
5308 		if (IS_ERR(nh_grp))
5309 			return PTR_ERR(nh_grp);
5310 	}
5311 
5312 	list_add_tail(&fib6_entry->common.nexthop_group_node,
5313 		      &nh_grp->fib_list);
5314 	fib6_entry->common.nh_group = nh_grp;
5315 
5316 	return 0;
5317 }
5318 
5319 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
5320 					struct mlxsw_sp_fib_entry *fib_entry)
5321 {
5322 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5323 
5324 	list_del(&fib_entry->nexthop_group_node);
5325 	if (!list_empty(&nh_grp->fib_list))
5326 		return;
5327 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
5328 }
5329 
5330 static int
5331 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5332 			       struct mlxsw_sp_fib6_entry *fib6_entry)
5333 {
5334 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5335 	int err;
5336 
5337 	fib6_entry->common.nh_group = NULL;
5338 	list_del(&fib6_entry->common.nexthop_group_node);
5339 
5340 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5341 	if (err)
5342 		goto err_nexthop6_group_get;
5343 
5344 	/* In case this entry is offloaded, then the adjacency index
5345 	 * currently associated with it in the device's table is that
5346 	 * of the old group. Start using the new one instead.
5347 	 */
5348 	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5349 	if (err)
5350 		goto err_fib_node_entry_add;
5351 
5352 	if (list_empty(&old_nh_grp->fib_list))
5353 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5354 
5355 	return 0;
5356 
5357 err_fib_node_entry_add:
5358 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5359 err_nexthop6_group_get:
5360 	list_add_tail(&fib6_entry->common.nexthop_group_node,
5361 		      &old_nh_grp->fib_list);
5362 	fib6_entry->common.nh_group = old_nh_grp;
5363 	return err;
5364 }
5365 
5366 static int
5367 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5368 				struct mlxsw_sp_fib6_entry *fib6_entry,
5369 				struct fib6_info **rt_arr, unsigned int nrt6)
5370 {
5371 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5372 	int err, i;
5373 
5374 	for (i = 0; i < nrt6; i++) {
5375 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5376 		if (IS_ERR(mlxsw_sp_rt6)) {
5377 			err = PTR_ERR(mlxsw_sp_rt6);
5378 			goto err_rt6_create;
5379 		}
5380 
5381 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5382 		fib6_entry->nrt6++;
5383 	}
5384 
5385 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5386 	if (err)
5387 		goto err_nexthop6_group_update;
5388 
5389 	return 0;
5390 
5391 err_nexthop6_group_update:
5392 	i = nrt6;
5393 err_rt6_create:
5394 	for (i--; i >= 0; i--) {
5395 		fib6_entry->nrt6--;
5396 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5397 					       struct mlxsw_sp_rt6, list);
5398 		list_del(&mlxsw_sp_rt6->list);
5399 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5400 	}
5401 	return err;
5402 }
5403 
5404 static void
5405 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5406 				struct mlxsw_sp_fib6_entry *fib6_entry,
5407 				struct fib6_info **rt_arr, unsigned int nrt6)
5408 {
5409 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5410 	int i;
5411 
5412 	for (i = 0; i < nrt6; i++) {
5413 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
5414 							   rt_arr[i]);
5415 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
5416 			continue;
5417 
5418 		fib6_entry->nrt6--;
5419 		list_del(&mlxsw_sp_rt6->list);
5420 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5421 	}
5422 
5423 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5424 }
5425 
5426 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5427 					 struct mlxsw_sp_fib_entry *fib_entry,
5428 					 const struct fib6_info *rt)
5429 {
5430 	/* Packets hitting RTF_REJECT routes need to be discarded by the
5431 	 * stack. We can rely on their destination device not having a
5432 	 * RIF (it's the loopback device) and can thus use action type
5433 	 * local, which will cause them to be trapped with a lower
5434 	 * priority than packets that need to be locally received.
5435 	 */
5436 	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
5437 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5438 	else if (rt->fib6_type == RTN_BLACKHOLE)
5439 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
5440 	else if (rt->fib6_flags & RTF_REJECT)
5441 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
5442 	else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
5443 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5444 	else
5445 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5446 }
5447 
5448 static void
5449 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5450 {
5451 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5452 
5453 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5454 				 list) {
5455 		fib6_entry->nrt6--;
5456 		list_del(&mlxsw_sp_rt6->list);
5457 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5458 	}
5459 }
5460 
5461 static struct mlxsw_sp_fib6_entry *
5462 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5463 			   struct mlxsw_sp_fib_node *fib_node,
5464 			   struct fib6_info **rt_arr, unsigned int nrt6)
5465 {
5466 	struct mlxsw_sp_fib6_entry *fib6_entry;
5467 	struct mlxsw_sp_fib_entry *fib_entry;
5468 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5469 	int err, i;
5470 
5471 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5472 	if (!fib6_entry)
5473 		return ERR_PTR(-ENOMEM);
5474 	fib_entry = &fib6_entry->common;
5475 
5476 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
5477 
5478 	for (i = 0; i < nrt6; i++) {
5479 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5480 		if (IS_ERR(mlxsw_sp_rt6)) {
5481 			err = PTR_ERR(mlxsw_sp_rt6);
5482 			goto err_rt6_create;
5483 		}
5484 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5485 		fib6_entry->nrt6++;
5486 	}
5487 
5488 	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
5489 
5490 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5491 	if (err)
5492 		goto err_nexthop6_group_get;
5493 
5494 	fib_entry->fib_node = fib_node;
5495 
5496 	return fib6_entry;
5497 
5498 err_nexthop6_group_get:
5499 	i = nrt6;
5500 err_rt6_create:
5501 	for (i--; i >= 0; i--) {
5502 		fib6_entry->nrt6--;
5503 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5504 					       struct mlxsw_sp_rt6, list);
5505 		list_del(&mlxsw_sp_rt6->list);
5506 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5507 	}
5508 	kfree(fib6_entry);
5509 	return ERR_PTR(err);
5510 }
5511 
5512 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5513 					struct mlxsw_sp_fib6_entry *fib6_entry)
5514 {
5515 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5516 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5517 	WARN_ON(fib6_entry->nrt6);
5518 	kfree(fib6_entry);
5519 }
5520 
5521 static struct mlxsw_sp_fib6_entry *
5522 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
5523 			      const struct fib6_info *nrt, bool replace)
5524 {
5525 	struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
5526 
5527 	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5528 		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5529 
5530 		if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
5531 			continue;
5532 		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
5533 			break;
5534 		if (replace && rt->fib6_metric == nrt->fib6_metric) {
5535 			if (mlxsw_sp_fib6_rt_can_mp(rt) ==
5536 			    mlxsw_sp_fib6_rt_can_mp(nrt))
5537 				return fib6_entry;
5538 			if (mlxsw_sp_fib6_rt_can_mp(nrt))
5539 				fallback = fallback ?: fib6_entry;
5540 		}
5541 		if (rt->fib6_metric > nrt->fib6_metric)
5542 			return fallback ?: fib6_entry;
5543 	}
5544 
5545 	return fallback;
5546 }
5547 
5548 static int
5549 mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5550 			       bool *p_replace)
5551 {
5552 	struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
5553 	struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
5554 	struct mlxsw_sp_fib6_entry *fib6_entry;
5555 
5556 	fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, *p_replace);
5557 
5558 	if (*p_replace && !fib6_entry)
5559 		*p_replace = false;
5560 
5561 	if (fib6_entry) {
5562 		list_add_tail(&new6_entry->common.list,
5563 			      &fib6_entry->common.list);
5564 	} else {
5565 		struct mlxsw_sp_fib6_entry *last;
5566 
5567 		list_for_each_entry(last, &fib_node->entry_list, common.list) {
5568 			struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
5569 
5570 			if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
5571 				break;
5572 			fib6_entry = last;
5573 		}
5574 
5575 		if (fib6_entry)
5576 			list_add(&new6_entry->common.list,
5577 				 &fib6_entry->common.list);
5578 		else
5579 			list_add(&new6_entry->common.list,
5580 				 &fib_node->entry_list);
5581 	}
5582 
5583 	return 0;
5584 }
5585 
5586 static void
5587 mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5588 {
5589 	list_del(&fib6_entry->common.list);
5590 }
5591 
5592 static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
5593 					 struct mlxsw_sp_fib6_entry *fib6_entry,
5594 					 bool *p_replace)
5595 {
5596 	int err;
5597 
5598 	err = mlxsw_sp_fib6_node_list_insert(fib6_entry, p_replace);
5599 	if (err)
5600 		return err;
5601 
5602 	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5603 	if (err)
5604 		goto err_fib_node_entry_add;
5605 
5606 	return 0;
5607 
5608 err_fib_node_entry_add:
5609 	mlxsw_sp_fib6_node_list_remove(fib6_entry);
5610 	return err;
5611 }
5612 
5613 static void
5614 mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5615 				struct mlxsw_sp_fib6_entry *fib6_entry)
5616 {
5617 	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5618 	mlxsw_sp_fib6_node_list_remove(fib6_entry);
5619 }
5620 
5621 static struct mlxsw_sp_fib6_entry *
5622 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5623 			   const struct fib6_info *rt)
5624 {
5625 	struct mlxsw_sp_fib6_entry *fib6_entry;
5626 	struct mlxsw_sp_fib_node *fib_node;
5627 	struct mlxsw_sp_fib *fib;
5628 	struct mlxsw_sp_vr *vr;
5629 
5630 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
5631 	if (!vr)
5632 		return NULL;
5633 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5634 
5635 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5636 					    sizeof(rt->fib6_dst.addr),
5637 					    rt->fib6_dst.plen);
5638 	if (!fib_node)
5639 		return NULL;
5640 
5641 	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5642 		struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5643 
5644 		if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
5645 		    rt->fib6_metric == iter_rt->fib6_metric &&
5646 		    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5647 			return fib6_entry;
5648 	}
5649 
5650 	return NULL;
5651 }
5652 
5653 static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5654 					struct mlxsw_sp_fib6_entry *fib6_entry,
5655 					bool replace)
5656 {
5657 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5658 	struct mlxsw_sp_fib6_entry *replaced;
5659 
5660 	if (!replace)
5661 		return;
5662 
5663 	replaced = list_next_entry(fib6_entry, common.list);
5664 
5665 	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5666 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5667 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5668 }
5669 
5670 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5671 				    struct fib6_info **rt_arr,
5672 				    unsigned int nrt6, bool replace)
5673 {
5674 	struct mlxsw_sp_fib6_entry *fib6_entry;
5675 	struct mlxsw_sp_fib_node *fib_node;
5676 	struct fib6_info *rt = rt_arr[0];
5677 	int err;
5678 
5679 	if (mlxsw_sp->router->aborted)
5680 		return 0;
5681 
5682 	if (rt->fib6_src.plen)
5683 		return -EINVAL;
5684 
5685 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
5686 		return 0;
5687 
5688 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5689 					 &rt->fib6_dst.addr,
5690 					 sizeof(rt->fib6_dst.addr),
5691 					 rt->fib6_dst.plen,
5692 					 MLXSW_SP_L3_PROTO_IPV6);
5693 	if (IS_ERR(fib_node))
5694 		return PTR_ERR(fib_node);
5695 
5696 	/* Before creating a new entry, try to append route to an existing
5697 	 * multipath entry.
5698 	 */
5699 	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
5700 	if (fib6_entry) {
5701 		err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry,
5702 						      rt_arr, nrt6);
5703 		if (err)
5704 			goto err_fib6_entry_nexthop_add;
5705 		return 0;
5706 	}
5707 
5708 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
5709 						nrt6);
5710 	if (IS_ERR(fib6_entry)) {
5711 		err = PTR_ERR(fib6_entry);
5712 		goto err_fib6_entry_create;
5713 	}
5714 
5715 	err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, &replace);
5716 	if (err)
5717 		goto err_fib6_node_entry_link;
5718 
5719 	mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5720 
5721 	return 0;
5722 
5723 err_fib6_node_entry_link:
5724 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5725 err_fib6_entry_create:
5726 err_fib6_entry_nexthop_add:
5727 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5728 	return err;
5729 }
5730 
5731 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5732 				     struct fib6_info **rt_arr,
5733 				     unsigned int nrt6)
5734 {
5735 	struct mlxsw_sp_fib6_entry *fib6_entry;
5736 	struct mlxsw_sp_fib_node *fib_node;
5737 	struct fib6_info *rt = rt_arr[0];
5738 
5739 	if (mlxsw_sp->router->aborted)
5740 		return;
5741 
5742 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
5743 		return;
5744 
5745 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5746 	if (WARN_ON(!fib6_entry))
5747 		return;
5748 
5749 	/* If not all the nexthops are deleted, then only reduce the nexthop
5750 	 * group.
5751 	 */
5752 	if (nrt6 != fib6_entry->nrt6) {
5753 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
5754 						nrt6);
5755 		return;
5756 	}
5757 
5758 	fib_node = fib6_entry->common.fib_node;
5759 
5760 	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5761 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5762 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5763 }
5764 
5765 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5766 					    enum mlxsw_reg_ralxx_protocol proto,
5767 					    u8 tree_id)
5768 {
5769 	char ralta_pl[MLXSW_REG_RALTA_LEN];
5770 	char ralst_pl[MLXSW_REG_RALST_LEN];
5771 	int i, err;
5772 
5773 	mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
5774 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5775 	if (err)
5776 		return err;
5777 
5778 	mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
5779 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5780 	if (err)
5781 		return err;
5782 
5783 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5784 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5785 		char raltb_pl[MLXSW_REG_RALTB_LEN];
5786 		char ralue_pl[MLXSW_REG_RALUE_LEN];
5787 
5788 		mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
5789 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5790 				      raltb_pl);
5791 		if (err)
5792 			return err;
5793 
5794 		mlxsw_reg_ralue_pack(ralue_pl, proto,
5795 				     MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
5796 		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5797 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5798 				      ralue_pl);
5799 		if (err)
5800 			return err;
5801 	}
5802 
5803 	return 0;
5804 }
5805 
5806 static struct mlxsw_sp_mr_table *
5807 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5808 {
5809 	if (family == RTNL_FAMILY_IPMR)
5810 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
5811 	else
5812 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
5813 }
5814 
5815 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5816 				     struct mfc_entry_notifier_info *men_info,
5817 				     bool replace)
5818 {
5819 	struct mlxsw_sp_mr_table *mrt;
5820 	struct mlxsw_sp_vr *vr;
5821 
5822 	if (mlxsw_sp->router->aborted)
5823 		return 0;
5824 
5825 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
5826 	if (IS_ERR(vr))
5827 		return PTR_ERR(vr);
5828 
5829 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5830 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
5831 }
5832 
5833 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5834 				      struct mfc_entry_notifier_info *men_info)
5835 {
5836 	struct mlxsw_sp_mr_table *mrt;
5837 	struct mlxsw_sp_vr *vr;
5838 
5839 	if (mlxsw_sp->router->aborted)
5840 		return;
5841 
5842 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5843 	if (WARN_ON(!vr))
5844 		return;
5845 
5846 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5847 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
5848 	mlxsw_sp_vr_put(mlxsw_sp, vr);
5849 }
5850 
5851 static int
5852 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5853 			      struct vif_entry_notifier_info *ven_info)
5854 {
5855 	struct mlxsw_sp_mr_table *mrt;
5856 	struct mlxsw_sp_rif *rif;
5857 	struct mlxsw_sp_vr *vr;
5858 
5859 	if (mlxsw_sp->router->aborted)
5860 		return 0;
5861 
5862 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
5863 	if (IS_ERR(vr))
5864 		return PTR_ERR(vr);
5865 
5866 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5867 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5868 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
5869 				   ven_info->vif_index,
5870 				   ven_info->vif_flags, rif);
5871 }
5872 
5873 static void
5874 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5875 			      struct vif_entry_notifier_info *ven_info)
5876 {
5877 	struct mlxsw_sp_mr_table *mrt;
5878 	struct mlxsw_sp_vr *vr;
5879 
5880 	if (mlxsw_sp->router->aborted)
5881 		return;
5882 
5883 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5884 	if (WARN_ON(!vr))
5885 		return;
5886 
5887 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5888 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
5889 	mlxsw_sp_vr_put(mlxsw_sp, vr);
5890 }
5891 
5892 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5893 {
5894 	enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5895 	int err;
5896 
5897 	err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5898 					       MLXSW_SP_LPM_TREE_MIN);
5899 	if (err)
5900 		return err;
5901 
5902 	/* The multicast router code does not need an abort trap as by default,
5903 	 * packets that don't match any routes are trapped to the CPU.
5904 	 */
5905 
5906 	proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5907 	return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5908 						MLXSW_SP_LPM_TREE_MIN + 1);
5909 }
5910 
5911 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5912 				     struct mlxsw_sp_fib_node *fib_node)
5913 {
5914 	struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
5915 
5916 	list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5917 				 common.list) {
5918 		bool do_break = &tmp->common.list == &fib_node->entry_list;
5919 
5920 		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5921 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5922 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5923 		/* Break when entry list is empty and node was freed.
5924 		 * Otherwise, we'll access freed memory in the next
5925 		 * iteration.
5926 		 */
5927 		if (do_break)
5928 			break;
5929 	}
5930 }
5931 
5932 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5933 				     struct mlxsw_sp_fib_node *fib_node)
5934 {
5935 	struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5936 
5937 	list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5938 				 common.list) {
5939 		bool do_break = &tmp->common.list == &fib_node->entry_list;
5940 
5941 		mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5942 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5943 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5944 		if (do_break)
5945 			break;
5946 	}
5947 }
5948 
5949 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5950 				    struct mlxsw_sp_fib_node *fib_node)
5951 {
5952 	switch (fib_node->fib->proto) {
5953 	case MLXSW_SP_L3_PROTO_IPV4:
5954 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5955 		break;
5956 	case MLXSW_SP_L3_PROTO_IPV6:
5957 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
5958 		break;
5959 	}
5960 }
5961 
5962 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5963 				  struct mlxsw_sp_vr *vr,
5964 				  enum mlxsw_sp_l3proto proto)
5965 {
5966 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5967 	struct mlxsw_sp_fib_node *fib_node, *tmp;
5968 
5969 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5970 		bool do_break = &tmp->list == &fib->node_list;
5971 
5972 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5973 		if (do_break)
5974 			break;
5975 	}
5976 }
5977 
5978 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
5979 {
5980 	int i, j;
5981 
5982 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5983 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5984 
5985 		if (!mlxsw_sp_vr_is_used(vr))
5986 			continue;
5987 
5988 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
5989 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
5990 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
5991 
5992 		/* If virtual router was only used for IPv4, then it's no
5993 		 * longer used.
5994 		 */
5995 		if (!mlxsw_sp_vr_is_used(vr))
5996 			continue;
5997 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
5998 	}
5999 
6000 	/* After flushing all the routes, it is not possible anyone is still
6001 	 * using the adjacency index that is discarding packets, so free it in
6002 	 * case it was allocated.
6003 	 */
6004 	if (!mlxsw_sp->router->adj_discard_index_valid)
6005 		return;
6006 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
6007 			   mlxsw_sp->router->adj_discard_index);
6008 	mlxsw_sp->router->adj_discard_index_valid = false;
6009 }
6010 
6011 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
6012 {
6013 	int err;
6014 
6015 	if (mlxsw_sp->router->aborted)
6016 		return;
6017 	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
6018 	mlxsw_sp_router_fib_flush(mlxsw_sp);
6019 	mlxsw_sp->router->aborted = true;
6020 	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
6021 	if (err)
6022 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
6023 }
6024 
6025 struct mlxsw_sp_fib6_event_work {
6026 	struct fib6_info **rt_arr;
6027 	unsigned int nrt6;
6028 };
6029 
6030 struct mlxsw_sp_fib_event_work {
6031 	struct work_struct work;
6032 	union {
6033 		struct mlxsw_sp_fib6_event_work fib6_work;
6034 		struct fib_entry_notifier_info fen_info;
6035 		struct fib_rule_notifier_info fr_info;
6036 		struct fib_nh_notifier_info fnh_info;
6037 		struct mfc_entry_notifier_info men_info;
6038 		struct vif_entry_notifier_info ven_info;
6039 	};
6040 	struct mlxsw_sp *mlxsw_sp;
6041 	unsigned long event;
6042 };
6043 
6044 static int
6045 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
6046 			       struct fib6_entry_notifier_info *fen6_info)
6047 {
6048 	struct fib6_info *rt = fen6_info->rt;
6049 	struct fib6_info **rt_arr;
6050 	struct fib6_info *iter;
6051 	unsigned int nrt6;
6052 	int i = 0;
6053 
6054 	nrt6 = fen6_info->nsiblings + 1;
6055 
6056 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
6057 	if (!rt_arr)
6058 		return -ENOMEM;
6059 
6060 	fib6_work->rt_arr = rt_arr;
6061 	fib6_work->nrt6 = nrt6;
6062 
6063 	rt_arr[0] = rt;
6064 	fib6_info_hold(rt);
6065 
6066 	if (!fen6_info->nsiblings)
6067 		return 0;
6068 
6069 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
6070 		if (i == fen6_info->nsiblings)
6071 			break;
6072 
6073 		rt_arr[i + 1] = iter;
6074 		fib6_info_hold(iter);
6075 		i++;
6076 	}
6077 	WARN_ON_ONCE(i != fen6_info->nsiblings);
6078 
6079 	return 0;
6080 }
6081 
6082 static void
6083 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
6084 {
6085 	int i;
6086 
6087 	for (i = 0; i < fib6_work->nrt6; i++)
6088 		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
6089 	kfree(fib6_work->rt_arr);
6090 }
6091 
6092 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
6093 {
6094 	struct mlxsw_sp_fib_event_work *fib_work =
6095 		container_of(work, struct mlxsw_sp_fib_event_work, work);
6096 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6097 	bool replace, append;
6098 	int err;
6099 
6100 	/* Protect internal structures from changes */
6101 	rtnl_lock();
6102 	mlxsw_sp_span_respin(mlxsw_sp);
6103 
6104 	switch (fib_work->event) {
6105 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6106 	case FIB_EVENT_ENTRY_APPEND: /* fall through */
6107 	case FIB_EVENT_ENTRY_ADD:
6108 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
6109 		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
6110 		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
6111 					       replace, append);
6112 		if (err)
6113 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6114 		fib_info_put(fib_work->fen_info.fi);
6115 		break;
6116 	case FIB_EVENT_ENTRY_DEL:
6117 		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
6118 		fib_info_put(fib_work->fen_info.fi);
6119 		break;
6120 	case FIB_EVENT_NH_ADD: /* fall through */
6121 	case FIB_EVENT_NH_DEL:
6122 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
6123 					fib_work->fnh_info.fib_nh);
6124 		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
6125 		break;
6126 	}
6127 	rtnl_unlock();
6128 	kfree(fib_work);
6129 }
6130 
6131 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
6132 {
6133 	struct mlxsw_sp_fib_event_work *fib_work =
6134 		container_of(work, struct mlxsw_sp_fib_event_work, work);
6135 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6136 	bool replace;
6137 	int err;
6138 
6139 	rtnl_lock();
6140 	mlxsw_sp_span_respin(mlxsw_sp);
6141 
6142 	switch (fib_work->event) {
6143 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6144 	case FIB_EVENT_ENTRY_ADD:
6145 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
6146 		err = mlxsw_sp_router_fib6_add(mlxsw_sp,
6147 					       fib_work->fib6_work.rt_arr,
6148 					       fib_work->fib6_work.nrt6,
6149 					       replace);
6150 		if (err)
6151 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6152 		mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6153 		break;
6154 	case FIB_EVENT_ENTRY_DEL:
6155 		mlxsw_sp_router_fib6_del(mlxsw_sp,
6156 					 fib_work->fib6_work.rt_arr,
6157 					 fib_work->fib6_work.nrt6);
6158 		mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6159 		break;
6160 	}
6161 	rtnl_unlock();
6162 	kfree(fib_work);
6163 }
6164 
6165 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
6166 {
6167 	struct mlxsw_sp_fib_event_work *fib_work =
6168 		container_of(work, struct mlxsw_sp_fib_event_work, work);
6169 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6170 	bool replace;
6171 	int err;
6172 
6173 	rtnl_lock();
6174 	switch (fib_work->event) {
6175 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6176 	case FIB_EVENT_ENTRY_ADD:
6177 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
6178 
6179 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
6180 						replace);
6181 		if (err)
6182 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6183 		mr_cache_put(fib_work->men_info.mfc);
6184 		break;
6185 	case FIB_EVENT_ENTRY_DEL:
6186 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
6187 		mr_cache_put(fib_work->men_info.mfc);
6188 		break;
6189 	case FIB_EVENT_VIF_ADD:
6190 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
6191 						    &fib_work->ven_info);
6192 		if (err)
6193 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6194 		dev_put(fib_work->ven_info.dev);
6195 		break;
6196 	case FIB_EVENT_VIF_DEL:
6197 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
6198 					      &fib_work->ven_info);
6199 		dev_put(fib_work->ven_info.dev);
6200 		break;
6201 	}
6202 	rtnl_unlock();
6203 	kfree(fib_work);
6204 }
6205 
6206 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
6207 				       struct fib_notifier_info *info)
6208 {
6209 	struct fib_entry_notifier_info *fen_info;
6210 	struct fib_nh_notifier_info *fnh_info;
6211 
6212 	switch (fib_work->event) {
6213 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6214 	case FIB_EVENT_ENTRY_APPEND: /* fall through */
6215 	case FIB_EVENT_ENTRY_ADD: /* fall through */
6216 	case FIB_EVENT_ENTRY_DEL:
6217 		fen_info = container_of(info, struct fib_entry_notifier_info,
6218 					info);
6219 		fib_work->fen_info = *fen_info;
6220 		/* Take reference on fib_info to prevent it from being
6221 		 * freed while work is queued. Release it afterwards.
6222 		 */
6223 		fib_info_hold(fib_work->fen_info.fi);
6224 		break;
6225 	case FIB_EVENT_NH_ADD: /* fall through */
6226 	case FIB_EVENT_NH_DEL:
6227 		fnh_info = container_of(info, struct fib_nh_notifier_info,
6228 					info);
6229 		fib_work->fnh_info = *fnh_info;
6230 		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
6231 		break;
6232 	}
6233 }
6234 
6235 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
6236 				      struct fib_notifier_info *info)
6237 {
6238 	struct fib6_entry_notifier_info *fen6_info;
6239 	int err;
6240 
6241 	switch (fib_work->event) {
6242 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6243 	case FIB_EVENT_ENTRY_ADD: /* fall through */
6244 	case FIB_EVENT_ENTRY_DEL:
6245 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
6246 					 info);
6247 		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
6248 						     fen6_info);
6249 		if (err)
6250 			return err;
6251 		break;
6252 	}
6253 
6254 	return 0;
6255 }
6256 
6257 static void
6258 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
6259 			    struct fib_notifier_info *info)
6260 {
6261 	switch (fib_work->event) {
6262 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6263 	case FIB_EVENT_ENTRY_ADD: /* fall through */
6264 	case FIB_EVENT_ENTRY_DEL:
6265 		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
6266 		mr_cache_hold(fib_work->men_info.mfc);
6267 		break;
6268 	case FIB_EVENT_VIF_ADD: /* fall through */
6269 	case FIB_EVENT_VIF_DEL:
6270 		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
6271 		dev_hold(fib_work->ven_info.dev);
6272 		break;
6273 	}
6274 }
6275 
6276 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
6277 					  struct fib_notifier_info *info,
6278 					  struct mlxsw_sp *mlxsw_sp)
6279 {
6280 	struct netlink_ext_ack *extack = info->extack;
6281 	struct fib_rule_notifier_info *fr_info;
6282 	struct fib_rule *rule;
6283 	int err = 0;
6284 
6285 	/* nothing to do at the moment */
6286 	if (event == FIB_EVENT_RULE_DEL)
6287 		return 0;
6288 
6289 	if (mlxsw_sp->router->aborted)
6290 		return 0;
6291 
6292 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
6293 	rule = fr_info->rule;
6294 
6295 	/* Rule only affects locally generated traffic */
6296 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
6297 		return 0;
6298 
6299 	switch (info->family) {
6300 	case AF_INET:
6301 		if (!fib4_rule_default(rule) && !rule->l3mdev)
6302 			err = -EOPNOTSUPP;
6303 		break;
6304 	case AF_INET6:
6305 		if (!fib6_rule_default(rule) && !rule->l3mdev)
6306 			err = -EOPNOTSUPP;
6307 		break;
6308 	case RTNL_FAMILY_IPMR:
6309 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
6310 			err = -EOPNOTSUPP;
6311 		break;
6312 	case RTNL_FAMILY_IP6MR:
6313 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
6314 			err = -EOPNOTSUPP;
6315 		break;
6316 	}
6317 
6318 	if (err < 0)
6319 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
6320 
6321 	return err;
6322 }
6323 
6324 /* Called with rcu_read_lock() */
6325 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
6326 				     unsigned long event, void *ptr)
6327 {
6328 	struct mlxsw_sp_fib_event_work *fib_work;
6329 	struct fib_notifier_info *info = ptr;
6330 	struct mlxsw_sp_router *router;
6331 	int err;
6332 
6333 	if ((info->family != AF_INET && info->family != AF_INET6 &&
6334 	     info->family != RTNL_FAMILY_IPMR &&
6335 	     info->family != RTNL_FAMILY_IP6MR))
6336 		return NOTIFY_DONE;
6337 
6338 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6339 
6340 	switch (event) {
6341 	case FIB_EVENT_RULE_ADD: /* fall through */
6342 	case FIB_EVENT_RULE_DEL:
6343 		err = mlxsw_sp_router_fib_rule_event(event, info,
6344 						     router->mlxsw_sp);
6345 		return notifier_from_errno(err);
6346 	case FIB_EVENT_ENTRY_ADD:
6347 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6348 	case FIB_EVENT_ENTRY_APPEND:  /* fall through */
6349 		if (router->aborted) {
6350 			NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
6351 			return notifier_from_errno(-EINVAL);
6352 		}
6353 		if (info->family == AF_INET) {
6354 			struct fib_entry_notifier_info *fen_info = ptr;
6355 
6356 			if (fen_info->fi->fib_nh_is_v6) {
6357 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
6358 				return notifier_from_errno(-EINVAL);
6359 			}
6360 			if (fen_info->fi->nh) {
6361 				NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
6362 				return notifier_from_errno(-EINVAL);
6363 			}
6364 		} else if (info->family == AF_INET6) {
6365 			struct fib6_entry_notifier_info *fen6_info;
6366 
6367 			fen6_info = container_of(info,
6368 						 struct fib6_entry_notifier_info,
6369 						 info);
6370 			if (fen6_info->rt->nh) {
6371 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
6372 				return notifier_from_errno(-EINVAL);
6373 			}
6374 		}
6375 		break;
6376 	}
6377 
6378 	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
6379 	if (WARN_ON(!fib_work))
6380 		return NOTIFY_BAD;
6381 
6382 	fib_work->mlxsw_sp = router->mlxsw_sp;
6383 	fib_work->event = event;
6384 
6385 	switch (info->family) {
6386 	case AF_INET:
6387 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
6388 		mlxsw_sp_router_fib4_event(fib_work, info);
6389 		break;
6390 	case AF_INET6:
6391 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
6392 		err = mlxsw_sp_router_fib6_event(fib_work, info);
6393 		if (err)
6394 			goto err_fib_event;
6395 		break;
6396 	case RTNL_FAMILY_IP6MR:
6397 	case RTNL_FAMILY_IPMR:
6398 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
6399 		mlxsw_sp_router_fibmr_event(fib_work, info);
6400 		break;
6401 	}
6402 
6403 	mlxsw_core_schedule_work(&fib_work->work);
6404 
6405 	return NOTIFY_DONE;
6406 
6407 err_fib_event:
6408 	kfree(fib_work);
6409 	return NOTIFY_BAD;
6410 }
6411 
6412 struct mlxsw_sp_rif *
6413 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
6414 			 const struct net_device *dev)
6415 {
6416 	int i;
6417 
6418 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6419 		if (mlxsw_sp->router->rifs[i] &&
6420 		    mlxsw_sp->router->rifs[i]->dev == dev)
6421 			return mlxsw_sp->router->rifs[i];
6422 
6423 	return NULL;
6424 }
6425 
6426 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
6427 {
6428 	char ritr_pl[MLXSW_REG_RITR_LEN];
6429 	int err;
6430 
6431 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
6432 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6433 	if (err)
6434 		return err;
6435 
6436 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
6437 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6438 }
6439 
6440 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
6441 					  struct mlxsw_sp_rif *rif)
6442 {
6443 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6444 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6445 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
6446 }
6447 
6448 static bool
6449 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6450 			   unsigned long event)
6451 {
6452 	struct inet6_dev *inet6_dev;
6453 	bool addr_list_empty = true;
6454 	struct in_device *idev;
6455 
6456 	switch (event) {
6457 	case NETDEV_UP:
6458 		return rif == NULL;
6459 	case NETDEV_DOWN:
6460 		idev = __in_dev_get_rtnl(dev);
6461 		if (idev && idev->ifa_list)
6462 			addr_list_empty = false;
6463 
6464 		inet6_dev = __in6_dev_get(dev);
6465 		if (addr_list_empty && inet6_dev &&
6466 		    !list_empty(&inet6_dev->addr_list))
6467 			addr_list_empty = false;
6468 
6469 		/* macvlans do not have a RIF, but rather piggy back on the
6470 		 * RIF of their lower device.
6471 		 */
6472 		if (netif_is_macvlan(dev) && addr_list_empty)
6473 			return true;
6474 
6475 		if (rif && addr_list_empty &&
6476 		    !netif_is_l3_slave(rif->dev))
6477 			return true;
6478 		/* It is possible we already removed the RIF ourselves
6479 		 * if it was assigned to a netdev that is now a bridge
6480 		 * or LAG slave.
6481 		 */
6482 		return false;
6483 	}
6484 
6485 	return false;
6486 }
6487 
6488 static enum mlxsw_sp_rif_type
6489 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6490 		      const struct net_device *dev)
6491 {
6492 	enum mlxsw_sp_fid_type type;
6493 
6494 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6495 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
6496 
6497 	/* Otherwise RIF type is derived from the type of the underlying FID. */
6498 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6499 		type = MLXSW_SP_FID_TYPE_8021Q;
6500 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6501 		type = MLXSW_SP_FID_TYPE_8021Q;
6502 	else if (netif_is_bridge_master(dev))
6503 		type = MLXSW_SP_FID_TYPE_8021D;
6504 	else
6505 		type = MLXSW_SP_FID_TYPE_RFID;
6506 
6507 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6508 }
6509 
6510 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
6511 {
6512 	int i;
6513 
6514 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6515 		if (!mlxsw_sp->router->rifs[i]) {
6516 			*p_rif_index = i;
6517 			return 0;
6518 		}
6519 	}
6520 
6521 	return -ENOBUFS;
6522 }
6523 
6524 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6525 					       u16 vr_id,
6526 					       struct net_device *l3_dev)
6527 {
6528 	struct mlxsw_sp_rif *rif;
6529 
6530 	rif = kzalloc(rif_size, GFP_KERNEL);
6531 	if (!rif)
6532 		return NULL;
6533 
6534 	INIT_LIST_HEAD(&rif->nexthop_list);
6535 	INIT_LIST_HEAD(&rif->neigh_list);
6536 	if (l3_dev) {
6537 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
6538 		rif->mtu = l3_dev->mtu;
6539 		rif->dev = l3_dev;
6540 	}
6541 	rif->vr_id = vr_id;
6542 	rif->rif_index = rif_index;
6543 
6544 	return rif;
6545 }
6546 
6547 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6548 					   u16 rif_index)
6549 {
6550 	return mlxsw_sp->router->rifs[rif_index];
6551 }
6552 
6553 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6554 {
6555 	return rif->rif_index;
6556 }
6557 
6558 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6559 {
6560 	return lb_rif->common.rif_index;
6561 }
6562 
6563 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6564 {
6565 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
6566 	struct mlxsw_sp_vr *ul_vr;
6567 
6568 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
6569 	if (WARN_ON(IS_ERR(ul_vr)))
6570 		return 0;
6571 
6572 	return ul_vr->id;
6573 }
6574 
6575 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6576 {
6577 	return lb_rif->ul_rif_id;
6578 }
6579 
6580 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6581 {
6582 	return rif->dev->ifindex;
6583 }
6584 
6585 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6586 {
6587 	return rif->dev;
6588 }
6589 
6590 struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
6591 {
6592 	return rif->fid;
6593 }
6594 
6595 static struct mlxsw_sp_rif *
6596 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
6597 		    const struct mlxsw_sp_rif_params *params,
6598 		    struct netlink_ext_ack *extack)
6599 {
6600 	u32 tb_id = l3mdev_fib_table(params->dev);
6601 	const struct mlxsw_sp_rif_ops *ops;
6602 	struct mlxsw_sp_fid *fid = NULL;
6603 	enum mlxsw_sp_rif_type type;
6604 	struct mlxsw_sp_rif *rif;
6605 	struct mlxsw_sp_vr *vr;
6606 	u16 rif_index;
6607 	int i, err;
6608 
6609 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6610 	ops = mlxsw_sp->rif_ops_arr[type];
6611 
6612 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
6613 	if (IS_ERR(vr))
6614 		return ERR_CAST(vr);
6615 	vr->rif_count++;
6616 
6617 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
6618 	if (err) {
6619 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
6620 		goto err_rif_index_alloc;
6621 	}
6622 
6623 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
6624 	if (!rif) {
6625 		err = -ENOMEM;
6626 		goto err_rif_alloc;
6627 	}
6628 	dev_hold(rif->dev);
6629 	mlxsw_sp->router->rifs[rif_index] = rif;
6630 	rif->mlxsw_sp = mlxsw_sp;
6631 	rif->ops = ops;
6632 
6633 	if (ops->fid_get) {
6634 		fid = ops->fid_get(rif, extack);
6635 		if (IS_ERR(fid)) {
6636 			err = PTR_ERR(fid);
6637 			goto err_fid_get;
6638 		}
6639 		rif->fid = fid;
6640 	}
6641 
6642 	if (ops->setup)
6643 		ops->setup(rif, params);
6644 
6645 	err = ops->configure(rif);
6646 	if (err)
6647 		goto err_configure;
6648 
6649 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6650 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6651 		if (err)
6652 			goto err_mr_rif_add;
6653 	}
6654 
6655 	mlxsw_sp_rif_counters_alloc(rif);
6656 
6657 	return rif;
6658 
6659 err_mr_rif_add:
6660 	for (i--; i >= 0; i--)
6661 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6662 	ops->deconfigure(rif);
6663 err_configure:
6664 	if (fid)
6665 		mlxsw_sp_fid_put(fid);
6666 err_fid_get:
6667 	mlxsw_sp->router->rifs[rif_index] = NULL;
6668 	dev_put(rif->dev);
6669 	kfree(rif);
6670 err_rif_alloc:
6671 err_rif_index_alloc:
6672 	vr->rif_count--;
6673 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6674 	return ERR_PTR(err);
6675 }
6676 
6677 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6678 {
6679 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
6680 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6681 	struct mlxsw_sp_fid *fid = rif->fid;
6682 	struct mlxsw_sp_vr *vr;
6683 	int i;
6684 
6685 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
6686 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
6687 
6688 	mlxsw_sp_rif_counters_free(rif);
6689 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6690 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6691 	ops->deconfigure(rif);
6692 	if (fid)
6693 		/* Loopback RIFs are not associated with a FID. */
6694 		mlxsw_sp_fid_put(fid);
6695 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
6696 	dev_put(rif->dev);
6697 	kfree(rif);
6698 	vr->rif_count--;
6699 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6700 }
6701 
6702 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6703 				 struct net_device *dev)
6704 {
6705 	struct mlxsw_sp_rif *rif;
6706 
6707 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6708 	if (!rif)
6709 		return;
6710 	mlxsw_sp_rif_destroy(rif);
6711 }
6712 
6713 static void
6714 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6715 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6716 {
6717 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6718 
6719 	params->vid = mlxsw_sp_port_vlan->vid;
6720 	params->lag = mlxsw_sp_port->lagged;
6721 	if (params->lag)
6722 		params->lag_id = mlxsw_sp_port->lag_id;
6723 	else
6724 		params->system_port = mlxsw_sp_port->local_port;
6725 }
6726 
6727 static struct mlxsw_sp_rif_subport *
6728 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
6729 {
6730 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
6731 }
6732 
6733 static struct mlxsw_sp_rif *
6734 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
6735 			 const struct mlxsw_sp_rif_params *params,
6736 			 struct netlink_ext_ack *extack)
6737 {
6738 	struct mlxsw_sp_rif_subport *rif_subport;
6739 	struct mlxsw_sp_rif *rif;
6740 
6741 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
6742 	if (!rif)
6743 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
6744 
6745 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
6746 	refcount_inc(&rif_subport->ref_count);
6747 	return rif;
6748 }
6749 
6750 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
6751 {
6752 	struct mlxsw_sp_rif_subport *rif_subport;
6753 
6754 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
6755 	if (!refcount_dec_and_test(&rif_subport->ref_count))
6756 		return;
6757 
6758 	mlxsw_sp_rif_destroy(rif);
6759 }
6760 
6761 static int
6762 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
6763 			       struct net_device *l3_dev,
6764 			       struct netlink_ext_ack *extack)
6765 {
6766 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6767 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
6768 	struct mlxsw_sp_rif_params params = {
6769 		.dev = l3_dev,
6770 	};
6771 	u16 vid = mlxsw_sp_port_vlan->vid;
6772 	struct mlxsw_sp_rif *rif;
6773 	struct mlxsw_sp_fid *fid;
6774 	int err;
6775 
6776 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
6777 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
6778 	if (IS_ERR(rif))
6779 		return PTR_ERR(rif);
6780 
6781 	/* FID was already created, just take a reference */
6782 	fid = rif->ops->fid_get(rif, extack);
6783 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6784 	if (err)
6785 		goto err_fid_port_vid_map;
6786 
6787 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
6788 	if (err)
6789 		goto err_port_vid_learning_set;
6790 
6791 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
6792 					BR_STATE_FORWARDING);
6793 	if (err)
6794 		goto err_port_vid_stp_set;
6795 
6796 	mlxsw_sp_port_vlan->fid = fid;
6797 
6798 	return 0;
6799 
6800 err_port_vid_stp_set:
6801 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6802 err_port_vid_learning_set:
6803 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6804 err_fid_port_vid_map:
6805 	mlxsw_sp_fid_put(fid);
6806 	mlxsw_sp_rif_subport_put(rif);
6807 	return err;
6808 }
6809 
6810 void
6811 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6812 {
6813 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6814 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
6815 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
6816 	u16 vid = mlxsw_sp_port_vlan->vid;
6817 
6818 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6819 		return;
6820 
6821 	mlxsw_sp_port_vlan->fid = NULL;
6822 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6823 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6824 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6825 	mlxsw_sp_fid_put(fid);
6826 	mlxsw_sp_rif_subport_put(rif);
6827 }
6828 
6829 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6830 					     struct net_device *port_dev,
6831 					     unsigned long event, u16 vid,
6832 					     struct netlink_ext_ack *extack)
6833 {
6834 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
6835 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
6836 
6837 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
6838 	if (WARN_ON(!mlxsw_sp_port_vlan))
6839 		return -EINVAL;
6840 
6841 	switch (event) {
6842 	case NETDEV_UP:
6843 		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
6844 						      l3_dev, extack);
6845 	case NETDEV_DOWN:
6846 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
6847 		break;
6848 	}
6849 
6850 	return 0;
6851 }
6852 
6853 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
6854 					unsigned long event,
6855 					struct netlink_ext_ack *extack)
6856 {
6857 	if (netif_is_bridge_port(port_dev) ||
6858 	    netif_is_lag_port(port_dev) ||
6859 	    netif_is_ovs_port(port_dev))
6860 		return 0;
6861 
6862 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
6863 						 MLXSW_SP_DEFAULT_VID, extack);
6864 }
6865 
6866 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6867 					 struct net_device *lag_dev,
6868 					 unsigned long event, u16 vid,
6869 					 struct netlink_ext_ack *extack)
6870 {
6871 	struct net_device *port_dev;
6872 	struct list_head *iter;
6873 	int err;
6874 
6875 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6876 		if (mlxsw_sp_port_dev_check(port_dev)) {
6877 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6878 								port_dev,
6879 								event, vid,
6880 								extack);
6881 			if (err)
6882 				return err;
6883 		}
6884 	}
6885 
6886 	return 0;
6887 }
6888 
6889 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
6890 				       unsigned long event,
6891 				       struct netlink_ext_ack *extack)
6892 {
6893 	if (netif_is_bridge_port(lag_dev))
6894 		return 0;
6895 
6896 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
6897 					     MLXSW_SP_DEFAULT_VID, extack);
6898 }
6899 
6900 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
6901 					  struct net_device *l3_dev,
6902 					  unsigned long event,
6903 					  struct netlink_ext_ack *extack)
6904 {
6905 	struct mlxsw_sp_rif_params params = {
6906 		.dev = l3_dev,
6907 	};
6908 	struct mlxsw_sp_rif *rif;
6909 
6910 	switch (event) {
6911 	case NETDEV_UP:
6912 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
6913 		if (IS_ERR(rif))
6914 			return PTR_ERR(rif);
6915 		break;
6916 	case NETDEV_DOWN:
6917 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6918 		mlxsw_sp_rif_destroy(rif);
6919 		break;
6920 	}
6921 
6922 	return 0;
6923 }
6924 
6925 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
6926 					struct net_device *vlan_dev,
6927 					unsigned long event,
6928 					struct netlink_ext_ack *extack)
6929 {
6930 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
6931 	u16 vid = vlan_dev_vlan_id(vlan_dev);
6932 
6933 	if (netif_is_bridge_port(vlan_dev))
6934 		return 0;
6935 
6936 	if (mlxsw_sp_port_dev_check(real_dev))
6937 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
6938 							 event, vid, extack);
6939 	else if (netif_is_lag_master(real_dev))
6940 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
6941 						     vid, extack);
6942 	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
6943 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
6944 						      extack);
6945 
6946 	return 0;
6947 }
6948 
6949 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
6950 {
6951 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
6952 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6953 
6954 	return ether_addr_equal_masked(mac, vrrp4, mask);
6955 }
6956 
6957 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
6958 {
6959 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
6960 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6961 
6962 	return ether_addr_equal_masked(mac, vrrp6, mask);
6963 }
6964 
6965 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
6966 				const u8 *mac, bool adding)
6967 {
6968 	char ritr_pl[MLXSW_REG_RITR_LEN];
6969 	u8 vrrp_id = adding ? mac[5] : 0;
6970 	int err;
6971 
6972 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
6973 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
6974 		return 0;
6975 
6976 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
6977 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6978 	if (err)
6979 		return err;
6980 
6981 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
6982 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
6983 	else
6984 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
6985 
6986 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6987 }
6988 
6989 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
6990 				    const struct net_device *macvlan_dev,
6991 				    struct netlink_ext_ack *extack)
6992 {
6993 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6994 	struct mlxsw_sp_rif *rif;
6995 	int err;
6996 
6997 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6998 	if (!rif) {
6999 		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
7000 		return -EOPNOTSUPP;
7001 	}
7002 
7003 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
7004 				  mlxsw_sp_fid_index(rif->fid), true);
7005 	if (err)
7006 		return err;
7007 
7008 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
7009 				   macvlan_dev->dev_addr, true);
7010 	if (err)
7011 		goto err_rif_vrrp_add;
7012 
7013 	/* Make sure the bridge driver does not have this MAC pointing at
7014 	 * some other port.
7015 	 */
7016 	if (rif->ops->fdb_del)
7017 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
7018 
7019 	return 0;
7020 
7021 err_rif_vrrp_add:
7022 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
7023 			    mlxsw_sp_fid_index(rif->fid), false);
7024 	return err;
7025 }
7026 
7027 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
7028 			      const struct net_device *macvlan_dev)
7029 {
7030 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
7031 	struct mlxsw_sp_rif *rif;
7032 
7033 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
7034 	/* If we do not have a RIF, then we already took care of
7035 	 * removing the macvlan's MAC during RIF deletion.
7036 	 */
7037 	if (!rif)
7038 		return;
7039 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
7040 			     false);
7041 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
7042 			    mlxsw_sp_fid_index(rif->fid), false);
7043 }
7044 
7045 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
7046 					   struct net_device *macvlan_dev,
7047 					   unsigned long event,
7048 					   struct netlink_ext_ack *extack)
7049 {
7050 	switch (event) {
7051 	case NETDEV_UP:
7052 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
7053 	case NETDEV_DOWN:
7054 		mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
7055 		break;
7056 	}
7057 
7058 	return 0;
7059 }
7060 
7061 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
7062 					       struct net_device *dev,
7063 					       const unsigned char *dev_addr,
7064 					       struct netlink_ext_ack *extack)
7065 {
7066 	struct mlxsw_sp_rif *rif;
7067 	int i;
7068 
7069 	/* A RIF is not created for macvlan netdevs. Their MAC is used to
7070 	 * populate the FDB
7071 	 */
7072 	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
7073 		return 0;
7074 
7075 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
7076 		rif = mlxsw_sp->router->rifs[i];
7077 		if (rif && rif->dev && rif->dev != dev &&
7078 		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
7079 					     mlxsw_sp->mac_mask)) {
7080 			NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
7081 			return -EINVAL;
7082 		}
7083 	}
7084 
7085 	return 0;
7086 }
7087 
7088 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
7089 				     struct net_device *dev,
7090 				     unsigned long event,
7091 				     struct netlink_ext_ack *extack)
7092 {
7093 	if (mlxsw_sp_port_dev_check(dev))
7094 		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
7095 	else if (netif_is_lag_master(dev))
7096 		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
7097 	else if (netif_is_bridge_master(dev))
7098 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
7099 						      extack);
7100 	else if (is_vlan_dev(dev))
7101 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
7102 						    extack);
7103 	else if (netif_is_macvlan(dev))
7104 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
7105 						       extack);
7106 	else
7107 		return 0;
7108 }
7109 
7110 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
7111 				   unsigned long event, void *ptr)
7112 {
7113 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
7114 	struct net_device *dev = ifa->ifa_dev->dev;
7115 	struct mlxsw_sp_router *router;
7116 	struct mlxsw_sp_rif *rif;
7117 	int err = 0;
7118 
7119 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
7120 	if (event == NETDEV_UP)
7121 		goto out;
7122 
7123 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
7124 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
7125 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7126 		goto out;
7127 
7128 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
7129 out:
7130 	return notifier_from_errno(err);
7131 }
7132 
7133 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
7134 				  unsigned long event, void *ptr)
7135 {
7136 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
7137 	struct net_device *dev = ivi->ivi_dev->dev;
7138 	struct mlxsw_sp *mlxsw_sp;
7139 	struct mlxsw_sp_rif *rif;
7140 	int err = 0;
7141 
7142 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7143 	if (!mlxsw_sp)
7144 		goto out;
7145 
7146 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7147 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7148 		goto out;
7149 
7150 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7151 						  ivi->extack);
7152 	if (err)
7153 		goto out;
7154 
7155 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
7156 out:
7157 	return notifier_from_errno(err);
7158 }
7159 
7160 struct mlxsw_sp_inet6addr_event_work {
7161 	struct work_struct work;
7162 	struct mlxsw_sp *mlxsw_sp;
7163 	struct net_device *dev;
7164 	unsigned long event;
7165 };
7166 
7167 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
7168 {
7169 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
7170 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
7171 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
7172 	struct net_device *dev = inet6addr_work->dev;
7173 	unsigned long event = inet6addr_work->event;
7174 	struct mlxsw_sp_rif *rif;
7175 
7176 	rtnl_lock();
7177 
7178 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7179 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7180 		goto out;
7181 
7182 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
7183 out:
7184 	rtnl_unlock();
7185 	dev_put(dev);
7186 	kfree(inet6addr_work);
7187 }
7188 
7189 /* Called with rcu_read_lock() */
7190 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
7191 				    unsigned long event, void *ptr)
7192 {
7193 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
7194 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
7195 	struct net_device *dev = if6->idev->dev;
7196 	struct mlxsw_sp_router *router;
7197 
7198 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
7199 	if (event == NETDEV_UP)
7200 		return NOTIFY_DONE;
7201 
7202 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
7203 	if (!inet6addr_work)
7204 		return NOTIFY_BAD;
7205 
7206 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
7207 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
7208 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
7209 	inet6addr_work->dev = dev;
7210 	inet6addr_work->event = event;
7211 	dev_hold(dev);
7212 	mlxsw_core_schedule_work(&inet6addr_work->work);
7213 
7214 	return NOTIFY_DONE;
7215 }
7216 
7217 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
7218 				   unsigned long event, void *ptr)
7219 {
7220 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
7221 	struct net_device *dev = i6vi->i6vi_dev->dev;
7222 	struct mlxsw_sp *mlxsw_sp;
7223 	struct mlxsw_sp_rif *rif;
7224 	int err = 0;
7225 
7226 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7227 	if (!mlxsw_sp)
7228 		goto out;
7229 
7230 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7231 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7232 		goto out;
7233 
7234 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7235 						  i6vi->extack);
7236 	if (err)
7237 		goto out;
7238 
7239 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
7240 out:
7241 	return notifier_from_errno(err);
7242 }
7243 
7244 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7245 			     const char *mac, int mtu)
7246 {
7247 	char ritr_pl[MLXSW_REG_RITR_LEN];
7248 	int err;
7249 
7250 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
7251 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7252 	if (err)
7253 		return err;
7254 
7255 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
7256 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
7257 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
7258 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7259 }
7260 
7261 static int
7262 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
7263 				  struct mlxsw_sp_rif *rif)
7264 {
7265 	struct net_device *dev = rif->dev;
7266 	u16 fid_index;
7267 	int err;
7268 
7269 	fid_index = mlxsw_sp_fid_index(rif->fid);
7270 
7271 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
7272 	if (err)
7273 		return err;
7274 
7275 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
7276 				dev->mtu);
7277 	if (err)
7278 		goto err_rif_edit;
7279 
7280 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
7281 	if (err)
7282 		goto err_rif_fdb_op;
7283 
7284 	if (rif->mtu != dev->mtu) {
7285 		struct mlxsw_sp_vr *vr;
7286 		int i;
7287 
7288 		/* The RIF is relevant only to its mr_table instance, as unlike
7289 		 * unicast routing, in multicast routing a RIF cannot be shared
7290 		 * between several multicast routing tables.
7291 		 */
7292 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
7293 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
7294 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
7295 						   rif, dev->mtu);
7296 	}
7297 
7298 	ether_addr_copy(rif->addr, dev->dev_addr);
7299 	rif->mtu = dev->mtu;
7300 
7301 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
7302 
7303 	return 0;
7304 
7305 err_rif_fdb_op:
7306 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
7307 err_rif_edit:
7308 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
7309 	return err;
7310 }
7311 
7312 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
7313 			    struct netdev_notifier_pre_changeaddr_info *info)
7314 {
7315 	struct netlink_ext_ack *extack;
7316 
7317 	extack = netdev_notifier_info_to_extack(&info->info);
7318 	return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
7319 						   info->dev_addr, extack);
7320 }
7321 
7322 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
7323 					 unsigned long event, void *ptr)
7324 {
7325 	struct mlxsw_sp *mlxsw_sp;
7326 	struct mlxsw_sp_rif *rif;
7327 
7328 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7329 	if (!mlxsw_sp)
7330 		return 0;
7331 
7332 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7333 	if (!rif)
7334 		return 0;
7335 
7336 	switch (event) {
7337 	case NETDEV_CHANGEMTU: /* fall through */
7338 	case NETDEV_CHANGEADDR:
7339 		return mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
7340 	case NETDEV_PRE_CHANGEADDR:
7341 		return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
7342 	}
7343 
7344 	return 0;
7345 }
7346 
7347 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
7348 				  struct net_device *l3_dev,
7349 				  struct netlink_ext_ack *extack)
7350 {
7351 	struct mlxsw_sp_rif *rif;
7352 
7353 	/* If netdev is already associated with a RIF, then we need to
7354 	 * destroy it and create a new one with the new virtual router ID.
7355 	 */
7356 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7357 	if (rif)
7358 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
7359 					  extack);
7360 
7361 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
7362 }
7363 
7364 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
7365 				    struct net_device *l3_dev)
7366 {
7367 	struct mlxsw_sp_rif *rif;
7368 
7369 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7370 	if (!rif)
7371 		return;
7372 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
7373 }
7374 
7375 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
7376 				 struct netdev_notifier_changeupper_info *info)
7377 {
7378 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
7379 	int err = 0;
7380 
7381 	/* We do not create a RIF for a macvlan, but only use it to
7382 	 * direct more MAC addresses to the router.
7383 	 */
7384 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
7385 		return 0;
7386 
7387 	switch (event) {
7388 	case NETDEV_PRECHANGEUPPER:
7389 		return 0;
7390 	case NETDEV_CHANGEUPPER:
7391 		if (info->linking) {
7392 			struct netlink_ext_ack *extack;
7393 
7394 			extack = netdev_notifier_info_to_extack(&info->info);
7395 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
7396 		} else {
7397 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
7398 		}
7399 		break;
7400 	}
7401 
7402 	return err;
7403 }
7404 
7405 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
7406 {
7407 	struct mlxsw_sp_rif *rif = data;
7408 
7409 	if (!netif_is_macvlan(dev))
7410 		return 0;
7411 
7412 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
7413 				   mlxsw_sp_fid_index(rif->fid), false);
7414 }
7415 
7416 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
7417 {
7418 	if (!netif_is_macvlan_port(rif->dev))
7419 		return 0;
7420 
7421 	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
7422 	return netdev_walk_all_upper_dev_rcu(rif->dev,
7423 					     __mlxsw_sp_rif_macvlan_flush, rif);
7424 }
7425 
7426 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
7427 				       const struct mlxsw_sp_rif_params *params)
7428 {
7429 	struct mlxsw_sp_rif_subport *rif_subport;
7430 
7431 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
7432 	refcount_set(&rif_subport->ref_count, 1);
7433 	rif_subport->vid = params->vid;
7434 	rif_subport->lag = params->lag;
7435 	if (params->lag)
7436 		rif_subport->lag_id = params->lag_id;
7437 	else
7438 		rif_subport->system_port = params->system_port;
7439 }
7440 
7441 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
7442 {
7443 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7444 	struct mlxsw_sp_rif_subport *rif_subport;
7445 	char ritr_pl[MLXSW_REG_RITR_LEN];
7446 
7447 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
7448 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
7449 			    rif->rif_index, rif->vr_id, rif->dev->mtu);
7450 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7451 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
7452 				  rif_subport->lag ? rif_subport->lag_id :
7453 						     rif_subport->system_port,
7454 				  rif_subport->vid);
7455 
7456 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7457 }
7458 
7459 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
7460 {
7461 	int err;
7462 
7463 	err = mlxsw_sp_rif_subport_op(rif, true);
7464 	if (err)
7465 		return err;
7466 
7467 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7468 				  mlxsw_sp_fid_index(rif->fid), true);
7469 	if (err)
7470 		goto err_rif_fdb_op;
7471 
7472 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7473 	return 0;
7474 
7475 err_rif_fdb_op:
7476 	mlxsw_sp_rif_subport_op(rif, false);
7477 	return err;
7478 }
7479 
7480 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
7481 {
7482 	struct mlxsw_sp_fid *fid = rif->fid;
7483 
7484 	mlxsw_sp_fid_rif_set(fid, NULL);
7485 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7486 			    mlxsw_sp_fid_index(fid), false);
7487 	mlxsw_sp_rif_macvlan_flush(rif);
7488 	mlxsw_sp_rif_subport_op(rif, false);
7489 }
7490 
7491 static struct mlxsw_sp_fid *
7492 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
7493 			     struct netlink_ext_ack *extack)
7494 {
7495 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
7496 }
7497 
7498 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
7499 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
7500 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
7501 	.setup			= mlxsw_sp_rif_subport_setup,
7502 	.configure		= mlxsw_sp_rif_subport_configure,
7503 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
7504 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
7505 };
7506 
7507 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
7508 				    enum mlxsw_reg_ritr_if_type type,
7509 				    u16 vid_fid, bool enable)
7510 {
7511 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7512 	char ritr_pl[MLXSW_REG_RITR_LEN];
7513 
7514 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
7515 			    rif->dev->mtu);
7516 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7517 	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
7518 
7519 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7520 }
7521 
7522 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
7523 {
7524 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
7525 }
7526 
7527 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
7528 {
7529 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7530 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7531 	int err;
7532 
7533 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
7534 	if (err)
7535 		return err;
7536 
7537 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7538 				     mlxsw_sp_router_port(mlxsw_sp), true);
7539 	if (err)
7540 		goto err_fid_mc_flood_set;
7541 
7542 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7543 				     mlxsw_sp_router_port(mlxsw_sp), true);
7544 	if (err)
7545 		goto err_fid_bc_flood_set;
7546 
7547 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7548 				  mlxsw_sp_fid_index(rif->fid), true);
7549 	if (err)
7550 		goto err_rif_fdb_op;
7551 
7552 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7553 	return 0;
7554 
7555 err_rif_fdb_op:
7556 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7557 			       mlxsw_sp_router_port(mlxsw_sp), false);
7558 err_fid_bc_flood_set:
7559 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7560 			       mlxsw_sp_router_port(mlxsw_sp), false);
7561 err_fid_mc_flood_set:
7562 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
7563 	return err;
7564 }
7565 
7566 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
7567 {
7568 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7569 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7570 	struct mlxsw_sp_fid *fid = rif->fid;
7571 
7572 	mlxsw_sp_fid_rif_set(fid, NULL);
7573 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7574 			    mlxsw_sp_fid_index(fid), false);
7575 	mlxsw_sp_rif_macvlan_flush(rif);
7576 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7577 			       mlxsw_sp_router_port(mlxsw_sp), false);
7578 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7579 			       mlxsw_sp_router_port(mlxsw_sp), false);
7580 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
7581 }
7582 
7583 static struct mlxsw_sp_fid *
7584 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
7585 			  struct netlink_ext_ack *extack)
7586 {
7587 	struct net_device *br_dev = rif->dev;
7588 	u16 vid;
7589 	int err;
7590 
7591 	if (is_vlan_dev(rif->dev)) {
7592 		vid = vlan_dev_vlan_id(rif->dev);
7593 		br_dev = vlan_dev_real_dev(rif->dev);
7594 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
7595 			return ERR_PTR(-EINVAL);
7596 	} else {
7597 		err = br_vlan_get_pvid(rif->dev, &vid);
7598 		if (err < 0 || !vid) {
7599 			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
7600 			return ERR_PTR(-EINVAL);
7601 		}
7602 	}
7603 
7604 	return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, br_dev, vid, extack);
7605 }
7606 
7607 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7608 {
7609 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7610 	struct switchdev_notifier_fdb_info info;
7611 	struct net_device *br_dev;
7612 	struct net_device *dev;
7613 
7614 	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
7615 	dev = br_fdb_find_port(br_dev, mac, vid);
7616 	if (!dev)
7617 		return;
7618 
7619 	info.addr = mac;
7620 	info.vid = vid;
7621 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7622 				 NULL);
7623 }
7624 
7625 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
7626 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
7627 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7628 	.configure		= mlxsw_sp_rif_vlan_configure,
7629 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
7630 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
7631 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
7632 };
7633 
7634 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
7635 {
7636 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7637 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7638 	int err;
7639 
7640 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
7641 				       true);
7642 	if (err)
7643 		return err;
7644 
7645 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7646 				     mlxsw_sp_router_port(mlxsw_sp), true);
7647 	if (err)
7648 		goto err_fid_mc_flood_set;
7649 
7650 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7651 				     mlxsw_sp_router_port(mlxsw_sp), true);
7652 	if (err)
7653 		goto err_fid_bc_flood_set;
7654 
7655 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7656 				  mlxsw_sp_fid_index(rif->fid), true);
7657 	if (err)
7658 		goto err_rif_fdb_op;
7659 
7660 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7661 	return 0;
7662 
7663 err_rif_fdb_op:
7664 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7665 			       mlxsw_sp_router_port(mlxsw_sp), false);
7666 err_fid_bc_flood_set:
7667 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7668 			       mlxsw_sp_router_port(mlxsw_sp), false);
7669 err_fid_mc_flood_set:
7670 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7671 	return err;
7672 }
7673 
7674 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
7675 {
7676 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7677 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7678 	struct mlxsw_sp_fid *fid = rif->fid;
7679 
7680 	mlxsw_sp_fid_rif_set(fid, NULL);
7681 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7682 			    mlxsw_sp_fid_index(fid), false);
7683 	mlxsw_sp_rif_macvlan_flush(rif);
7684 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7685 			       mlxsw_sp_router_port(mlxsw_sp), false);
7686 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7687 			       mlxsw_sp_router_port(mlxsw_sp), false);
7688 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7689 }
7690 
7691 static struct mlxsw_sp_fid *
7692 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
7693 			 struct netlink_ext_ack *extack)
7694 {
7695 	return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, rif->dev, 0, extack);
7696 }
7697 
7698 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7699 {
7700 	struct switchdev_notifier_fdb_info info;
7701 	struct net_device *dev;
7702 
7703 	dev = br_fdb_find_port(rif->dev, mac, 0);
7704 	if (!dev)
7705 		return;
7706 
7707 	info.addr = mac;
7708 	info.vid = 0;
7709 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7710 				 NULL);
7711 }
7712 
7713 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
7714 	.type			= MLXSW_SP_RIF_TYPE_FID,
7715 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7716 	.configure		= mlxsw_sp_rif_fid_configure,
7717 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
7718 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
7719 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
7720 };
7721 
7722 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
7723 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
7724 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7725 	.configure		= mlxsw_sp_rif_fid_configure,
7726 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
7727 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
7728 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
7729 };
7730 
7731 static struct mlxsw_sp_rif_ipip_lb *
7732 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
7733 {
7734 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
7735 }
7736 
7737 static void
7738 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
7739 			   const struct mlxsw_sp_rif_params *params)
7740 {
7741 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
7742 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
7743 
7744 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
7745 				 common);
7746 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
7747 	rif_lb->lb_config = params_lb->lb_config;
7748 }
7749 
7750 static int
7751 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7752 {
7753 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7754 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7755 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7756 	struct mlxsw_sp_vr *ul_vr;
7757 	int err;
7758 
7759 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
7760 	if (IS_ERR(ul_vr))
7761 		return PTR_ERR(ul_vr);
7762 
7763 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
7764 	if (err)
7765 		goto err_loopback_op;
7766 
7767 	lb_rif->ul_vr_id = ul_vr->id;
7768 	lb_rif->ul_rif_id = 0;
7769 	++ul_vr->rif_count;
7770 	return 0;
7771 
7772 err_loopback_op:
7773 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7774 	return err;
7775 }
7776 
7777 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7778 {
7779 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7780 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7781 	struct mlxsw_sp_vr *ul_vr;
7782 
7783 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
7784 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
7785 
7786 	--ul_vr->rif_count;
7787 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7788 }
7789 
7790 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
7791 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
7792 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
7793 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
7794 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
7795 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
7796 };
7797 
7798 const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
7799 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
7800 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
7801 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
7802 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
7803 };
7804 
7805 static int
7806 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
7807 {
7808 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7809 	char ritr_pl[MLXSW_REG_RITR_LEN];
7810 
7811 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
7812 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
7813 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
7814 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
7815 
7816 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7817 }
7818 
7819 static struct mlxsw_sp_rif *
7820 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
7821 		       struct netlink_ext_ack *extack)
7822 {
7823 	struct mlxsw_sp_rif *ul_rif;
7824 	u16 rif_index;
7825 	int err;
7826 
7827 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
7828 	if (err) {
7829 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
7830 		return ERR_PTR(err);
7831 	}
7832 
7833 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
7834 	if (!ul_rif)
7835 		return ERR_PTR(-ENOMEM);
7836 
7837 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
7838 	ul_rif->mlxsw_sp = mlxsw_sp;
7839 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
7840 	if (err)
7841 		goto ul_rif_op_err;
7842 
7843 	return ul_rif;
7844 
7845 ul_rif_op_err:
7846 	mlxsw_sp->router->rifs[rif_index] = NULL;
7847 	kfree(ul_rif);
7848 	return ERR_PTR(err);
7849 }
7850 
7851 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
7852 {
7853 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7854 
7855 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
7856 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
7857 	kfree(ul_rif);
7858 }
7859 
7860 static struct mlxsw_sp_rif *
7861 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
7862 		    struct netlink_ext_ack *extack)
7863 {
7864 	struct mlxsw_sp_vr *vr;
7865 	int err;
7866 
7867 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
7868 	if (IS_ERR(vr))
7869 		return ERR_CAST(vr);
7870 
7871 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
7872 		return vr->ul_rif;
7873 
7874 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
7875 	if (IS_ERR(vr->ul_rif)) {
7876 		err = PTR_ERR(vr->ul_rif);
7877 		goto err_ul_rif_create;
7878 	}
7879 
7880 	vr->rif_count++;
7881 	refcount_set(&vr->ul_rif_refcnt, 1);
7882 
7883 	return vr->ul_rif;
7884 
7885 err_ul_rif_create:
7886 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7887 	return ERR_PTR(err);
7888 }
7889 
7890 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
7891 {
7892 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7893 	struct mlxsw_sp_vr *vr;
7894 
7895 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
7896 
7897 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
7898 		return;
7899 
7900 	vr->rif_count--;
7901 	mlxsw_sp_ul_rif_destroy(ul_rif);
7902 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7903 }
7904 
7905 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
7906 			       u16 *ul_rif_index)
7907 {
7908 	struct mlxsw_sp_rif *ul_rif;
7909 
7910 	ASSERT_RTNL();
7911 
7912 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
7913 	if (IS_ERR(ul_rif))
7914 		return PTR_ERR(ul_rif);
7915 	*ul_rif_index = ul_rif->rif_index;
7916 
7917 	return 0;
7918 }
7919 
7920 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
7921 {
7922 	struct mlxsw_sp_rif *ul_rif;
7923 
7924 	ASSERT_RTNL();
7925 
7926 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
7927 	if (WARN_ON(!ul_rif))
7928 		return;
7929 
7930 	mlxsw_sp_ul_rif_put(ul_rif);
7931 }
7932 
7933 static int
7934 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7935 {
7936 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7937 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7938 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7939 	struct mlxsw_sp_rif *ul_rif;
7940 	int err;
7941 
7942 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
7943 	if (IS_ERR(ul_rif))
7944 		return PTR_ERR(ul_rif);
7945 
7946 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
7947 	if (err)
7948 		goto err_loopback_op;
7949 
7950 	lb_rif->ul_vr_id = 0;
7951 	lb_rif->ul_rif_id = ul_rif->rif_index;
7952 
7953 	return 0;
7954 
7955 err_loopback_op:
7956 	mlxsw_sp_ul_rif_put(ul_rif);
7957 	return err;
7958 }
7959 
7960 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7961 {
7962 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7963 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7964 	struct mlxsw_sp_rif *ul_rif;
7965 
7966 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
7967 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
7968 	mlxsw_sp_ul_rif_put(ul_rif);
7969 }
7970 
7971 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
7972 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
7973 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
7974 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
7975 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
7976 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
7977 };
7978 
7979 const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
7980 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
7981 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
7982 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
7983 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
7984 };
7985 
7986 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
7987 {
7988 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7989 
7990 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
7991 					 sizeof(struct mlxsw_sp_rif *),
7992 					 GFP_KERNEL);
7993 	if (!mlxsw_sp->router->rifs)
7994 		return -ENOMEM;
7995 
7996 	return 0;
7997 }
7998 
7999 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
8000 {
8001 	int i;
8002 
8003 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
8004 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
8005 
8006 	kfree(mlxsw_sp->router->rifs);
8007 }
8008 
8009 static int
8010 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
8011 {
8012 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
8013 
8014 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
8015 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
8016 }
8017 
8018 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
8019 {
8020 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
8021 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
8022 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
8023 }
8024 
8025 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
8026 {
8027 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
8028 }
8029 
8030 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
8031 {
8032 	struct mlxsw_sp_router *router;
8033 
8034 	/* Flush pending FIB notifications and then flush the device's
8035 	 * table before requesting another dump. The FIB notification
8036 	 * block is unregistered, so no need to take RTNL.
8037 	 */
8038 	mlxsw_core_flush_owq();
8039 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
8040 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
8041 }
8042 
8043 #ifdef CONFIG_IP_ROUTE_MULTIPATH
8044 static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
8045 {
8046 	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
8047 }
8048 
8049 static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
8050 {
8051 	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
8052 }
8053 
8054 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
8055 {
8056 	struct net *net = mlxsw_sp_net(mlxsw_sp);
8057 	bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
8058 
8059 	mlxsw_sp_mp_hash_header_set(recr2_pl,
8060 				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
8061 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
8062 	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
8063 	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
8064 	if (only_l3)
8065 		return;
8066 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
8067 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
8068 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
8069 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
8070 }
8071 
8072 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
8073 {
8074 	bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
8075 
8076 	mlxsw_sp_mp_hash_header_set(recr2_pl,
8077 				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
8078 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
8079 	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
8080 	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
8081 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
8082 	if (only_l3) {
8083 		mlxsw_sp_mp_hash_field_set(recr2_pl,
8084 					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
8085 	} else {
8086 		mlxsw_sp_mp_hash_header_set(recr2_pl,
8087 					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
8088 		mlxsw_sp_mp_hash_field_set(recr2_pl,
8089 					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
8090 		mlxsw_sp_mp_hash_field_set(recr2_pl,
8091 					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
8092 	}
8093 }
8094 
8095 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8096 {
8097 	char recr2_pl[MLXSW_REG_RECR2_LEN];
8098 	u32 seed;
8099 
8100 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
8101 	mlxsw_reg_recr2_pack(recr2_pl, seed);
8102 	mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
8103 	mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
8104 
8105 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
8106 }
8107 #else
8108 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8109 {
8110 	return 0;
8111 }
8112 #endif
8113 
8114 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
8115 {
8116 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
8117 	unsigned int i;
8118 
8119 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
8120 
8121 	/* HW is determining switch priority based on DSCP-bits, but the
8122 	 * kernel is still doing that based on the ToS. Since there's a
8123 	 * mismatch in bits we need to make sure to translate the right
8124 	 * value ToS would observe, skipping the 2 least-significant ECN bits.
8125 	 */
8126 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
8127 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
8128 
8129 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
8130 }
8131 
8132 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
8133 {
8134 	struct net *net = mlxsw_sp_net(mlxsw_sp);
8135 	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
8136 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
8137 	u64 max_rifs;
8138 	int err;
8139 
8140 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
8141 		return -EIO;
8142 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
8143 
8144 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
8145 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
8146 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
8147 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8148 	if (err)
8149 		return err;
8150 	return 0;
8151 }
8152 
8153 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8154 {
8155 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
8156 
8157 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
8158 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8159 }
8160 
8161 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
8162 			 struct netlink_ext_ack *extack)
8163 {
8164 	struct mlxsw_sp_router *router;
8165 	int err;
8166 
8167 	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
8168 	if (!router)
8169 		return -ENOMEM;
8170 	mlxsw_sp->router = router;
8171 	router->mlxsw_sp = mlxsw_sp;
8172 
8173 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
8174 	err = register_inetaddr_notifier(&router->inetaddr_nb);
8175 	if (err)
8176 		goto err_register_inetaddr_notifier;
8177 
8178 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
8179 	err = register_inet6addr_notifier(&router->inet6addr_nb);
8180 	if (err)
8181 		goto err_register_inet6addr_notifier;
8182 
8183 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
8184 	err = __mlxsw_sp_router_init(mlxsw_sp);
8185 	if (err)
8186 		goto err_router_init;
8187 
8188 	err = mlxsw_sp_rifs_init(mlxsw_sp);
8189 	if (err)
8190 		goto err_rifs_init;
8191 
8192 	err = mlxsw_sp_ipips_init(mlxsw_sp);
8193 	if (err)
8194 		goto err_ipips_init;
8195 
8196 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
8197 			      &mlxsw_sp_nexthop_ht_params);
8198 	if (err)
8199 		goto err_nexthop_ht_init;
8200 
8201 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
8202 			      &mlxsw_sp_nexthop_group_ht_params);
8203 	if (err)
8204 		goto err_nexthop_group_ht_init;
8205 
8206 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
8207 	err = mlxsw_sp_lpm_init(mlxsw_sp);
8208 	if (err)
8209 		goto err_lpm_init;
8210 
8211 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
8212 	if (err)
8213 		goto err_mr_init;
8214 
8215 	err = mlxsw_sp_vrs_init(mlxsw_sp);
8216 	if (err)
8217 		goto err_vrs_init;
8218 
8219 	err = mlxsw_sp_neigh_init(mlxsw_sp);
8220 	if (err)
8221 		goto err_neigh_init;
8222 
8223 	mlxsw_sp->router->netevent_nb.notifier_call =
8224 		mlxsw_sp_router_netevent_event;
8225 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8226 	if (err)
8227 		goto err_register_netevent_notifier;
8228 
8229 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
8230 	if (err)
8231 		goto err_mp_hash_init;
8232 
8233 	err = mlxsw_sp_dscp_init(mlxsw_sp);
8234 	if (err)
8235 		goto err_dscp_init;
8236 
8237 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
8238 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8239 				    &mlxsw_sp->router->fib_nb,
8240 				    mlxsw_sp_router_fib_dump_flush, extack);
8241 	if (err)
8242 		goto err_register_fib_notifier;
8243 
8244 	return 0;
8245 
8246 err_register_fib_notifier:
8247 err_dscp_init:
8248 err_mp_hash_init:
8249 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8250 err_register_netevent_notifier:
8251 	mlxsw_sp_neigh_fini(mlxsw_sp);
8252 err_neigh_init:
8253 	mlxsw_sp_vrs_fini(mlxsw_sp);
8254 err_vrs_init:
8255 	mlxsw_sp_mr_fini(mlxsw_sp);
8256 err_mr_init:
8257 	mlxsw_sp_lpm_fini(mlxsw_sp);
8258 err_lpm_init:
8259 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8260 err_nexthop_group_ht_init:
8261 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8262 err_nexthop_ht_init:
8263 	mlxsw_sp_ipips_fini(mlxsw_sp);
8264 err_ipips_init:
8265 	mlxsw_sp_rifs_fini(mlxsw_sp);
8266 err_rifs_init:
8267 	__mlxsw_sp_router_fini(mlxsw_sp);
8268 err_router_init:
8269 	unregister_inet6addr_notifier(&router->inet6addr_nb);
8270 err_register_inet6addr_notifier:
8271 	unregister_inetaddr_notifier(&router->inetaddr_nb);
8272 err_register_inetaddr_notifier:
8273 	kfree(mlxsw_sp->router);
8274 	return err;
8275 }
8276 
8277 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8278 {
8279 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8280 				&mlxsw_sp->router->fib_nb);
8281 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8282 	mlxsw_sp_neigh_fini(mlxsw_sp);
8283 	mlxsw_sp_vrs_fini(mlxsw_sp);
8284 	mlxsw_sp_mr_fini(mlxsw_sp);
8285 	mlxsw_sp_lpm_fini(mlxsw_sp);
8286 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8287 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8288 	mlxsw_sp_ipips_fini(mlxsw_sp);
8289 	mlxsw_sp_rifs_fini(mlxsw_sp);
8290 	__mlxsw_sp_router_fini(mlxsw_sp);
8291 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
8292 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
8293 	kfree(mlxsw_sp->router);
8294 }
8295