// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
	u16 ul_rif_id; /* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}
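
/* Illustrative usage (added for exposition, not part of the original file):
 * a caller that wants the egress packet count of a RIF could do something
 * like the following; the dpipe ERIF table dump is the sort of in-tree
 * consumer this API is written for.
 *
 *	u64 packets;
 *	int err;
 *
 *	err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *					     MLXSW_SP_RIF_COUNTER_EGRESS,
 *					     &packets);
 *
 * The call fails with -EINVAL unless a counter was previously bound to the
 * RIF in that direction with mlxsw_sp_rif_counter_alloc().
 */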

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
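
/* Note on semantics (added for illustration): prefix_usage is a 129-bit
 * bitmap, one bit per possible prefix length, /0 through /128. A virtual
 * router holding only a default route and a 10.0.0.0/16 route would have
 * bits 0 and 16 set:
 *
 *	struct mlxsw_sp_prefix_usage usage = {};
 *	unsigned char prefix;
 *
 *	mlxsw_sp_prefix_usage_set(&usage, 0);
 *	mlxsw_sp_prefix_usage_set(&usage, 16);
 *	mlxsw_sp_prefix_usage_for_each(prefix, &usage)
 *		pr_debug("prefix len %d\n", prefix);	// visits 0, then 16
 */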

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
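
/* Worked example (illustration only): for a prefix usage of {/0, /16, /24},
 * the first loop above leaves root_bin == 24 (the longest used prefix), and
 * the second loop then emits, in order:
 *
 *	mlxsw_reg_ralst_pack(ralst_pl, 24, tree_id);
 *	mlxsw_reg_ralst_bin_pack(ralst_pl, 16, MLXSW_REG_RALST_BIN_NO_CHILD,
 *				 MLXSW_REG_RALST_BIN_NO_CHILD);
 *	mlxsw_reg_ralst_bin_pack(ralst_pl, 24, 16,
 *				 MLXSW_REG_RALST_BIN_NO_CHILD);
 *
 * so each used bin refers back to the previously visited (shorter) used
 * prefix, and prefix length 0 is covered by the root bin packing itself.
 */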

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}
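
/* For example, with the standard kernel table IDs (RT_TABLE_LOCAL = 255,
 * RT_TABLE_MAIN = 254, RT_TABLE_DEFAULT = 253):
 *
 *	mlxsw_sp_fix_tb_id(RT_TABLE_LOCAL)   -> RT_TABLE_MAIN
 *	mlxsw_sp_fix_tb_id(RT_TABLE_DEFAULT) -> RT_TABLE_MAIN
 *	mlxsw_sp_fix_tb_id(100)              -> 100 (custom tables unchanged)
 *
 * so local, main and default all share one virtual router in the device.
 */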

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return dev_get_by_index_rcu(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}
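
/* Usage sketch (illustrative): for a tunnel bound to an underlay device,
 * e.g. "ip link add gre1 type gre remote 192.0.2.1 dev vrf-blue",
 * tun->parms.link refers to vrf-blue, so the lookup above resolves to
 * vrf-blue's FIB table. An unbound tunnel, or one whose bound device has no
 * l3mdev table, falls back to RT_TABLE_MAIN.
 */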

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	bool is_ipip_ul;

	mutex_lock(&mlxsw_sp->router->lock);
	is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
	mutex_unlock(&mlxsw_sp->router->lock);

	return is_ipip_ul;
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	enum mlxsw_sp_ipip_type ipipt;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			    ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);

/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update the offload related to an
 *					 IPIP entry.
 * @mlxsw_sp: mlxsw_sp.
 * @ipip_entry: IPIP entry.
 * @recreate_loopback: Recreates the associated loopback RIF.
 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 * @extack: extack.
 *
 * This always updates decap, and in addition applies the changes requested
 * by the flags above.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}
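
/* Summary (added for exposition) of how the event handlers below combine
 * the flags of __mlxsw_sp_ipip_entry_update_tunnel():
 *
 *	event					recreate  keep    update
 *						loopback  encap   nexthops
 *	overlay device moved to another VRF	true      false   false
 *	underlay device moved to another VRF	true      true    false
 *	underlay device went up or down		false     false   true
 */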

static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return 0;

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     bool *demote_this,
				     struct netlink_ext_ack *extack)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* Moving underlay to a different VRF might cause local address
	 * conflict, and the conflicting tunnels need to be demoted.
	 */
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		*demote_this = true;
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}

void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}
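
/* Illustrative call (mirrors the NETDEV_REGISTER handler above): a new
 * tunnel is only offloaded if no already-offloaded tunnel claims the same
 * local address, and passing NULL as `except' exempts no tunnel:
 *
 *	if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
 *						  saddr, ul_tb_id, NULL))
 *		ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
 *							ol_dev);
 */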
1709 
1710 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1711 						     struct net_device *ul_dev)
1712 {
1713 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1714 
1715 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1716 				 ipip_list_node) {
1717 		struct net_device *ol_dev = ipip_entry->ol_dev;
1718 		struct net_device *ipip_ul_dev;
1719 
1720 		rcu_read_lock();
1721 		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1722 		rcu_read_unlock();
1723 		if (ipip_ul_dev == ul_dev)
1724 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1725 	}
1726 }
1727 
1728 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1729 				     struct net_device *ol_dev,
1730 				     unsigned long event,
1731 				     struct netdev_notifier_info *info)
1732 {
1733 	struct netdev_notifier_changeupper_info *chup;
1734 	struct netlink_ext_ack *extack;
1735 	int err = 0;
1736 
1737 	mutex_lock(&mlxsw_sp->router->lock);
1738 	switch (event) {
1739 	case NETDEV_REGISTER:
1740 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1741 		break;
1742 	case NETDEV_UNREGISTER:
1743 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1744 		break;
1745 	case NETDEV_UP:
1746 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1747 		break;
1748 	case NETDEV_DOWN:
1749 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1750 		break;
1751 	case NETDEV_CHANGEUPPER:
1752 		chup = container_of(info, typeof(*chup), info);
1753 		extack = info->extack;
1754 		if (netif_is_l3_master(chup->upper_dev))
1755 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1756 								   ol_dev,
1757 								   extack);
1758 		break;
1759 	case NETDEV_CHANGE:
1760 		extack = info->extack;
1761 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1762 							      ol_dev, extack);
1763 		break;
1764 	case NETDEV_CHANGEMTU:
1765 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1766 		break;
1767 	}
1768 	mutex_unlock(&mlxsw_sp->router->lock);
1769 	return err;
1770 }
1771 
1772 static int
1773 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1774 				   struct mlxsw_sp_ipip_entry *ipip_entry,
1775 				   struct net_device *ul_dev,
1776 				   bool *demote_this,
1777 				   unsigned long event,
1778 				   struct netdev_notifier_info *info)
1779 {
1780 	struct netdev_notifier_changeupper_info *chup;
1781 	struct netlink_ext_ack *extack;
1782 
1783 	switch (event) {
1784 	case NETDEV_CHANGEUPPER:
1785 		chup = container_of(info, typeof(*chup), info);
1786 		extack = info->extack;
1787 		if (netif_is_l3_master(chup->upper_dev))
1788 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1789 								    ipip_entry,
1790 								    ul_dev,
1791 								    demote_this,
1792 								    extack);
1793 		break;
1794 
1795 	case NETDEV_UP:
1796 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1797 							   ul_dev);
1798 	case NETDEV_DOWN:
1799 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1800 							     ipip_entry,
1801 							     ul_dev);
1802 	}
1803 	return 0;
1804 }
1805 
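/* Walk all tunnels whose underlay device is `ul_dev' and let each handle
 * the event. A handler may ask for its tunnel to be demoted by setting
 * `demote_this'; the list cursor is then rewound to the previous entry,
 * because demotion removes the tunnel from the list. On error, all
 * tunnels using `ul_dev' are demoted.
 */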
1806 int
1807 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1808 				 struct net_device *ul_dev,
1809 				 unsigned long event,
1810 				 struct netdev_notifier_info *info)
1811 {
1812 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1813 	int err = 0;
1814 
1815 	mutex_lock(&mlxsw_sp->router->lock);
1816 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1817 								ul_dev,
1818 								ipip_entry))) {
1819 		struct mlxsw_sp_ipip_entry *prev;
1820 		bool demote_this = false;
1821 
1822 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1823 							 ul_dev, &demote_this,
1824 							 event, info);
1825 		if (err) {
1826 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1827 								 ul_dev);
1828 			break;
1829 		}
1830 
1831 		if (demote_this) {
1832 			if (list_is_first(&ipip_entry->ipip_list_node,
1833 					  &mlxsw_sp->router->ipip_list))
1834 				prev = NULL;
1835 			else
1836 				/* This can't be cached from previous iteration,
1837 				 * because that entry could be gone now.
1838 				 */
1839 				prev = list_prev_entry(ipip_entry,
1840 						       ipip_list_node);
1841 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1842 			ipip_entry = prev;
1843 		}
1844 	}
1845 	mutex_unlock(&mlxsw_sp->router->lock);
1846 
1847 	return err;
1848 }
1849 
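/* Promote a route to an NVE decapsulation entry: record the underlay
 * lookup parameters and, if a matching local (trap) route already exists,
 * convert it to point at `tunnel_index'. The route may also be added only
 * later, in which case it is promoted at that point.
 */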
1850 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1851 				      enum mlxsw_sp_l3proto ul_proto,
1852 				      const union mlxsw_sp_l3addr *ul_sip,
1853 				      u32 tunnel_index)
1854 {
1855 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1856 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1857 	struct mlxsw_sp_fib_entry *fib_entry;
1858 	int err = 0;
1859 
1860 	mutex_lock(&mlxsw_sp->router->lock);
1861 
1862 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
1863 		err = -EINVAL;
1864 		goto out;
1865 	}
1866 
1867 	router->nve_decap_config.ul_tb_id = ul_tb_id;
1868 	router->nve_decap_config.tunnel_index = tunnel_index;
1869 	router->nve_decap_config.ul_proto = ul_proto;
1870 	router->nve_decap_config.ul_sip = *ul_sip;
1871 	router->nve_decap_config.valid = true;
1872 
	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface.
	 */
1876 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1877 							 ul_proto, ul_sip,
1878 							 type);
1879 	if (!fib_entry)
1880 		goto out;
1881 
1882 	fib_entry->decap.tunnel_index = tunnel_index;
1883 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1884 
1885 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1886 	if (err)
1887 		goto err_fib_entry_update;
1888 
1889 	goto out;
1890 
1891 err_fib_entry_update:
1892 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1893 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1894 out:
1895 	mutex_unlock(&mlxsw_sp->router->lock);
1896 	return err;
1897 }
1898 
1899 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1900 				      enum mlxsw_sp_l3proto ul_proto,
1901 				      const union mlxsw_sp_l3addr *ul_sip)
1902 {
1903 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1904 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1905 	struct mlxsw_sp_fib_entry *fib_entry;
1906 
1907 	mutex_lock(&mlxsw_sp->router->lock);
1908 
1909 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
1910 		goto out;
1911 
1912 	router->nve_decap_config.valid = false;
1913 
1914 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1915 							 ul_proto, ul_sip,
1916 							 type);
1917 	if (!fib_entry)
1918 		goto out;
1919 
1920 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1921 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1922 out:
1923 	mutex_unlock(&mlxsw_sp->router->lock);
1924 }
1925 
1926 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
1927 					 u32 ul_tb_id,
1928 					 enum mlxsw_sp_l3proto ul_proto,
1929 					 const union mlxsw_sp_l3addr *ul_sip)
1930 {
1931 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1932 
1933 	return router->nve_decap_config.valid &&
1934 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
1935 	       router->nve_decap_config.ul_proto == ul_proto &&
1936 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
1937 		       sizeof(*ul_sip));
1938 }
1939 
1940 struct mlxsw_sp_neigh_key {
1941 	struct neighbour *n;
1942 };
1943 
1944 struct mlxsw_sp_neigh_entry {
1945 	struct list_head rif_list_node;
1946 	struct rhash_head ht_node;
1947 	struct mlxsw_sp_neigh_key key;
1948 	u16 rif;
1949 	bool connected;
1950 	unsigned char ha[ETH_ALEN];
1951 	struct list_head nexthop_list; /* list of nexthops using
1952 					* this neigh entry
1953 					*/
1954 	struct list_head nexthop_neighs_list_node;
1955 	unsigned int counter_index;
1956 	bool counter_valid;
1957 };
1958 
1959 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1960 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1961 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1962 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
1963 };
1964 
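/* Iterate over the neighbour entries associated with `rif'. Pass NULL to
 * get the first entry; NULL is returned past the last one.
 */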
1965 struct mlxsw_sp_neigh_entry *
1966 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1967 			struct mlxsw_sp_neigh_entry *neigh_entry)
1968 {
1969 	if (!neigh_entry) {
1970 		if (list_empty(&rif->neigh_list))
1971 			return NULL;
1972 		else
1973 			return list_first_entry(&rif->neigh_list,
1974 						typeof(*neigh_entry),
1975 						rif_list_node);
1976 	}
1977 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
1978 		return NULL;
1979 	return list_next_entry(neigh_entry, rif_list_node);
1980 }
1981 
1982 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1983 {
1984 	return neigh_entry->key.n->tbl->family;
1985 }
1986 
1987 unsigned char *
1988 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1989 {
1990 	return neigh_entry->ha;
1991 }
1992 
1993 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1994 {
1995 	struct neighbour *n;
1996 
1997 	n = neigh_entry->key.n;
1998 	return ntohl(*((__be32 *) n->primary_key));
1999 }
2000 
2001 struct in6_addr *
2002 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2003 {
2004 	struct neighbour *n;
2005 
2006 	n = neigh_entry->key.n;
2007 	return (struct in6_addr *) &n->primary_key;
2008 }
2009 
2010 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2011 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2012 			       u64 *p_counter)
2013 {
2014 	if (!neigh_entry->counter_valid)
2015 		return -EINVAL;
2016 
2017 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2018 					 p_counter, NULL);
2019 }
2020 
2021 static struct mlxsw_sp_neigh_entry *
2022 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2023 			   u16 rif)
2024 {
2025 	struct mlxsw_sp_neigh_entry *neigh_entry;
2026 
2027 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2028 	if (!neigh_entry)
2029 		return NULL;
2030 
2031 	neigh_entry->key.n = n;
2032 	neigh_entry->rif = rif;
2033 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2034 
2035 	return neigh_entry;
2036 }
2037 
2038 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2039 {
2040 	kfree(neigh_entry);
2041 }
2042 
2043 static int
2044 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2045 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2046 {
2047 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2048 				      &neigh_entry->ht_node,
2049 				      mlxsw_sp_neigh_ht_params);
2050 }
2051 
2052 static void
2053 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2054 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2055 {
2056 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2057 			       &neigh_entry->ht_node,
2058 			       mlxsw_sp_neigh_ht_params);
2059 }
2060 
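/* A flow counter is allocated for a neighbour entry only when counters
 * are enabled on the matching dpipe host table (host4 / host6).
 */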
2061 static bool
2062 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2063 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2064 {
2065 	struct devlink *devlink;
2066 	const char *table_name;
2067 
2068 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2069 	case AF_INET:
2070 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2071 		break;
2072 	case AF_INET6:
2073 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2074 		break;
2075 	default:
2076 		WARN_ON(1);
2077 		return false;
2078 	}
2079 
2080 	devlink = priv_to_devlink(mlxsw_sp->core);
2081 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2082 }
2083 
2084 static void
2085 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2086 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2087 {
2088 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2089 		return;
2090 
2091 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2092 		return;
2093 
2094 	neigh_entry->counter_valid = true;
2095 }
2096 
2097 static void
2098 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2099 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2100 {
2101 	if (!neigh_entry->counter_valid)
2102 		return;
2103 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2104 				   neigh_entry->counter_index);
2105 	neigh_entry->counter_valid = false;
2106 }
2107 
2108 static struct mlxsw_sp_neigh_entry *
2109 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2110 {
2111 	struct mlxsw_sp_neigh_entry *neigh_entry;
2112 	struct mlxsw_sp_rif *rif;
2113 	int err;
2114 
2115 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2116 	if (!rif)
2117 		return ERR_PTR(-EINVAL);
2118 
2119 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2120 	if (!neigh_entry)
2121 		return ERR_PTR(-ENOMEM);
2122 
2123 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2124 	if (err)
2125 		goto err_neigh_entry_insert;
2126 
2127 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2128 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2129 
2130 	return neigh_entry;
2131 
2132 err_neigh_entry_insert:
2133 	mlxsw_sp_neigh_entry_free(neigh_entry);
2134 	return ERR_PTR(err);
2135 }
2136 
2137 static void
2138 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2139 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2140 {
2141 	list_del(&neigh_entry->rif_list_node);
2142 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2143 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2144 	mlxsw_sp_neigh_entry_free(neigh_entry);
2145 }
2146 
2147 static struct mlxsw_sp_neigh_entry *
2148 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2149 {
2150 	struct mlxsw_sp_neigh_key key;
2151 
2152 	key.n = n;
2153 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2154 				      &key, mlxsw_sp_neigh_ht_params);
2155 }
2156 
2157 static void
2158 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2159 {
2160 	unsigned long interval;
2161 
2162 #if IS_ENABLED(CONFIG_IPV6)
2163 	interval = min_t(unsigned long,
2164 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2165 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2166 #else
2167 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2168 #endif
2169 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2170 }
2171 
2172 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2173 						   char *rauhtd_pl,
2174 						   int ent_index)
2175 {
2176 	struct net_device *dev;
2177 	struct neighbour *n;
2178 	__be32 dipn;
2179 	u32 dip;
2180 	u16 rif;
2181 
2182 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2183 
2184 	if (!mlxsw_sp->router->rifs[rif]) {
2185 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2186 		return;
2187 	}
2188 
2189 	dipn = htonl(dip);
2190 	dev = mlxsw_sp->router->rifs[rif]->dev;
2191 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2192 	if (!n)
2193 		return;
2194 
2195 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2196 	neigh_event_send(n, NULL);
2197 	neigh_release(n);
2198 }
2199 
2200 #if IS_ENABLED(CONFIG_IPV6)
2201 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2202 						   char *rauhtd_pl,
2203 						   int rec_index)
2204 {
2205 	struct net_device *dev;
2206 	struct neighbour *n;
2207 	struct in6_addr dip;
2208 	u16 rif;
2209 
2210 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2211 					 (char *) &dip);
2212 
2213 	if (!mlxsw_sp->router->rifs[rif]) {
2214 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2215 		return;
2216 	}
2217 
2218 	dev = mlxsw_sp->router->rifs[rif]->dev;
2219 	n = neigh_lookup(&nd_tbl, &dip, dev);
2220 	if (!n)
2221 		return;
2222 
2223 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2224 	neigh_event_send(n, NULL);
2225 	neigh_release(n);
2226 }
2227 #else
2228 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2229 						   char *rauhtd_pl,
2230 						   int rec_index)
2231 {
2232 }
2233 #endif
2234 
2235 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2236 						   char *rauhtd_pl,
2237 						   int rec_index)
2238 {
2239 	u8 num_entries;
2240 	int i;
2241 
2242 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2243 								rec_index);
2244 	/* Hardware starts counting at 0, so add 1. */
2245 	num_entries++;
2246 
2247 	/* Each record consists of several neighbour entries. */
2248 	for (i = 0; i < num_entries; i++) {
2249 		int ent_index;
2250 
2251 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2252 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2253 						       ent_index);
2254 	}
2255 
2256 }
2257 
2258 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2259 						   char *rauhtd_pl,
2260 						   int rec_index)
2261 {
2262 	/* One record contains one entry. */
2263 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2264 					       rec_index);
2265 }
2266 
2267 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2268 					      char *rauhtd_pl, int rec_index)
2269 {
2270 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2271 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2272 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2273 						       rec_index);
2274 		break;
2275 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2276 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2277 						       rec_index);
2278 		break;
2279 	}
2280 }
2281 
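/* The dump must be re-issued when the response used all available records
 * and the last record is completely populated: an IPv6 record always is,
 * while an IPv4 record is full only when it holds the maximum number of
 * entries per record.
 */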
2282 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2283 {
2284 	u8 num_rec, last_rec_index, num_entries;
2285 
2286 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2287 	last_rec_index = num_rec - 1;
2288 
2289 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2290 		return false;
2291 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2292 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2293 		return true;
2294 
2295 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2296 								last_rec_index);
2297 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2298 		return true;
2299 	return false;
2300 }
2301 
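/* Dump active neighbour entries of the given type from the device and
 * process each record, re-issuing the query for as long as the response
 * comes back full.
 */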
2302 static int
2303 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2304 				       char *rauhtd_pl,
2305 				       enum mlxsw_reg_rauhtd_type type)
2306 {
2307 	int i, num_rec;
2308 	int err;
2309 
2310 	/* Ensure the RIF we read from the device does not change mid-dump. */
2311 	mutex_lock(&mlxsw_sp->router->lock);
2312 	do {
2313 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2314 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2315 				      rauhtd_pl);
2316 		if (err) {
2317 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2318 			break;
2319 		}
2320 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2321 		for (i = 0; i < num_rec; i++)
2322 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2323 							  i);
2324 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2325 	mutex_unlock(&mlxsw_sp->router->lock);
2326 
2327 	return err;
2328 }
2329 
2330 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2331 {
2332 	enum mlxsw_reg_rauhtd_type type;
2333 	char *rauhtd_pl;
2334 	int err;
2335 
2336 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2337 	if (!rauhtd_pl)
2338 		return -ENOMEM;
2339 
2340 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2341 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2342 	if (err)
2343 		goto out;
2344 
2345 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2346 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2347 out:
2348 	kfree(rauhtd_pl);
2349 	return err;
2350 }
2351 
2352 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2353 {
2354 	struct mlxsw_sp_neigh_entry *neigh_entry;
2355 
2356 	mutex_lock(&mlxsw_sp->router->lock);
2357 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2358 			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of the traffic.
		 */
2362 		neigh_event_send(neigh_entry->key.n, NULL);
2363 	mutex_unlock(&mlxsw_sp->router->lock);
2364 }
2365 
2366 static void
2367 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2368 {
2369 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2370 
2371 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2372 			       msecs_to_jiffies(interval));
2373 }
2374 
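/* Periodic work: ask the device which neighbour entries were active and
 * feed that back to the kernel, then nudge the kernel to keep nexthop
 * neighbours resolved. Re-arms itself using the configured interval.
 */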
2375 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2376 {
2377 	struct mlxsw_sp_router *router;
2378 	int err;
2379 
2380 	router = container_of(work, struct mlxsw_sp_router,
2381 			      neighs_update.dw.work);
2382 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2385 
2386 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2387 
2388 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2389 }
2390 
2391 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2392 {
2393 	struct mlxsw_sp_neigh_entry *neigh_entry;
2394 	struct mlxsw_sp_router *router;
2395 
2396 	router = container_of(work, struct mlxsw_sp_router,
2397 			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those that are unresolved and
	 * send ARP on them. This solves the chicken-and-egg problem where a
	 * nexthop would not get offloaded until its neighbour is resolved,
	 * but the neighbour would never get resolved as long as traffic
	 * flows in HW using a different nexthop.
	 */
2404 	mutex_lock(&router->lock);
2405 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2406 			    nexthop_neighs_list_node)
2407 		if (!neigh_entry->connected)
2408 			neigh_event_send(neigh_entry->key.n, NULL);
2409 	mutex_unlock(&router->lock);
2410 
2411 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2412 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2413 }
2414 
2415 static void
2416 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2417 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2418 			      bool removing, bool dead);
2419 
2420 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2421 {
2422 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2423 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2424 }
2425 
2426 static int
2427 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2428 				struct mlxsw_sp_neigh_entry *neigh_entry,
2429 				enum mlxsw_reg_rauht_op op)
2430 {
2431 	struct neighbour *n = neigh_entry->key.n;
2432 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2433 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2434 
2435 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2436 			      dip);
2437 	if (neigh_entry->counter_valid)
2438 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2439 					     neigh_entry->counter_index);
2440 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2441 }
2442 
2443 static int
2444 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2445 				struct mlxsw_sp_neigh_entry *neigh_entry,
2446 				enum mlxsw_reg_rauht_op op)
2447 {
2448 	struct neighbour *n = neigh_entry->key.n;
2449 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2450 	const char *dip = n->primary_key;
2451 
2452 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2453 			      dip);
2454 	if (neigh_entry->counter_valid)
2455 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2456 					     neigh_entry->counter_index);
2457 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2458 }
2459 
2460 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2461 {
2462 	struct neighbour *n = neigh_entry->key.n;
2463 
2464 	/* Packets with a link-local destination address are trapped
2465 	 * after LPM lookup and never reach the neighbour table, so
2466 	 * there is no need to program such neighbours to the device.
2467 	 */
2468 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2469 	    IPV6_ADDR_LINKLOCAL)
2470 		return true;
2471 	return false;
2472 }
2473 
2474 static void
2475 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2476 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2477 			    bool adding)
2478 {
2479 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2480 	int err;
2481 
2482 	if (!adding && !neigh_entry->connected)
2483 		return;
2484 	neigh_entry->connected = adding;
2485 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2486 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2487 						      op);
2488 		if (err)
2489 			return;
2490 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2491 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2492 			return;
2493 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2494 						      op);
2495 		if (err)
2496 			return;
2497 	} else {
2498 		WARN_ON_ONCE(1);
2499 		return;
2500 	}
2501 
2502 	if (adding)
2503 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2504 	else
2505 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2506 }
2507 
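/* Allocate or free the flow counter of the neighbour entry and re-write
 * the entry to the device so that the new counter binding takes effect.
 */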
2508 void
2509 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2510 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2511 				    bool adding)
2512 {
2513 	if (adding)
2514 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2515 	else
2516 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2517 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2518 }
2519 
2520 struct mlxsw_sp_netevent_work {
2521 	struct work_struct work;
2522 	struct mlxsw_sp *mlxsw_sp;
2523 	struct neighbour *n;
2524 };
2525 
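/* Process a neighbour update in process context: snapshot the neighbour
 * state under its lock, then program the device accordingly, creating or
 * destroying the driver's entry as needed.
 */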
2526 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2527 {
2528 	struct mlxsw_sp_netevent_work *net_work =
2529 		container_of(work, struct mlxsw_sp_netevent_work, work);
2530 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2531 	struct mlxsw_sp_neigh_entry *neigh_entry;
2532 	struct neighbour *n = net_work->n;
2533 	unsigned char ha[ETH_ALEN];
2534 	bool entry_connected;
2535 	u8 nud_state, dead;
2536 
2537 	/* If these parameters are changed after we release the lock,
2538 	 * then we are guaranteed to receive another event letting us
2539 	 * know about it.
2540 	 */
2541 	read_lock_bh(&n->lock);
2542 	memcpy(ha, n->ha, ETH_ALEN);
2543 	nud_state = n->nud_state;
2544 	dead = n->dead;
2545 	read_unlock_bh(&n->lock);
2546 
2547 	mutex_lock(&mlxsw_sp->router->lock);
2548 	mlxsw_sp_span_respin(mlxsw_sp);
2549 
2550 	entry_connected = nud_state & NUD_VALID && !dead;
2551 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2552 	if (!entry_connected && !neigh_entry)
2553 		goto out;
2554 	if (!neigh_entry) {
2555 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2556 		if (IS_ERR(neigh_entry))
2557 			goto out;
2558 	}
2559 
2560 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2561 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2562 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2563 				      dead);
2564 
2565 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2566 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2567 
2568 out:
2569 	mutex_unlock(&mlxsw_sp->router->lock);
2570 	neigh_release(n);
2571 	kfree(net_work);
2572 }
2573 
2574 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2575 
2576 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2577 {
2578 	struct mlxsw_sp_netevent_work *net_work =
2579 		container_of(work, struct mlxsw_sp_netevent_work, work);
2580 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2581 
2582 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2583 	kfree(net_work);
2584 }
2585 
2586 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2587 
2588 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2589 {
2590 	struct mlxsw_sp_netevent_work *net_work =
2591 		container_of(work, struct mlxsw_sp_netevent_work, work);
2592 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2593 
2594 	__mlxsw_sp_router_init(mlxsw_sp);
2595 	kfree(net_work);
2596 }
2597 
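/* Schedule a work item from atomic (notifier) context, provided the event
 * comes from the network namespace this router instance is bound to.
 */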
2598 static int mlxsw_sp_router_schedule_work(struct net *net,
2599 					 struct notifier_block *nb,
2600 					 void (*cb)(struct work_struct *))
2601 {
2602 	struct mlxsw_sp_netevent_work *net_work;
2603 	struct mlxsw_sp_router *router;
2604 
2605 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2606 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2607 		return NOTIFY_DONE;
2608 
2609 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2610 	if (!net_work)
2611 		return NOTIFY_BAD;
2612 
2613 	INIT_WORK(&net_work->work, cb);
2614 	net_work->mlxsw_sp = router->mlxsw_sp;
2615 	mlxsw_core_schedule_work(&net_work->work);
2616 	return NOTIFY_DONE;
2617 }
2618 
2619 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2620 					  unsigned long event, void *ptr)
2621 {
2622 	struct mlxsw_sp_netevent_work *net_work;
2623 	struct mlxsw_sp_port *mlxsw_sp_port;
2624 	struct mlxsw_sp *mlxsw_sp;
2625 	unsigned long interval;
2626 	struct neigh_parms *p;
2627 	struct neighbour *n;
2628 
2629 	switch (event) {
2630 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2631 		p = ptr;
2632 
2633 		/* We don't care about changes in the default table. */
2634 		if (!p->dev || (p->tbl->family != AF_INET &&
2635 				p->tbl->family != AF_INET6))
2636 			return NOTIFY_DONE;
2637 
2638 		/* We are in atomic context and can't take RTNL mutex,
2639 		 * so use RCU variant to walk the device chain.
2640 		 */
2641 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2642 		if (!mlxsw_sp_port)
2643 			return NOTIFY_DONE;
2644 
2645 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2646 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2647 		mlxsw_sp->router->neighs_update.interval = interval;
2648 
2649 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2650 		break;
2651 	case NETEVENT_NEIGH_UPDATE:
2652 		n = ptr;
2653 
2654 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2655 			return NOTIFY_DONE;
2656 
2657 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2658 		if (!mlxsw_sp_port)
2659 			return NOTIFY_DONE;
2660 
2661 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2662 		if (!net_work) {
2663 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
2664 			return NOTIFY_BAD;
2665 		}
2666 
2667 		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2668 		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2669 		net_work->n = n;
2670 
		/* Take a reference to ensure the neighbour won't be
		 * destroyed until we drop the reference in the delayed
		 * work.
		 */
2675 		neigh_clone(n);
2676 		mlxsw_core_schedule_work(&net_work->work);
2677 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2678 		break;
2679 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2680 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2681 		return mlxsw_sp_router_schedule_work(ptr, nb,
2682 				mlxsw_sp_router_mp_hash_event_work);
2683 
2684 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2685 		return mlxsw_sp_router_schedule_work(ptr, nb,
2686 				mlxsw_sp_router_update_priority_work);
2687 	}
2688 
2689 	return NOTIFY_DONE;
2690 }
2691 
2692 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2693 {
2694 	int err;
2695 
2696 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2697 			      &mlxsw_sp_neigh_ht_params);
2698 	if (err)
2699 		return err;
2700 
2701 	/* Initialize the polling interval according to the default
2702 	 * table.
2703 	 */
2704 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2705 
	/* Create the delayed works for neighbour activity update and for
	 * probing of unresolved nexthops.
	 */
2707 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2708 			  mlxsw_sp_router_neighs_update_work);
2709 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2710 			  mlxsw_sp_router_probe_unresolved_nexthops);
2711 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2712 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2713 	return 0;
2714 }
2715 
2716 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2717 {
2718 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2719 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2720 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2721 }
2722 
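/* Called when a RIF is gone. Unprogram all neighbour entries that were
 * installed on it and destroy them.
 */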
2723 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2724 					 struct mlxsw_sp_rif *rif)
2725 {
2726 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2727 
2728 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2729 				 rif_list_node) {
2730 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2731 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2732 	}
2733 }
2734 
2735 enum mlxsw_sp_nexthop_type {
2736 	MLXSW_SP_NEXTHOP_TYPE_ETH,
2737 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
2738 };
2739 
2740 struct mlxsw_sp_nexthop_key {
2741 	struct fib_nh *fib_nh;
2742 };
2743 
2744 struct mlxsw_sp_nexthop {
2745 	struct list_head neigh_list_node; /* member of neigh entry list */
2746 	struct list_head rif_list_node;
2747 	struct list_head router_list_node;
2748 	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2749 						* this belongs to
2750 						*/
2751 	struct rhash_head ht_node;
2752 	struct mlxsw_sp_nexthop_key key;
2753 	unsigned char gw_addr[sizeof(struct in6_addr)];
2754 	int ifindex;
2755 	int nh_weight;
2756 	int norm_nh_weight;
2757 	int num_adj_entries;
2758 	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put into the KVD linear area of
			      * this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into the
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that the MAC of this neigh should
		      * be updated in HW.
		      */
2768 	enum mlxsw_sp_nexthop_type type;
2769 	union {
2770 		struct mlxsw_sp_neigh_entry *neigh_entry;
2771 		struct mlxsw_sp_ipip_entry *ipip_entry;
2772 	};
2773 	unsigned int counter_index;
2774 	bool counter_valid;
2775 };
2776 
2777 struct mlxsw_sp_nexthop_group {
2778 	void *priv;
2779 	struct rhash_head ht_node;
2780 	struct list_head fib_list; /* list of fib entries that use this group */
2781 	struct neigh_table *neigh_tbl;
2782 	u8 adj_index_valid:1,
2783 	   gateway:1; /* routes using the group use a gateway */
2784 	u32 adj_index;
2785 	u16 ecmp_size;
2786 	u16 count;
2787 	int sum_norm_weight;
	struct mlxsw_sp_nexthop nexthops[];
2789 #define nh_rif	nexthops[0].rif
2790 };
2791 
2792 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2793 				    struct mlxsw_sp_nexthop *nh)
2794 {
2795 	struct devlink *devlink;
2796 
2797 	devlink = priv_to_devlink(mlxsw_sp->core);
2798 	if (!devlink_dpipe_table_counter_enabled(devlink,
2799 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2800 		return;
2801 
2802 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2803 		return;
2804 
2805 	nh->counter_valid = true;
2806 }
2807 
2808 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2809 				   struct mlxsw_sp_nexthop *nh)
2810 {
2811 	if (!nh->counter_valid)
2812 		return;
2813 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2814 	nh->counter_valid = false;
2815 }
2816 
2817 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2818 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2819 {
2820 	if (!nh->counter_valid)
2821 		return -EINVAL;
2822 
2823 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2824 					 p_counter, NULL);
2825 }
2826 
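/* Iterate over all nexthops known to the router. Pass NULL to get the
 * first one; NULL is returned past the last. A hypothetical caller
 * (use() standing in for any per-nexthop work) might do:
 *
 *	struct mlxsw_sp_nexthop *nh = NULL;
 *
 *	while ((nh = mlxsw_sp_nexthop_next(router, nh)))
 *		use(nh);
 */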
2827 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2828 					       struct mlxsw_sp_nexthop *nh)
2829 {
2830 	if (!nh) {
2831 		if (list_empty(&router->nexthop_list))
2832 			return NULL;
2833 		else
2834 			return list_first_entry(&router->nexthop_list,
2835 						typeof(*nh), router_list_node);
2836 	}
2837 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2838 		return NULL;
2839 	return list_next_entry(nh, router_list_node);
2840 }
2841 
2842 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2843 {
2844 	return nh->offloaded;
2845 }
2846 
2847 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2848 {
2849 	if (!nh->offloaded)
2850 		return NULL;
2851 	return nh->neigh_entry->ha;
2852 }
2853 
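/* Resolve the location of `nh' within its group's adjacency block: the
 * base index, the group size and the offset of this nexthop, computed as
 * the sum of the adjacency entries of the offloaded nexthops preceding it.
 */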
2854 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
2855 			     u32 *p_adj_size, u32 *p_adj_hash_index)
2856 {
2857 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2858 	u32 adj_hash_index = 0;
2859 	int i;
2860 
2861 	if (!nh->offloaded || !nh_grp->adj_index_valid)
2862 		return -EINVAL;
2863 
2864 	*p_adj_index = nh_grp->adj_index;
2865 	*p_adj_size = nh_grp->ecmp_size;
2866 
2867 	for (i = 0; i < nh_grp->count; i++) {
2868 		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2869 
2870 		if (nh_iter == nh)
2871 			break;
2872 		if (nh_iter->offloaded)
2873 			adj_hash_index += nh_iter->num_adj_entries;
2874 	}
2875 
2876 	*p_adj_hash_index = adj_hash_index;
2877 	return 0;
2878 }
2879 
2880 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2881 {
2882 	return nh->rif;
2883 }
2884 
2885 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2886 {
2887 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2888 	int i;
2889 
2890 	for (i = 0; i < nh_grp->count; i++) {
2891 		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2892 
2893 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2894 			return true;
2895 	}
2896 	return false;
2897 }
2898 
2899 static struct fib_info *
2900 mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2901 {
2902 	return nh_grp->priv;
2903 }
2904 
2905 struct mlxsw_sp_nexthop_group_cmp_arg {
2906 	enum mlxsw_sp_l3proto proto;
2907 	union {
2908 		struct fib_info *fi;
2909 		struct mlxsw_sp_fib6_entry *fib6_entry;
2910 	};
2911 };
2912 
2913 static bool
2914 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2915 				    const struct in6_addr *gw, int ifindex,
2916 				    int weight)
2917 {
2918 	int i;
2919 
2920 	for (i = 0; i < nh_grp->count; i++) {
2921 		const struct mlxsw_sp_nexthop *nh;
2922 
2923 		nh = &nh_grp->nexthops[i];
2924 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
2925 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2926 			return true;
2927 	}
2928 
2929 	return false;
2930 }
2931 
2932 static bool
2933 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2934 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
2935 {
2936 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2937 
2938 	if (nh_grp->count != fib6_entry->nrt6)
2939 		return false;
2940 
2941 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2942 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
2943 		struct in6_addr *gw;
2944 		int ifindex, weight;
2945 
2946 		ifindex = fib6_nh->fib_nh_dev->ifindex;
2947 		weight = fib6_nh->fib_nh_weight;
2948 		gw = &fib6_nh->fib_nh_gw6;
2949 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2950 							 weight))
2951 			return false;
2952 	}
2953 
2954 	return true;
2955 }
2956 
2957 static int
2958 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2959 {
2960 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2961 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2962 
2963 	switch (cmp_arg->proto) {
2964 	case MLXSW_SP_L3_PROTO_IPV4:
2965 		return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2966 	case MLXSW_SP_L3_PROTO_IPV6:
2967 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2968 						    cmp_arg->fib6_entry);
2969 	default:
2970 		WARN_ON(1);
2971 		return 1;
2972 	}
2973 }
2974 
2975 static int
2976 mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2977 {
2978 	return nh_grp->neigh_tbl->family;
2979 }
2980 
2981 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2982 {
2983 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
2984 	const struct mlxsw_sp_nexthop *nh;
2985 	struct fib_info *fi;
2986 	unsigned int val;
2987 	int i;
2988 
2989 	switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2990 	case AF_INET:
2991 		fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2992 		return jhash(&fi, sizeof(fi), seed);
2993 	case AF_INET6:
2994 		val = nh_grp->count;
2995 		for (i = 0; i < nh_grp->count; i++) {
2996 			nh = &nh_grp->nexthops[i];
2997 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
2998 		}
2999 		return jhash(&val, sizeof(val), seed);
3000 	default:
3001 		WARN_ON(1);
3002 		return 0;
3003 	}
3004 }
3005 
3006 static u32
3007 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3008 {
3009 	unsigned int val = fib6_entry->nrt6;
3010 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3011 	struct net_device *dev;
3012 
3013 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3014 		dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
3015 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3016 	}
3017 
3018 	return jhash(&val, sizeof(val), seed);
3019 }
3020 
3021 static u32
3022 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3023 {
3024 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3025 
3026 	switch (cmp_arg->proto) {
3027 	case MLXSW_SP_L3_PROTO_IPV4:
3028 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3029 	case MLXSW_SP_L3_PROTO_IPV6:
3030 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3031 	default:
3032 		WARN_ON(1);
3033 		return 0;
3034 	}
3035 }
3036 
3037 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3038 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3039 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3040 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3041 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3042 };
3043 
3044 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3045 					 struct mlxsw_sp_nexthop_group *nh_grp)
3046 {
3047 	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
3048 	    !nh_grp->gateway)
3049 		return 0;
3050 
3051 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3052 				      &nh_grp->ht_node,
3053 				      mlxsw_sp_nexthop_group_ht_params);
3054 }
3055 
3056 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3057 					  struct mlxsw_sp_nexthop_group *nh_grp)
3058 {
3059 	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
3060 	    !nh_grp->gateway)
3061 		return;
3062 
3063 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3064 			       &nh_grp->ht_node,
3065 			       mlxsw_sp_nexthop_group_ht_params);
3066 }
3067 
3068 static struct mlxsw_sp_nexthop_group *
3069 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3070 			       struct fib_info *fi)
3071 {
3072 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3073 
3074 	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
3075 	cmp_arg.fi = fi;
3076 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3077 				      &cmp_arg,
3078 				      mlxsw_sp_nexthop_group_ht_params);
3079 }
3080 
3081 static struct mlxsw_sp_nexthop_group *
3082 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3083 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3084 {
3085 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3086 
3087 	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
3088 	cmp_arg.fib6_entry = fib6_entry;
3089 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3090 				      &cmp_arg,
3091 				      mlxsw_sp_nexthop_group_ht_params);
3092 }
3093 
3094 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3095 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3096 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3097 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3098 };
3099 
3100 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3101 				   struct mlxsw_sp_nexthop *nh)
3102 {
3103 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3104 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3105 }
3106 
3107 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3108 				    struct mlxsw_sp_nexthop *nh)
3109 {
3110 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3111 			       mlxsw_sp_nexthop_ht_params);
3112 }
3113 
3114 static struct mlxsw_sp_nexthop *
3115 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3116 			struct mlxsw_sp_nexthop_key key)
3117 {
3118 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3119 				      mlxsw_sp_nexthop_ht_params);
3120 }
3121 
3122 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3123 					     const struct mlxsw_sp_fib *fib,
3124 					     u32 adj_index, u16 ecmp_size,
3125 					     u32 new_adj_index,
3126 					     u16 new_ecmp_size)
3127 {
3128 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3129 
3130 	mlxsw_reg_raleu_pack(raleu_pl,
3131 			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
3132 			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
3133 			     new_ecmp_size);
3134 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3135 }
3136 
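/* Move all routes using this group from the old adjacency block to the
 * new one, updating each affected FIB (virtual router) via the RALEU
 * register.
 */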
3137 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3138 					  struct mlxsw_sp_nexthop_group *nh_grp,
3139 					  u32 old_adj_index, u16 old_ecmp_size)
3140 {
3141 	struct mlxsw_sp_fib_entry *fib_entry;
3142 	struct mlxsw_sp_fib *fib = NULL;
3143 	int err;
3144 
3145 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3146 		if (fib == fib_entry->fib_node->fib)
3147 			continue;
3148 		fib = fib_entry->fib_node->fib;
3149 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
3150 							old_adj_index,
3151 							old_ecmp_size,
3152 							nh_grp->adj_index,
3153 							nh_grp->ecmp_size);
3154 		if (err)
3155 			return err;
3156 	}
3157 	return 0;
3158 }
3159 
3160 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3161 				     struct mlxsw_sp_nexthop *nh)
3162 {
3163 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3164 	char ratr_pl[MLXSW_REG_RATR_LEN];
3165 
3166 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
3167 			    true, MLXSW_REG_RATR_TYPE_ETHERNET,
3168 			    adj_index, neigh_entry->rif);
3169 	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3170 	if (nh->counter_valid)
3171 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3172 	else
3173 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3174 
3175 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3176 }
3177 
3178 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3179 			    struct mlxsw_sp_nexthop *nh)
3180 {
3181 	int i;
3182 
3183 	for (i = 0; i < nh->num_adj_entries; i++) {
3184 		int err;
3185 
3186 		err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3187 		if (err)
3188 			return err;
3189 	}
3190 
3191 	return 0;
3192 }
3193 
3194 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3195 					  u32 adj_index,
3196 					  struct mlxsw_sp_nexthop *nh)
3197 {
3198 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3199 
3200 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3201 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3202 }
3203 
3204 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3205 					u32 adj_index,
3206 					struct mlxsw_sp_nexthop *nh)
3207 {
3208 	int i;
3209 
3210 	for (i = 0; i < nh->num_adj_entries; i++) {
3211 		int err;
3212 
3213 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3214 						     nh);
3215 		if (err)
3216 			return err;
3217 	}
3218 
3219 	return 0;
3220 }
3221 
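/* Write the adjacency entries of all offloaded nexthops in the group,
 * starting at the group's base adjacency index. When `reallocate' is set,
 * every entry is re-written, not only those marked for update.
 */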
3222 static int
3223 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3224 			      struct mlxsw_sp_nexthop_group *nh_grp,
3225 			      bool reallocate)
3226 {
3227 	u32 adj_index = nh_grp->adj_index; /* base */
3228 	struct mlxsw_sp_nexthop *nh;
3229 	int i;
3230 	int err;
3231 
3232 	for (i = 0; i < nh_grp->count; i++) {
3233 		nh = &nh_grp->nexthops[i];
3234 
3235 		if (!nh->should_offload) {
3236 			nh->offloaded = 0;
3237 			continue;
3238 		}
3239 
3240 		if (nh->update || reallocate) {
3241 			switch (nh->type) {
3242 			case MLXSW_SP_NEXTHOP_TYPE_ETH:
3243 				err = mlxsw_sp_nexthop_update
3244 					    (mlxsw_sp, adj_index, nh);
3245 				break;
3246 			case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3247 				err = mlxsw_sp_nexthop_ipip_update
3248 					    (mlxsw_sp, adj_index, nh);
3249 				break;
3250 			}
3251 			if (err)
3252 				return err;
3253 			nh->update = 0;
3254 			nh->offloaded = 1;
3255 		}
3256 		adj_index += nh->num_adj_entries;
3257 	}
3258 	return 0;
3259 }
3260 
3261 static int
3262 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3263 				    struct mlxsw_sp_nexthop_group *nh_grp)
3264 {
3265 	struct mlxsw_sp_fib_entry *fib_entry;
3266 	int err;
3267 
3268 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3269 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3270 		if (err)
3271 			return err;
3272 	}
3273 	return 0;
3274 }
3275 
3276 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3277 {
3278 	/* Valid sizes for an adjacency group are:
3279 	 * 1-64, 512, 1024, 2048 and 4096.
3280 	 */
3281 	if (*p_adj_grp_size <= 64)
3282 		return;
3283 	else if (*p_adj_grp_size <= 512)
3284 		*p_adj_grp_size = 512;
3285 	else if (*p_adj_grp_size <= 1024)
3286 		*p_adj_grp_size = 1024;
3287 	else if (*p_adj_grp_size <= 2048)
3288 		*p_adj_grp_size = 2048;
3289 	else
3290 		*p_adj_grp_size = 4096;
3291 }
3292 
3293 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3294 					     unsigned int alloc_size)
3295 {
3296 	if (alloc_size >= 4096)
3297 		*p_adj_grp_size = 4096;
3298 	else if (alloc_size >= 2048)
3299 		*p_adj_grp_size = 2048;
3300 	else if (alloc_size >= 1024)
3301 		*p_adj_grp_size = 1024;
3302 	else if (alloc_size >= 512)
3303 		*p_adj_grp_size = 512;
3304 }
3305 
3306 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3307 				     u16 *p_adj_grp_size)
3308 {
3309 	unsigned int alloc_size;
3310 	int err;
3311 
3312 	/* Round up the requested group size to the next size supported
3313 	 * by the device and make sure the request can be satisfied.
3314 	 */
3315 	mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3316 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3317 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3318 					      *p_adj_grp_size, &alloc_size);
3319 	if (err)
3320 		return err;
	/* It is possible the allocation results in more allocated
	 * entries than requested. Try to use as many of them as
	 * possible.
	 */
3325 	mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3326 
3327 	return 0;
3328 }
3329 
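/* Reduce the nexthop weights by their greatest common divisor, so that
 * the smallest possible number of adjacency entries is used. For example,
 * weights 2 and 4 are normalized to 1 and 2, for a sum of 3.
 */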
3330 static void
3331 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3332 {
3333 	int i, g = 0, sum_norm_weight = 0;
3334 	struct mlxsw_sp_nexthop *nh;
3335 
3336 	for (i = 0; i < nh_grp->count; i++) {
3337 		nh = &nh_grp->nexthops[i];
3338 
3339 		if (!nh->should_offload)
3340 			continue;
3341 		if (g > 0)
3342 			g = gcd(nh->nh_weight, g);
3343 		else
3344 			g = nh->nh_weight;
3345 	}
3346 
3347 	for (i = 0; i < nh_grp->count; i++) {
3348 		nh = &nh_grp->nexthops[i];
3349 
3350 		if (!nh->should_offload)
3351 			continue;
3352 		nh->norm_nh_weight = nh->nh_weight / g;
3353 		sum_norm_weight += nh->norm_nh_weight;
3354 	}
3355 
3356 	nh_grp->sum_norm_weight = sum_norm_weight;
3357 }
3358 
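/* Spread the group's adjacency entries among the offloaded nexthops in
 * proportion to their normalized weights. For example, with weights 3 and
 * 5 and an ECMP size of 8, the nexthops get 3 and 5 adjacency entries,
 * respectively.
 */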
3359 static void
3360 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3361 {
3362 	int total = nh_grp->sum_norm_weight;
3363 	u16 ecmp_size = nh_grp->ecmp_size;
3364 	int i, weight = 0, lower_bound = 0;
3365 
3366 	for (i = 0; i < nh_grp->count; i++) {
3367 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3368 		int upper_bound;
3369 
3370 		if (!nh->should_offload)
3371 			continue;
3372 		weight += nh->norm_nh_weight;
3373 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3374 		nh->num_adj_entries = upper_bound - lower_bound;
3375 		lower_bound = upper_bound;
3376 	}
3377 }
3378 
3379 static struct mlxsw_sp_nexthop *
3380 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3381 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3382 
3383 static void
3384 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3385 					struct mlxsw_sp_nexthop_group *nh_grp)
3386 {
3387 	int i;
3388 
3389 	for (i = 0; i < nh_grp->count; i++) {
3390 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3391 
3392 		if (nh->offloaded)
3393 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3394 		else
3395 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3396 	}
3397 }
3398 
3399 static void
3400 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3401 					  struct mlxsw_sp_fib6_entry *fib6_entry)
3402 {
3403 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3404 
3405 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3406 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3407 		struct mlxsw_sp_nexthop *nh;
3408 
3409 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3410 		if (nh && nh->offloaded)
3411 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3412 		else
3413 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3414 	}
3415 }
3416 
3417 static void
3418 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3419 					struct mlxsw_sp_nexthop_group *nh_grp)
3420 {
3421 	struct mlxsw_sp_fib6_entry *fib6_entry;
3422 
3423 	/* Unfortunately, in IPv6 the route and the nexthop are described by
3424 	 * the same struct, so we need to iterate over all the routes using the
3425 	 * nexthop group and set / clear the offload indication for them.
3426 	 */
3427 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3428 			    common.nexthop_group_node)
3429 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3430 }
3431 
3432 static void
3433 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3434 				       struct mlxsw_sp_nexthop_group *nh_grp)
3435 {
3436 	switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
3437 	case AF_INET:
3438 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3439 		break;
3440 	case AF_INET6:
3441 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3442 		break;
3443 	}
3444 }
3445 
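/* Re-program the group's adjacency entries after a change in one of its
 * nexthops. If the set of offloaded nexthops changed, a new adjacency
 * block is allocated, populated and then switched to by mass-updating the
 * routes; on any failure the routes fall back to trapping packets to the
 * kernel.
 */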
3446 static void
3447 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3448 			       struct mlxsw_sp_nexthop_group *nh_grp)
3449 {
3450 	u16 ecmp_size, old_ecmp_size;
3451 	struct mlxsw_sp_nexthop *nh;
3452 	bool offload_change = false;
3453 	u32 adj_index;
3454 	bool old_adj_index_valid;
3455 	u32 old_adj_index;
3456 	int i;
3457 	int err;
3458 
3459 	if (!nh_grp->gateway) {
3460 		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3461 		return;
3462 	}
3463 
3464 	for (i = 0; i < nh_grp->count; i++) {
3465 		nh = &nh_grp->nexthops[i];
3466 
3467 		if (nh->should_offload != nh->offloaded) {
3468 			offload_change = true;
3469 			if (nh->should_offload)
3470 				nh->update = 1;
3471 		}
3472 	}
3473 	if (!offload_change) {
3474 		/* Nothing was added or removed, so no need to reallocate. Just
3475 		 * update MAC on existing adjacency indexes.
3476 		 */
3477 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
3478 		if (err) {
3479 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3480 			goto set_trap;
3481 		}
3482 		return;
3483 	}
3484 	mlxsw_sp_nexthop_group_normalize(nh_grp);
3485 	if (!nh_grp->sum_norm_weight)
		/* No neigh of this group is connected, so we just set
		 * the trap and let everything flow through the kernel.
		 */
3489 		goto set_trap;
3490 
3491 	ecmp_size = nh_grp->sum_norm_weight;
3492 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3493 	if (err)
3494 		/* No valid allocation size available. */
3495 		goto set_trap;
3496 
3497 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3498 				  ecmp_size, &adj_index);
3499 	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through the kernel.
		 */
3503 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3504 		goto set_trap;
3505 	}
3506 	old_adj_index_valid = nh_grp->adj_index_valid;
3507 	old_adj_index = nh_grp->adj_index;
3508 	old_ecmp_size = nh_grp->ecmp_size;
3509 	nh_grp->adj_index_valid = 1;
3510 	nh_grp->adj_index = adj_index;
3511 	nh_grp->ecmp_size = ecmp_size;
3512 	mlxsw_sp_nexthop_group_rebalance(nh_grp);
3513 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
3514 	if (err) {
3515 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3516 		goto set_trap;
3517 	}
3518 
3519 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3520 
3521 	if (!old_adj_index_valid) {
	/* The trap was set for fib entries, so we have to call
	 * fib entry update to unset it and use the adjacency index.
	 */
3525 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3526 		if (err) {
3527 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3528 			goto set_trap;
3529 		}
3530 		return;
3531 	}
3532 
3533 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3534 					     old_adj_index, old_ecmp_size);
3535 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3536 			   old_ecmp_size, old_adj_index);
3537 	if (err) {
3538 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3539 		goto set_trap;
3540 	}
3541 
3542 	return;
3543 
3544 set_trap:
3545 	old_adj_index_valid = nh_grp->adj_index_valid;
3546 	nh_grp->adj_index_valid = 0;
3547 	for (i = 0; i < nh_grp->count; i++) {
3548 		nh = &nh_grp->nexthops[i];
3549 		nh->offloaded = 0;
3550 	}
3551 	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3552 	if (err)
3553 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3554 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3555 	if (old_adj_index_valid)
3556 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3557 				   nh_grp->ecmp_size, nh_grp->adj_index);
3558 }
3559 
3560 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3561 					    bool removing)
3562 {
3563 	if (!removing)
3564 		nh->should_offload = 1;
3565 	else
3566 		nh->should_offload = 0;
3567 	nh->update = 1;
3568 }
3569 
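/* The kernel declared the old neighbour dead. Look up (or create) a new
 * neighbour with the same address on the same device, re-key the entry
 * and move the reference of each nexthop from the old neighbour to the
 * new one.
 */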
3570 static int
3571 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3572 				    struct mlxsw_sp_neigh_entry *neigh_entry)
3573 {
3574 	struct neighbour *n, *old_n = neigh_entry->key.n;
3575 	struct mlxsw_sp_nexthop *nh;
3576 	bool entry_connected;
3577 	u8 nud_state, dead;
3578 	int err;
3579 
3580 	nh = list_first_entry(&neigh_entry->nexthop_list,
3581 			      struct mlxsw_sp_nexthop, neigh_list_node);
3582 
3583 	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3584 	if (!n) {
3585 		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3586 				 nh->rif->dev);
3587 		if (IS_ERR(n))
3588 			return PTR_ERR(n);
3589 		neigh_event_send(n, NULL);
3590 	}
3591 
3592 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3593 	neigh_entry->key.n = n;
3594 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3595 	if (err)
3596 		goto err_neigh_entry_insert;
3597 
3598 	read_lock_bh(&n->lock);
3599 	nud_state = n->nud_state;
3600 	dead = n->dead;
3601 	read_unlock_bh(&n->lock);
3602 	entry_connected = nud_state & NUD_VALID && !dead;
3603 
3604 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
3605 			    neigh_list_node) {
3606 		neigh_release(old_n);
3607 		neigh_clone(n);
3608 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
3609 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3610 	}
3611 
3612 	neigh_release(n);
3613 
3614 	return 0;
3615 
3616 err_neigh_entry_insert:
3617 	neigh_entry->key.n = old_n;
3618 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3619 	neigh_release(n);
3620 	return err;
3621 }
3622 
3623 static void
3624 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3625 			      struct mlxsw_sp_neigh_entry *neigh_entry,
3626 			      bool removing, bool dead)
3627 {
3628 	struct mlxsw_sp_nexthop *nh;
3629 
3630 	if (list_empty(&neigh_entry->nexthop_list))
3631 		return;
3632 
3633 	if (dead) {
3634 		int err;
3635 
3636 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
3637 							  neigh_entry);
3638 		if (err)
3639 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
3640 		return;
3641 	}
3642 
3643 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
3644 			    neigh_list_node) {
3645 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
3646 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3647 	}
3648 }
3649 
3650 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3651 				      struct mlxsw_sp_rif *rif)
3652 {
3653 	if (nh->rif)
3654 		return;
3655 
3656 	nh->rif = rif;
3657 	list_add(&nh->rif_list_node, &rif->nexthop_list);
3658 }
3659 
3660 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3661 {
3662 	if (!nh->rif)
3663 		return;
3664 
3665 	list_del(&nh->rif_list_node);
3666 	nh->rif = NULL;
3667 }
3668 
3669 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3670 				       struct mlxsw_sp_nexthop *nh)
3671 {
3672 	struct mlxsw_sp_neigh_entry *neigh_entry;
3673 	struct neighbour *n;
3674 	u8 nud_state, dead;
3675 	int err;
3676 
3677 	if (!nh->nh_grp->gateway || nh->neigh_entry)
3678 		return 0;
3679 
3680 	/* Take a reference on the neighbour here to ensure that it is
3681 	 * not destroyed before the nexthop entry is finished with it.
3682 	 * The reference is taken either in neigh_lookup() or
3683 	 * in neigh_create() in case n is not found.
3684 	 */
3685 	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3686 	if (!n) {
3687 		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3688 				 nh->rif->dev);
3689 		if (IS_ERR(n))
3690 			return PTR_ERR(n);
3691 		neigh_event_send(n, NULL);
3692 	}
3693 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3694 	if (!neigh_entry) {
3695 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3696 		if (IS_ERR(neigh_entry)) {
3697 			err = PTR_ERR(neigh_entry);
3698 			goto err_neigh_entry_create;
3699 		}
3700 	}
3701 
3702 	/* If that is the first nexthop connected to that neigh, add to
3703 	 * nexthop_neighs_list
3704 	 */
3705 	if (list_empty(&neigh_entry->nexthop_list))
3706 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
3707 			      &mlxsw_sp->router->nexthop_neighs_list);
3708 
3709 	nh->neigh_entry = neigh_entry;
3710 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3711 	read_lock_bh(&n->lock);
3712 	nud_state = n->nud_state;
3713 	dead = n->dead;
3714 	read_unlock_bh(&n->lock);
3715 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
3716 
3717 	return 0;
3718 
3719 err_neigh_entry_create:
3720 	neigh_release(n);
3721 	return err;
3722 }
3723 
3724 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3725 					struct mlxsw_sp_nexthop *nh)
3726 {
3727 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3728 	struct neighbour *n;
3729 
3730 	if (!neigh_entry)
3731 		return;
3732 	n = neigh_entry->key.n;
3733 
3734 	__mlxsw_sp_nexthop_neigh_update(nh, true);
3735 	list_del(&nh->neigh_list_node);
3736 	nh->neigh_entry = NULL;
3737 
3738 	/* If that is the last nexthop connected to that neigh, remove from
3739 	 * nexthop_neighs_list
3740 	 */
3741 	if (list_empty(&neigh_entry->nexthop_list))
3742 		list_del(&neigh_entry->nexthop_neighs_list_node);
3743 
3744 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3745 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3746 
3747 	neigh_release(n);
3748 }
3749 
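/* Check that the underlay device of an IP-in-IP tunnel is up. A tunnel
 * that is not bound to an underlay device is considered up.
 */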
3750 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3751 {
3752 	struct net_device *ul_dev;
3753 	bool is_up;
3754 
3755 	rcu_read_lock();
3756 	ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3757 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
3758 	rcu_read_unlock();
3759 
3760 	return is_up;
3761 }
3762 
3763 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3764 				       struct mlxsw_sp_nexthop *nh,
3765 				       struct mlxsw_sp_ipip_entry *ipip_entry)
3766 {
3767 	bool removing;
3768 
3769 	if (!nh->nh_grp->gateway || nh->ipip_entry)
3770 		return;
3771 
3772 	nh->ipip_entry = ipip_entry;
3773 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
3774 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
3775 	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
3776 }
3777 
3778 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3779 				       struct mlxsw_sp_nexthop *nh)
3780 {
3781 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3782 
3783 	if (!ipip_entry)
3784 		return;
3785 
3786 	__mlxsw_sp_nexthop_neigh_update(nh, true);
3787 	nh->ipip_entry = NULL;
3788 }
3789 
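/* Determine whether an IPv4 nexthop egresses through a netdev that is an
 * IP-in-IP tunnel of a type the driver recognizes; the type is optionally
 * reported via 'p_ipipt'. Only nexthops of unicast routes are considered.
 */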
3790 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3791 					const struct fib_nh *fib_nh,
3792 					enum mlxsw_sp_ipip_type *p_ipipt)
3793 {
3794 	struct net_device *dev = fib_nh->fib_nh_dev;
3795 
3796 	return dev &&
3797 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3798 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3799 }
3800 
3801 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3802 				       struct mlxsw_sp_nexthop *nh)
3803 {
3804 	switch (nh->type) {
3805 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
3806 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3807 		mlxsw_sp_nexthop_rif_fini(nh);
3808 		break;
3809 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3810 		mlxsw_sp_nexthop_rif_fini(nh);
3811 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3812 		break;
3813 	}
3814 }
3815 
3816 static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3817 				       struct mlxsw_sp_nexthop *nh,
3818 				       struct fib_nh *fib_nh)
3819 {
3820 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3821 	struct net_device *dev = fib_nh->fib_nh_dev;
3822 	struct mlxsw_sp_ipip_entry *ipip_entry;
3823 	struct mlxsw_sp_rif *rif;
3824 	int err;
3825 
3826 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3827 	if (ipip_entry) {
3828 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3829 		if (ipip_ops->can_offload(mlxsw_sp, dev,
3830 					  MLXSW_SP_L3_PROTO_IPV4)) {
3831 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3832 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3833 			return 0;
3834 		}
3835 	}
3836 
3837 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3838 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3839 	if (!rif)
3840 		return 0;
3841 
3842 	mlxsw_sp_nexthop_rif_init(nh, rif);
3843 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3844 	if (err)
3845 		goto err_neigh_init;
3846 
3847 	return 0;
3848 
3849 err_neigh_init:
3850 	mlxsw_sp_nexthop_rif_fini(nh);
3851 	return err;
3852 }
3853 
3854 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3855 					struct mlxsw_sp_nexthop *nh)
3856 {
3857 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3858 }
3859 
3860 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3861 				  struct mlxsw_sp_nexthop_group *nh_grp,
3862 				  struct mlxsw_sp_nexthop *nh,
3863 				  struct fib_nh *fib_nh)
3864 {
3865 	struct net_device *dev = fib_nh->fib_nh_dev;
3866 	struct in_device *in_dev;
3867 	int err;
3868 
3869 	nh->nh_grp = nh_grp;
3870 	nh->key.fib_nh = fib_nh;
3871 #ifdef CONFIG_IP_ROUTE_MULTIPATH
3872 	nh->nh_weight = fib_nh->fib_nh_weight;
3873 #else
3874 	nh->nh_weight = 1;
3875 #endif
3876 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
3877 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3878 	if (err)
3879 		return err;
3880 
3881 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
3882 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3883 
3884 	if (!dev)
3885 		return 0;
3886 
3887 	rcu_read_lock();
3888 	in_dev = __in_dev_get_rcu(dev);
3889 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3890 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
3891 		rcu_read_unlock();
3892 		return 0;
3893 	}
3894 	rcu_read_unlock();
3895 
3896 	err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3897 	if (err)
3898 		goto err_nexthop_neigh_init;
3899 
3900 	return 0;
3901 
3902 err_nexthop_neigh_init:
3903 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3904 	return err;
3905 }
3906 
3907 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3908 				   struct mlxsw_sp_nexthop *nh)
3909 {
3910 	mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3911 	list_del(&nh->router_list_node);
3912 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
3913 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3914 }
3915 
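/* Invoked for FIB_EVENT_NH_ADD / FIB_EVENT_NH_DEL notifications on a single
 * IPv4 nexthop. Re-initialize or tear down the nexthop's type-specific
 * state and refresh its group so the change is reflected in the adjacency
 * table.
 */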
3916 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3917 				    unsigned long event, struct fib_nh *fib_nh)
3918 {
3919 	struct mlxsw_sp_nexthop_key key;
3920 	struct mlxsw_sp_nexthop *nh;
3921 
3922 	if (mlxsw_sp->router->aborted)
3923 		return;
3924 
3925 	key.fib_nh = fib_nh;
3926 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3927 	if (!nh)
3928 		return;
3929 
3930 	switch (event) {
3931 	case FIB_EVENT_NH_ADD:
3932 		mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3933 		break;
3934 	case FIB_EVENT_NH_DEL:
3935 		mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3936 		break;
3937 	}
3938 
3939 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3940 }
3941 
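/* Re-evaluate the offload state of every nexthop egressing through the
 * RIF. Ethernet nexthops are marked as offloadable, while IP-in-IP
 * nexthops are only offloadable if the tunnel's underlay device is up.
 */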
3942 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3943 					struct mlxsw_sp_rif *rif)
3944 {
3945 	struct mlxsw_sp_nexthop *nh;
3946 	bool removing;
3947 
3948 	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3949 		switch (nh->type) {
3950 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
3951 			removing = false;
3952 			break;
3953 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3954 			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3955 			break;
3956 		default:
3957 			WARN_ON(1);
3958 			continue;
3959 		}
3960 
3961 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
3962 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3963 	}
3964 }
3965 
3966 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3967 					 struct mlxsw_sp_rif *old_rif,
3968 					 struct mlxsw_sp_rif *new_rif)
3969 {
3970 	struct mlxsw_sp_nexthop *nh;
3971 
3972 	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3973 	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3974 		nh->rif = new_rif;
3975 	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3976 }
3977 
3978 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
3979 					   struct mlxsw_sp_rif *rif)
3980 {
3981 	struct mlxsw_sp_nexthop *nh, *tmp;
3982 
3983 	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
3984 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3985 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3986 	}
3987 }
3988 
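/* An IPv4 route is a gateway route if its first nexthop has a gateway
 * (nexthop scope is link) or egresses through an IP-in-IP tunnel. Such
 * routes are programmed via the adjacency table as remote entries.
 */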
3989 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3990 				   struct fib_info *fi)
3991 {
3992 	const struct fib_nh *nh = fib_info_nh(fi, 0);
3993 
3994 	return nh->fib_nh_scope == RT_SCOPE_LINK ||
3995 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
3996 }
3997 
3998 static struct mlxsw_sp_nexthop_group *
3999 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
4000 {
4001 	unsigned int nhs = fib_info_num_path(fi);
4002 	struct mlxsw_sp_nexthop_group *nh_grp;
4003 	struct mlxsw_sp_nexthop *nh;
4004 	struct fib_nh *fib_nh;
4005 	int i;
4006 	int err;
4007 
4008 	nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
4009 	if (!nh_grp)
4010 		return ERR_PTR(-ENOMEM);
4011 	nh_grp->priv = fi;
4012 	INIT_LIST_HEAD(&nh_grp->fib_list);
4013 	nh_grp->neigh_tbl = &arp_tbl;
4014 
4015 	nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
4016 	nh_grp->count = nhs;
4017 	fib_info_hold(fi);
4018 	for (i = 0; i < nh_grp->count; i++) {
4019 		nh = &nh_grp->nexthops[i];
4020 		fib_nh = fib_info_nh(fi, i);
4021 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
4022 		if (err)
4023 			goto err_nexthop4_init;
4024 	}
4025 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4026 	if (err)
4027 		goto err_nexthop_group_insert;
4028 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4029 	return nh_grp;
4030 
4031 err_nexthop_group_insert:
4032 err_nexthop4_init:
4033 	for (i--; i >= 0; i--) {
4034 		nh = &nh_grp->nexthops[i];
4035 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4036 	}
4037 	fib_info_put(fi);
4038 	kfree(nh_grp);
4039 	return ERR_PTR(err);
4040 }
4041 
4042 static void
4043 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
4044 				struct mlxsw_sp_nexthop_group *nh_grp)
4045 {
4046 	struct mlxsw_sp_nexthop *nh;
4047 	int i;
4048 
4049 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
4050 	for (i = 0; i < nh_grp->count; i++) {
4051 		nh = &nh_grp->nexthops[i];
4052 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4053 	}
4054 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4055 	WARN_ON_ONCE(nh_grp->adj_index_valid);
4056 	fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
4057 	kfree(nh_grp);
4058 }
4059 
4060 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
4061 				       struct mlxsw_sp_fib_entry *fib_entry,
4062 				       struct fib_info *fi)
4063 {
4064 	struct mlxsw_sp_nexthop_group *nh_grp;
4065 
4066 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
4067 	if (!nh_grp) {
4068 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
4069 		if (IS_ERR(nh_grp))
4070 			return PTR_ERR(nh_grp);
4071 	}
4072 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
4073 	fib_entry->nh_group = nh_grp;
4074 	return 0;
4075 }
4076 
4077 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
4078 					struct mlxsw_sp_fib_entry *fib_entry)
4079 {
4080 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4081 
4082 	list_del(&fib_entry->nexthop_group_node);
4083 	if (!list_empty(&nh_grp->fib_list))
4084 		return;
4085 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
4086 }
4087 
4088 static bool
4089 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4090 {
4091 	struct mlxsw_sp_fib4_entry *fib4_entry;
4092 
4093 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4094 				  common);
4095 	return !fib4_entry->tos;
4096 }
4097 
4098 static bool
4099 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4100 {
4101 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4102 
4103 	switch (fib_entry->fib_node->fib->proto) {
4104 	case MLXSW_SP_L3_PROTO_IPV4:
4105 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
4106 			return false;
4107 		break;
4108 	case MLXSW_SP_L3_PROTO_IPV6:
4109 		break;
4110 	}
4111 
4112 	switch (fib_entry->type) {
4113 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4114 		return !!nh_group->adj_index_valid;
4115 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4116 		return !!nh_group->nh_rif;
4117 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4118 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4119 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4120 		return true;
4121 	default:
4122 		return false;
4123 	}
4124 }
4125 
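/* Find the nexthop in the group that corresponds to the given IPv6 route,
 * matching on both the egress device and the gateway address.
 */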
4126 static struct mlxsw_sp_nexthop *
4127 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4128 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4129 {
4130 	int i;
4131 
4132 	for (i = 0; i < nh_grp->count; i++) {
4133 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4134 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
4135 
4136 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
4137 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
4138 				    &rt->fib6_nh->fib_nh_gw6))
4139 			return nh;
4141 	}
4142 
4143 	return NULL;
4144 }
4145 
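/* Report the IPv4 route's hardware state to the kernel via
 * fib_alias_hw_flags_set(): the route is marked as offloaded when it is
 * programmed to forward in hardware and as trapping when matching packets
 * are sent to the CPU instead.
 */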
4146 static void
4147 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4148 				 struct mlxsw_sp_fib_entry *fib_entry)
4149 {
4150 	struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
4151 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4152 	int dst_len = fib_entry->fib_node->key.prefix_len;
4153 	struct mlxsw_sp_fib4_entry *fib4_entry;
4154 	struct fib_rt_info fri;
4155 	bool should_offload;
4156 
4157 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4158 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4159 				  common);
4160 	fri.fi = fi;
4161 	fri.tb_id = fib4_entry->tb_id;
4162 	fri.dst = cpu_to_be32(*p_dst);
4163 	fri.dst_len = dst_len;
4164 	fri.tos = fib4_entry->tos;
4165 	fri.type = fib4_entry->type;
4166 	fri.offload = should_offload;
4167 	fri.trap = !should_offload;
4168 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4169 }
4170 
4171 static void
4172 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4173 				   struct mlxsw_sp_fib_entry *fib_entry)
4174 {
4175 	struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
4176 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4177 	int dst_len = fib_entry->fib_node->key.prefix_len;
4178 	struct mlxsw_sp_fib4_entry *fib4_entry;
4179 	struct fib_rt_info fri;
4180 
4181 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4182 				  common);
4183 	fri.fi = fi;
4184 	fri.tb_id = fib4_entry->tb_id;
4185 	fri.dst = cpu_to_be32(*p_dst);
4186 	fri.dst_len = dst_len;
4187 	fri.tos = fib4_entry->tos;
4188 	fri.type = fib4_entry->type;
4189 	fri.offload = false;
4190 	fri.trap = false;
4191 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4192 }
4193 
4194 static void
4195 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4196 				 struct mlxsw_sp_fib_entry *fib_entry)
4197 {
4198 	struct mlxsw_sp_fib6_entry *fib6_entry;
4199 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4200 	bool should_offload;
4201 
4202 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4203 
4204 	/* In IPv6 a multipath route is represented using multiple routes, so
4205 	 * we need to set the flags on all of them.
4206 	 */
4207 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4208 				  common);
4209 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4210 		fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
4211 				       !should_offload);
4212 }
4213 
4214 static void
4215 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4216 				   struct mlxsw_sp_fib_entry *fib_entry)
4217 {
4218 	struct mlxsw_sp_fib6_entry *fib6_entry;
4219 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4220 
4221 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4222 				  common);
4223 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4224 		fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
4225 }
4226 
4227 static void
4228 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4229 				struct mlxsw_sp_fib_entry *fib_entry)
4230 {
4231 	switch (fib_entry->fib_node->fib->proto) {
4232 	case MLXSW_SP_L3_PROTO_IPV4:
4233 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
4234 		break;
4235 	case MLXSW_SP_L3_PROTO_IPV6:
4236 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
4237 		break;
4238 	}
4239 }
4240 
4241 static void
4242 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4243 				  struct mlxsw_sp_fib_entry *fib_entry)
4244 {
4245 	switch (fib_entry->fib_node->fib->proto) {
4246 	case MLXSW_SP_L3_PROTO_IPV4:
4247 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4248 		break;
4249 	case MLXSW_SP_L3_PROTO_IPV6:
4250 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4251 		break;
4252 	}
4253 }
4254 
4255 static void
4256 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
4257 				    struct mlxsw_sp_fib_entry *fib_entry,
4258 				    enum mlxsw_reg_ralue_op op)
4259 {
4260 	switch (op) {
4261 	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
4262 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
4263 		break;
4264 	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
4265 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4266 		break;
4267 	default:
4268 		break;
4269 	}
4270 }
4271 
4272 static void
4273 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
4274 			      const struct mlxsw_sp_fib_entry *fib_entry,
4275 			      enum mlxsw_reg_ralue_op op)
4276 {
4277 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
4278 	enum mlxsw_reg_ralxx_protocol proto;
4279 	u32 *p_dip;
4280 
4281 	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
4282 
4283 	switch (fib->proto) {
4284 	case MLXSW_SP_L3_PROTO_IPV4:
4285 		p_dip = (u32 *) fib_entry->fib_node->key.addr;
4286 		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
4287 				      fib_entry->fib_node->key.prefix_len,
4288 				      *p_dip);
4289 		break;
4290 	case MLXSW_SP_L3_PROTO_IPV6:
4291 		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
4292 				      fib_entry->fib_node->key.prefix_len,
4293 				      fib_entry->fib_node->key.addr);
4294 		break;
4295 	}
4296 }
4297 
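/* Lazily allocate a single adjacency entry with a discard action and bind
 * it to the given RIF. It backs routes whose nexthop group has a RIF but
 * no valid adjacency index, so that matching packets are dropped in
 * hardware rather than trapped.
 */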
4298 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
4299 {
4300 	enum mlxsw_reg_ratr_trap_action trap_action;
4301 	char ratr_pl[MLXSW_REG_RATR_LEN];
4302 	int err;
4303 
4304 	if (mlxsw_sp->router->adj_discard_index_valid)
4305 		return 0;
4306 
4307 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4308 				  &mlxsw_sp->router->adj_discard_index);
4309 	if (err)
4310 		return err;
4311 
4312 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
4313 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4314 			    MLXSW_REG_RATR_TYPE_ETHERNET,
4315 			    mlxsw_sp->router->adj_discard_index, rif_index);
4316 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4317 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4318 	if (err)
4319 		goto err_ratr_write;
4320 
4321 	mlxsw_sp->router->adj_discard_index_valid = true;
4322 
4323 	return 0;
4324 
4325 err_ratr_write:
4326 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4327 			   mlxsw_sp->router->adj_discard_index);
4328 	return err;
4329 }
4330 
4331 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4332 					struct mlxsw_sp_fib_entry *fib_entry,
4333 					enum mlxsw_reg_ralue_op op)
4334 {
4335 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4336 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4337 	enum mlxsw_reg_ralue_trap_action trap_action;
4338 	u16 trap_id = 0;
4339 	u32 adjacency_index = 0;
4340 	u16 ecmp_size = 0;
4341 	int err;
4342 
4343 	/* In case the nexthop group adjacency index is valid, use it
4344 	 * with the provided ECMP size. Otherwise, set up a trap and
4345 	 * pass the traffic to the kernel.
4346 	 */
4347 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4348 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4349 		adjacency_index = fib_entry->nh_group->adj_index;
4350 		ecmp_size = fib_entry->nh_group->ecmp_size;
4351 	} else if (!nh_group->adj_index_valid && nh_group->count &&
4352 		   nh_group->nh_rif) {
4353 		err = mlxsw_sp_adj_discard_write(mlxsw_sp,
4354 						 nh_group->nh_rif->rif_index);
4355 		if (err)
4356 			return err;
4357 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4358 		adjacency_index = mlxsw_sp->router->adj_discard_index;
4359 		ecmp_size = 1;
4360 	} else {
4361 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4362 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4363 	}
4364 
4365 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4366 	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
4367 					adjacency_index, ecmp_size);
4368 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4369 }
4370 
4371 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4372 				       struct mlxsw_sp_fib_entry *fib_entry,
4373 				       enum mlxsw_reg_ralue_op op)
4374 {
4375 	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
4376 	enum mlxsw_reg_ralue_trap_action trap_action;
4377 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4378 	u16 trap_id = 0;
4379 	u16 rif_index = 0;
4380 
4381 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4382 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4383 		rif_index = rif->rif_index;
4384 	} else {
4385 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4386 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4387 	}
4388 
4389 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4390 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
4391 				       rif_index);
4392 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4393 }
4394 
4395 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4396 				      struct mlxsw_sp_fib_entry *fib_entry,
4397 				      enum mlxsw_reg_ralue_op op)
4398 {
4399 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4400 
4401 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4402 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
4403 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4404 }
4405 
4406 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
4407 					   struct mlxsw_sp_fib_entry *fib_entry,
4408 					   enum mlxsw_reg_ralue_op op)
4409 {
4410 	enum mlxsw_reg_ralue_trap_action trap_action;
4411 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4412 
4413 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
4414 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4415 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
4416 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4417 }
4418 
4419 static int
4420 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
4421 				  struct mlxsw_sp_fib_entry *fib_entry,
4422 				  enum mlxsw_reg_ralue_op op)
4423 {
4424 	enum mlxsw_reg_ralue_trap_action trap_action;
4425 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4426 	u16 trap_id;
4427 
4428 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4429 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
4430 
4431 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4432 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
4433 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4434 }
4435 
4436 static int
4437 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4438 				 struct mlxsw_sp_fib_entry *fib_entry,
4439 				 enum mlxsw_reg_ralue_op op)
4440 {
4441 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4442 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4443 
4444 	if (WARN_ON(!ipip_entry))
4445 		return -EINVAL;
4446 
4447 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4448 	return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4449 				      fib_entry->decap.tunnel_index);
4450 }
4451 
4452 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
4453 					   struct mlxsw_sp_fib_entry *fib_entry,
4454 					   enum mlxsw_reg_ralue_op op)
4455 {
4456 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4457 
4458 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4459 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
4460 					   fib_entry->decap.tunnel_index);
4461 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4462 }
4463 
4464 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4465 				   struct mlxsw_sp_fib_entry *fib_entry,
4466 				   enum mlxsw_reg_ralue_op op)
4467 {
4468 	switch (fib_entry->type) {
4469 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4470 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
4471 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4472 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
4473 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
4474 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
4475 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4476 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
4477 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
4478 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
4479 							 op);
4480 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4481 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4482 							fib_entry, op);
4483 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4484 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
4485 	}
4486 	return -EINVAL;
4487 }
4488 
4489 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4490 				 struct mlxsw_sp_fib_entry *fib_entry,
4491 				 enum mlxsw_reg_ralue_op op)
4492 {
4493 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
4494 
4495 	if (err)
4496 		return err;
4497 
4498 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
4499 
4500 	return err;
4501 }
4502 
4503 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4504 				     struct mlxsw_sp_fib_entry *fib_entry)
4505 {
4506 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4507 				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
4508 }
4509 
4510 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4511 				  struct mlxsw_sp_fib_entry *fib_entry)
4512 {
4513 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4514 				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
4515 }
4516 
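/* Derive the hardware entry type from the kernel route type. Local routes
 * can actually be IP-in-IP or NVE decapsulation points, in which case they
 * are programmed as such instead of as traps.
 */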
4517 static int
4518 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4519 			     const struct fib_entry_notifier_info *fen_info,
4520 			     struct mlxsw_sp_fib_entry *fib_entry)
4521 {
4522 	struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
4523 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4524 	struct mlxsw_sp_router *router = mlxsw_sp->router;
4525 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
4526 	struct mlxsw_sp_ipip_entry *ipip_entry;
4527 	struct fib_info *fi = fen_info->fi;
4528 
4529 	switch (fen_info->type) {
4530 	case RTN_LOCAL:
4531 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4532 						 MLXSW_SP_L3_PROTO_IPV4, dip);
4533 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
4534 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4535 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4536 							     fib_entry,
4537 							     ipip_entry);
4538 		}
4539 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
4540 						 MLXSW_SP_L3_PROTO_IPV4,
4541 						 &dip)) {
4542 			u32 tunnel_index;
4543 
4544 			tunnel_index = router->nve_decap_config.tunnel_index;
4545 			fib_entry->decap.tunnel_index = tunnel_index;
4546 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
4547 			return 0;
4548 		}
4549 		/* fall through */
4550 	case RTN_BROADCAST:
4551 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4552 		return 0;
4553 	case RTN_BLACKHOLE:
4554 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
4555 		return 0;
4556 	case RTN_UNREACHABLE: /* fall through */
4557 	case RTN_PROHIBIT:
4558 		/* Packets hitting these routes need to be trapped, but can
4559 		 * be trapped with a lower priority than packets directed
4560 		 * at the host, so use action type local instead of trap.
4561 		 */
4562 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
4563 		return 0;
4564 	case RTN_UNICAST:
4565 		if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
4566 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4567 		else
4568 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4569 		return 0;
4570 	default:
4571 		return -EINVAL;
4572 	}
4573 }
4574 
4575 static void
4576 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
4577 			       struct mlxsw_sp_fib_entry *fib_entry)
4578 {
4579 	switch (fib_entry->type) {
4580 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4581 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
4582 		break;
4583 	default:
4584 		break;
4585 	}
4586 }
4587 
4588 static struct mlxsw_sp_fib4_entry *
4589 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4590 			   struct mlxsw_sp_fib_node *fib_node,
4591 			   const struct fib_entry_notifier_info *fen_info)
4592 {
4593 	struct mlxsw_sp_fib4_entry *fib4_entry;
4594 	struct mlxsw_sp_fib_entry *fib_entry;
4595 	int err;
4596 
4597 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4598 	if (!fib4_entry)
4599 		return ERR_PTR(-ENOMEM);
4600 	fib_entry = &fib4_entry->common;
4601 
4602 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4603 	if (err)
4604 		goto err_fib4_entry_type_set;
4605 
4606 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
4607 	if (err)
4608 		goto err_nexthop4_group_get;
4609 
4610 	fib4_entry->prio = fen_info->fi->fib_priority;
4611 	fib4_entry->tb_id = fen_info->tb_id;
4612 	fib4_entry->type = fen_info->type;
4613 	fib4_entry->tos = fen_info->tos;
4614 
4615 	fib_entry->fib_node = fib_node;
4616 
4617 	return fib4_entry;
4618 
4619 err_nexthop4_group_get:
4620 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
4621 err_fib4_entry_type_set:
4622 	kfree(fib4_entry);
4623 	return ERR_PTR(err);
4624 }
4625 
4626 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4627 					struct mlxsw_sp_fib4_entry *fib4_entry)
4628 {
4629 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
4630 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
4631 	kfree(fib4_entry);
4632 }
4633 
4634 static struct mlxsw_sp_fib4_entry *
4635 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4636 			   const struct fib_entry_notifier_info *fen_info)
4637 {
4638 	struct mlxsw_sp_fib4_entry *fib4_entry;
4639 	struct mlxsw_sp_fib_node *fib_node;
4640 	struct mlxsw_sp_fib *fib;
4641 	struct mlxsw_sp_vr *vr;
4642 
4643 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4644 	if (!vr)
4645 		return NULL;
4646 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4647 
4648 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4649 					    sizeof(fen_info->dst),
4650 					    fen_info->dst_len);
4651 	if (!fib_node)
4652 		return NULL;
4653 
4654 	fib4_entry = container_of(fib_node->fib_entry,
4655 				  struct mlxsw_sp_fib4_entry, common);
4656 	if (fib4_entry->tb_id == fen_info->tb_id &&
4657 	    fib4_entry->tos == fen_info->tos &&
4658 	    fib4_entry->type == fen_info->type &&
4659 	    mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4660 	    fen_info->fi)
4661 		return fib4_entry;
4662 
4663 	return NULL;
4664 }
4665 
4666 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4667 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4668 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4669 	.key_len = sizeof(struct mlxsw_sp_fib_key),
4670 	.automatic_shrinking = true,
4671 };
4672 
4673 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4674 				    struct mlxsw_sp_fib_node *fib_node)
4675 {
4676 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4677 				      mlxsw_sp_fib_ht_params);
4678 }
4679 
4680 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4681 				     struct mlxsw_sp_fib_node *fib_node)
4682 {
4683 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4684 			       mlxsw_sp_fib_ht_params);
4685 }
4686 
4687 static struct mlxsw_sp_fib_node *
4688 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4689 			 size_t addr_len, unsigned char prefix_len)
4690 {
4691 	struct mlxsw_sp_fib_key key;
4692 
4693 	memset(&key, 0, sizeof(key));
4694 	memcpy(key.addr, addr, addr_len);
4695 	key.prefix_len = prefix_len;
4696 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4697 }
4698 
4699 static struct mlxsw_sp_fib_node *
4700 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
4701 			 size_t addr_len, unsigned char prefix_len)
4702 {
4703 	struct mlxsw_sp_fib_node *fib_node;
4704 
4705 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4706 	if (!fib_node)
4707 		return NULL;
4708 
4709 	list_add(&fib_node->list, &fib->node_list);
4710 	memcpy(fib_node->key.addr, addr, addr_len);
4711 	fib_node->key.prefix_len = prefix_len;
4712 
4713 	return fib_node;
4714 }
4715 
4716 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4717 {
4718 	list_del(&fib_node->list);
4719 	kfree(fib_node);
4720 }
4721 
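/* Account the node's prefix length in the LPM tree bound to this FIB. If
 * the length is not yet in use, get a tree that also includes it and
 * migrate all the virtual routers bound to the old tree.
 */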
4722 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4723 				      struct mlxsw_sp_fib_node *fib_node)
4724 {
4725 	struct mlxsw_sp_prefix_usage req_prefix_usage;
4726 	struct mlxsw_sp_fib *fib = fib_node->fib;
4727 	struct mlxsw_sp_lpm_tree *lpm_tree;
4728 	int err;
4729 
4730 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4731 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4732 		goto out;
4733 
4734 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4735 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4736 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4737 					 fib->proto);
4738 	if (IS_ERR(lpm_tree))
4739 		return PTR_ERR(lpm_tree);
4740 
4741 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4742 	if (err)
4743 		goto err_lpm_tree_replace;
4744 
4745 out:
4746 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
4747 	return 0;
4748 
4749 err_lpm_tree_replace:
4750 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4751 	return err;
4752 }
4753 
4754 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4755 					 struct mlxsw_sp_fib_node *fib_node)
4756 {
4757 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4758 	struct mlxsw_sp_prefix_usage req_prefix_usage;
4759 	struct mlxsw_sp_fib *fib = fib_node->fib;
4760 	int err;
4761 
4762 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4763 		return;
4764 	/* Try to construct a new LPM tree from the current prefix usage
4765 	 * minus the now-unused prefix length. If we fail, keep the old tree.
4766 	 */
4767 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4768 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4769 				    fib_node->key.prefix_len);
4770 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4771 					 fib->proto);
4772 	if (IS_ERR(lpm_tree))
4773 		return;
4774 
4775 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4776 	if (err)
4777 		goto err_lpm_tree_replace;
4778 
4779 	return;
4780 
4781 err_lpm_tree_replace:
4782 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4783 }
4784 
4785 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4786 				  struct mlxsw_sp_fib_node *fib_node,
4787 				  struct mlxsw_sp_fib *fib)
4788 {
4789 	int err;
4790 
4791 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
4792 	if (err)
4793 		return err;
4794 	fib_node->fib = fib;
4795 
4796 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
4797 	if (err)
4798 		goto err_fib_lpm_tree_link;
4799 
4800 	return 0;
4801 
4802 err_fib_lpm_tree_link:
4803 	fib_node->fib = NULL;
4804 	mlxsw_sp_fib_node_remove(fib, fib_node);
4805 	return err;
4806 }
4807 
4808 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4809 				   struct mlxsw_sp_fib_node *fib_node)
4810 {
4811 	struct mlxsw_sp_fib *fib = fib_node->fib;
4812 
4813 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
4814 	fib_node->fib = NULL;
4815 	mlxsw_sp_fib_node_remove(fib, fib_node);
4816 }
4817 
4818 static struct mlxsw_sp_fib_node *
4819 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4820 		      size_t addr_len, unsigned char prefix_len,
4821 		      enum mlxsw_sp_l3proto proto)
4822 {
4823 	struct mlxsw_sp_fib_node *fib_node;
4824 	struct mlxsw_sp_fib *fib;
4825 	struct mlxsw_sp_vr *vr;
4826 	int err;
4827 
4828 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
4829 	if (IS_ERR(vr))
4830 		return ERR_CAST(vr);
4831 	fib = mlxsw_sp_vr_fib(vr, proto);
4832 
4833 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
4834 	if (fib_node)
4835 		return fib_node;
4836 
4837 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
4838 	if (!fib_node) {
4839 		err = -ENOMEM;
4840 		goto err_fib_node_create;
4841 	}
4842 
4843 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4844 	if (err)
4845 		goto err_fib_node_init;
4846 
4847 	return fib_node;
4848 
4849 err_fib_node_init:
4850 	mlxsw_sp_fib_node_destroy(fib_node);
4851 err_fib_node_create:
4852 	mlxsw_sp_vr_put(mlxsw_sp, vr);
4853 	return ERR_PTR(err);
4854 }
4855 
4856 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4857 				  struct mlxsw_sp_fib_node *fib_node)
4858 {
4859 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
4860 
4861 	if (fib_node->fib_entry)
4862 		return;
4863 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
4864 	mlxsw_sp_fib_node_destroy(fib_node);
4865 	mlxsw_sp_vr_put(mlxsw_sp, vr);
4866 }
4867 
4868 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
4869 					struct mlxsw_sp_fib_entry *fib_entry)
4870 {
4871 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4872 	int err;
4873 
4874 	fib_node->fib_entry = fib_entry;
4875 
4876 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4877 	if (err)
4878 		goto err_fib_entry_update;
4879 
4880 	return 0;
4881 
4882 err_fib_entry_update:
4883 	fib_node->fib_entry = NULL;
4884 	return err;
4885 }
4886 
4887 static void
4888 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4889 			       struct mlxsw_sp_fib_entry *fib_entry)
4890 {
4891 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4892 
4893 	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4894 	fib_node->fib_entry = NULL;
4895 }
4896 
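/* The driver translates the IPv4 local table to the main table (see
 * mlxsw_sp_fix_tb_id()), so a route from the main table must not replace
 * a conflicting route from the local table, which takes precedence.
 */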
4897 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
4898 {
4899 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4900 	struct mlxsw_sp_fib4_entry *fib4_replaced;
4901 
4902 	if (!fib_node->fib_entry)
4903 		return true;
4904 
4905 	fib4_replaced = container_of(fib_node->fib_entry,
4906 				     struct mlxsw_sp_fib4_entry, common);
4907 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
4908 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
4909 		return false;
4910 
4911 	return true;
4912 }
4913 
4914 static int
4915 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
4916 			     const struct fib_entry_notifier_info *fen_info)
4917 {
4918 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
4919 	struct mlxsw_sp_fib_entry *replaced;
4920 	struct mlxsw_sp_fib_node *fib_node;
4921 	int err;
4922 
4923 	if (mlxsw_sp->router->aborted)
4924 		return 0;
4925 
4926 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4927 					 &fen_info->dst, sizeof(fen_info->dst),
4928 					 fen_info->dst_len,
4929 					 MLXSW_SP_L3_PROTO_IPV4);
4930 	if (IS_ERR(fib_node)) {
4931 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4932 		return PTR_ERR(fib_node);
4933 	}
4934 
4935 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4936 	if (IS_ERR(fib4_entry)) {
4937 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
4938 		err = PTR_ERR(fib4_entry);
4939 		goto err_fib4_entry_create;
4940 	}
4941 
4942 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
4943 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4944 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4945 		return 0;
4946 	}
4947 
4948 	replaced = fib_node->fib_entry;
4949 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
4950 	if (err) {
4951 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4952 		goto err_fib_node_entry_link;
4953 	}
4954 
4955 	/* Nothing to replace */
4956 	if (!replaced)
4957 		return 0;
4958 
4959 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
4960 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
4961 				     common);
4962 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
4963 
4964 	return 0;
4965 
4966 err_fib_node_entry_link:
4967 	fib_node->fib_entry = replaced;
4968 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4969 err_fib4_entry_create:
4970 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4971 	return err;
4972 }
4973 
4974 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4975 				     struct fib_entry_notifier_info *fen_info)
4976 {
4977 	struct mlxsw_sp_fib4_entry *fib4_entry;
4978 	struct mlxsw_sp_fib_node *fib_node;
4979 
4980 	if (mlxsw_sp->router->aborted)
4981 		return;
4982 
4983 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4984 	if (!fib4_entry)
4985 		return;
4986 	fib_node = fib4_entry->common.fib_node;
4987 
4988 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
4989 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4990 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4991 }
4992 
4993 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
4994 {
4995 	/* Packets with a link-local destination IP arriving at the router
4996 	 * are trapped to the CPU, so there is no need to program specific
4997 	 * routes for them.
4998 	 */
4999 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
5000 		return true;
5001 
5002 	/* Multicast routes aren't supported, so ignore them. Neighbour
5003 	 * Discovery packets are specifically trapped.
5004 	 */
5005 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
5006 		return true;
5007 
5008 	/* Cloned routes are irrelevant in the forwarding path. */
5009 	if (rt->fib6_flags & RTF_CACHE)
5010 		return true;
5011 
5012 	return false;
5013 }
5014 
5015 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
5016 {
5017 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5018 
5019 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
5020 	if (!mlxsw_sp_rt6)
5021 		return ERR_PTR(-ENOMEM);
5022 
5023 	/* In case of route replace, the replaced route is deleted with
5024 	 * no notification. Take a reference to prevent accessing freed
5025 	 * memory.
5026 	 */
5027 	mlxsw_sp_rt6->rt = rt;
5028 	fib6_info_hold(rt);
5029 
5030 	return mlxsw_sp_rt6;
5031 }
5032 
5033 #if IS_ENABLED(CONFIG_IPV6)
5034 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5035 {
5036 	fib6_info_release(rt);
5037 }
5038 #else
5039 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5040 {
5041 }
5042 #endif
5043 
5044 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5045 {
5046 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
5047 
5048 	fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
5049 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
5050 	kfree(mlxsw_sp_rt6);
5051 }
5052 
5053 static struct fib6_info *
5054 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
5055 {
5056 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
5057 				list)->rt;
5058 }
5059 
5060 static struct mlxsw_sp_rt6 *
5061 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
5062 			    const struct fib6_info *rt)
5063 {
5064 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5065 
5066 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
5067 		if (mlxsw_sp_rt6->rt == rt)
5068 			return mlxsw_sp_rt6;
5069 	}
5070 
5071 	return NULL;
5072 }
5073 
5074 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
5075 					const struct fib6_info *rt,
5076 					enum mlxsw_sp_ipip_type *ret)
5077 {
5078 	return rt->fib6_nh->fib_nh_dev &&
5079 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
5080 }
5081 
5082 static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
5083 				       struct mlxsw_sp_nexthop_group *nh_grp,
5084 				       struct mlxsw_sp_nexthop *nh,
5085 				       const struct fib6_info *rt)
5086 {
5087 	const struct mlxsw_sp_ipip_ops *ipip_ops;
5088 	struct mlxsw_sp_ipip_entry *ipip_entry;
5089 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5090 	struct mlxsw_sp_rif *rif;
5091 	int err;
5092 
5093 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
5094 	if (ipip_entry) {
5095 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5096 		if (ipip_ops->can_offload(mlxsw_sp, dev,
5097 					  MLXSW_SP_L3_PROTO_IPV6)) {
5098 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
5099 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
5100 			return 0;
5101 		}
5102 	}
5103 
5104 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
5105 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
5106 	if (!rif)
5107 		return 0;
5108 	mlxsw_sp_nexthop_rif_init(nh, rif);
5109 
5110 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
5111 	if (err)
5112 		goto err_nexthop_neigh_init;
5113 
5114 	return 0;
5115 
5116 err_nexthop_neigh_init:
5117 	mlxsw_sp_nexthop_rif_fini(nh);
5118 	return err;
5119 }
5120 
5121 static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
5122 					struct mlxsw_sp_nexthop *nh)
5123 {
5124 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5125 }
5126 
5127 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
5128 				  struct mlxsw_sp_nexthop_group *nh_grp,
5129 				  struct mlxsw_sp_nexthop *nh,
5130 				  const struct fib6_info *rt)
5131 {
5132 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5133 
5134 	nh->nh_grp = nh_grp;
5135 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
5136 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
5137 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5138 
5139 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5140 
5141 	if (!dev)
5142 		return 0;
5143 	nh->ifindex = dev->ifindex;
5144 
5145 	return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
5146 }
5147 
5148 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
5149 				   struct mlxsw_sp_nexthop *nh)
5150 {
5151 	mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
5152 	list_del(&nh->router_list_node);
5153 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5154 }
5155 
5156 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5157 				    const struct fib6_info *rt)
5158 {
5159 	return rt->fib6_nh->fib_nh_gw_family ||
5160 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
5161 }
5162 
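/* Create a nexthop group from an IPv6 entry, with one nexthop per
 * fib6_info of the multipath route. The group is inserted into a hash
 * table keyed by its nexthops, so identical multipath routes can share it.
 */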
5163 static struct mlxsw_sp_nexthop_group *
5164 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
5165 			       struct mlxsw_sp_fib6_entry *fib6_entry)
5166 {
5167 	struct mlxsw_sp_nexthop_group *nh_grp;
5168 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5169 	struct mlxsw_sp_nexthop *nh;
5170 	int i = 0;
5171 	int err;
5172 
5173 	nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
5174 			 GFP_KERNEL);
5175 	if (!nh_grp)
5176 		return ERR_PTR(-ENOMEM);
5177 	INIT_LIST_HEAD(&nh_grp->fib_list);
5178 #if IS_ENABLED(CONFIG_IPV6)
5179 	nh_grp->neigh_tbl = &nd_tbl;
5180 #endif
5181 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
5182 					struct mlxsw_sp_rt6, list);
5183 	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
5184 	nh_grp->count = fib6_entry->nrt6;
5185 	for (i = 0; i < nh_grp->count; i++) {
5186 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5187 
5188 		nh = &nh_grp->nexthops[i];
5189 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
5190 		if (err)
5191 			goto err_nexthop6_init;
5192 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
5193 	}
5194 
5195 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5196 	if (err)
5197 		goto err_nexthop_group_insert;
5198 
5199 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5200 	return nh_grp;
5201 
5202 err_nexthop_group_insert:
5203 err_nexthop6_init:
5204 	for (i--; i >= 0; i--) {
5205 		nh = &nh_grp->nexthops[i];
5206 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5207 	}
5208 	kfree(nh_grp);
5209 	return ERR_PTR(err);
5210 }
5211 
5212 static void
5213 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
5214 				struct mlxsw_sp_nexthop_group *nh_grp)
5215 {
5216 	struct mlxsw_sp_nexthop *nh;
5217 	int i = nh_grp->count;
5218 
5219 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5220 	for (i--; i >= 0; i--) {
5221 		nh = &nh_grp->nexthops[i];
5222 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5223 	}
5224 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5225 	WARN_ON(nh_grp->adj_index_valid);
5226 	kfree(nh_grp);
5227 }
5228 
5229 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
5230 				       struct mlxsw_sp_fib6_entry *fib6_entry)
5231 {
5232 	struct mlxsw_sp_nexthop_group *nh_grp;
5233 
5234 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
5235 	if (!nh_grp) {
5236 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
5237 		if (IS_ERR(nh_grp))
5238 			return PTR_ERR(nh_grp);
5239 	}
5240 
5241 	list_add_tail(&fib6_entry->common.nexthop_group_node,
5242 		      &nh_grp->fib_list);
5243 	fib6_entry->common.nh_group = nh_grp;
5244 
5245 	/* The route and the nexthop are described by the same struct, so we
5246 	 * need to update the nexthop offload indication for the new route.
5247 	 */
5248 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
5249 
5250 	return 0;
5251 }
5252 
5253 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
5254 					struct mlxsw_sp_fib_entry *fib_entry)
5255 {
5256 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5257 
5258 	list_del(&fib_entry->nexthop_group_node);
5259 	if (!list_empty(&nh_grp->fib_list))
5260 		return;
5261 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
5262 }
5263 
5264 static int
5265 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5266 			       struct mlxsw_sp_fib6_entry *fib6_entry)
5267 {
5268 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5269 	int err;
5270 
5271 	fib6_entry->common.nh_group = NULL;
5272 	list_del(&fib6_entry->common.nexthop_group_node);
5273 
5274 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5275 	if (err)
5276 		goto err_nexthop6_group_get;
5277 
5278 	/* In case this entry is offloaded, then the adjacency index
5279 	 * currently associated with it in the device's table is that
5280 	 * of the old group. Start using the new one instead.
5281 	 */
5282 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
5283 	if (err)
5284 		goto err_fib_entry_update;
5285 
5286 	if (list_empty(&old_nh_grp->fib_list))
5287 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5288 
5289 	return 0;
5290 
5291 err_fib_entry_update:
5292 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5293 err_nexthop6_group_get:
5294 	list_add_tail(&fib6_entry->common.nexthop_group_node,
5295 		      &old_nh_grp->fib_list);
5296 	fib6_entry->common.nh_group = old_nh_grp;
5297 	return err;
5298 }
5299 
5300 static int
5301 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5302 				struct mlxsw_sp_fib6_entry *fib6_entry,
5303 				struct fib6_info **rt_arr, unsigned int nrt6)
5304 {
5305 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5306 	int err, i;
5307 
5308 	for (i = 0; i < nrt6; i++) {
5309 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5310 		if (IS_ERR(mlxsw_sp_rt6)) {
5311 			err = PTR_ERR(mlxsw_sp_rt6);
5312 			goto err_rt6_create;
5313 		}
5314 
5315 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5316 		fib6_entry->nrt6++;
5317 	}
5318 
5319 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5320 	if (err)
5321 		goto err_nexthop6_group_update;
5322 
5323 	return 0;
5324 
5325 err_nexthop6_group_update:
5326 	i = nrt6;
5327 err_rt6_create:
5328 	for (i--; i >= 0; i--) {
5329 		fib6_entry->nrt6--;
5330 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5331 					       struct mlxsw_sp_rt6, list);
5332 		list_del(&mlxsw_sp_rt6->list);
5333 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5334 	}
5335 	return err;
5336 }
5337 
5338 static void
5339 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5340 				struct mlxsw_sp_fib6_entry *fib6_entry,
5341 				struct fib6_info **rt_arr, unsigned int nrt6)
5342 {
5343 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5344 	int i;
5345 
5346 	for (i = 0; i < nrt6; i++) {
5347 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
5348 							   rt_arr[i]);
5349 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
5350 			continue;
5351 
5352 		fib6_entry->nrt6--;
5353 		list_del(&mlxsw_sp_rt6->list);
5354 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5355 	}
5356 
5357 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5358 }
5359 
5360 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5361 					 struct mlxsw_sp_fib_entry *fib_entry,
5362 					 const struct fib6_info *rt)
5363 {
5364 	/* Packets hitting RTF_REJECT routes need to be discarded by the
5365 	 * stack. We can rely on their destination device not having a
5366 	 * RIF (it's the loopback device) and can thus use action type
5367 	 * local, which will cause them to be trapped with a lower
5368 	 * priority than packets that need to be locally received.
5369 	 */
5370 	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
5371 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5372 	else if (rt->fib6_type == RTN_BLACKHOLE)
5373 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
5374 	else if (rt->fib6_flags & RTF_REJECT)
5375 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
5376 	else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
5377 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5378 	else
5379 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5380 }
5381 
5382 static void
5383 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5384 {
5385 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5386 
5387 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5388 				 list) {
5389 		fib6_entry->nrt6--;
5390 		list_del(&mlxsw_sp_rt6->list);
5391 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5392 	}
5393 }
5394 
5395 static struct mlxsw_sp_fib6_entry *
5396 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5397 			   struct mlxsw_sp_fib_node *fib_node,
5398 			   struct fib6_info **rt_arr, unsigned int nrt6)
5399 {
5400 	struct mlxsw_sp_fib6_entry *fib6_entry;
5401 	struct mlxsw_sp_fib_entry *fib_entry;
5402 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5403 	int err, i;
5404 
5405 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5406 	if (!fib6_entry)
5407 		return ERR_PTR(-ENOMEM);
5408 	fib_entry = &fib6_entry->common;
5409 
5410 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
5411 
5412 	for (i = 0; i < nrt6; i++) {
5413 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5414 		if (IS_ERR(mlxsw_sp_rt6)) {
5415 			err = PTR_ERR(mlxsw_sp_rt6);
5416 			goto err_rt6_create;
5417 		}
5418 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5419 		fib6_entry->nrt6++;
5420 	}
5421 
5422 	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
5423 
5424 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5425 	if (err)
5426 		goto err_nexthop6_group_get;
5427 
5428 	fib_entry->fib_node = fib_node;
5429 
5430 	return fib6_entry;
5431 
5432 err_nexthop6_group_get:
5433 	i = nrt6;
5434 err_rt6_create:
5435 	for (i--; i >= 0; i--) {
5436 		fib6_entry->nrt6--;
5437 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5438 					       struct mlxsw_sp_rt6, list);
5439 		list_del(&mlxsw_sp_rt6->list);
5440 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5441 	}
5442 	kfree(fib6_entry);
5443 	return ERR_PTR(err);
5444 }
5445 
5446 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5447 					struct mlxsw_sp_fib6_entry *fib6_entry)
5448 {
5449 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5450 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5451 	WARN_ON(fib6_entry->nrt6);
5452 	kfree(fib6_entry);
5453 }
5454 
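/* Find the driver's FIB entry corresponding to a kernel route by
 * matching on the virtual router, prefix, table ID and metric.
 */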
5455 static struct mlxsw_sp_fib6_entry *
5456 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5457 			   const struct fib6_info *rt)
5458 {
5459 	struct mlxsw_sp_fib6_entry *fib6_entry;
5460 	struct mlxsw_sp_fib_node *fib_node;
5461 	struct mlxsw_sp_fib *fib;
5462 	struct fib6_info *cmp_rt;
5463 	struct mlxsw_sp_vr *vr;
5464 
5465 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
5466 	if (!vr)
5467 		return NULL;
5468 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5469 
5470 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5471 					    sizeof(rt->fib6_dst.addr),
5472 					    rt->fib6_dst.plen);
5473 	if (!fib_node)
5474 		return NULL;
5475 
5476 	fib6_entry = container_of(fib_node->fib_entry,
5477 				  struct mlxsw_sp_fib6_entry, common);
5478 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5479 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
5480 	    rt->fib6_metric == cmp_rt->fib6_metric &&
5481 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5482 		return fib6_entry;
5483 
5484 	return NULL;
5485 }
5486 
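/* Do not allow a route from the main table to replace a route from the
 * local table for the same prefix, as the kernel consults the local
 * table before the main table during lookup.
 */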
5487 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
5488 {
5489 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5490 	struct mlxsw_sp_fib6_entry *fib6_replaced;
5491 	struct fib6_info *rt, *rt_replaced;
5492 
5493 	if (!fib_node->fib_entry)
5494 		return true;
5495 
5496 	fib6_replaced = container_of(fib_node->fib_entry,
5497 				     struct mlxsw_sp_fib6_entry,
5498 				     common);
5499 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5500 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
5501 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
5502 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
5503 		return false;
5504 
5505 	return true;
5506 }
5507 
5508 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
5509 					struct fib6_info **rt_arr,
5510 					unsigned int nrt6)
5511 {
5512 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
5513 	struct mlxsw_sp_fib_entry *replaced;
5514 	struct mlxsw_sp_fib_node *fib_node;
5515 	struct fib6_info *rt = rt_arr[0];
5516 	int err;
5517 
5518 	if (mlxsw_sp->router->aborted)
5519 		return 0;
5520 
5521 	if (rt->fib6_src.plen)
5522 		return -EINVAL;
5523 
5524 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
5525 		return 0;
5526 
5527 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5528 					 &rt->fib6_dst.addr,
5529 					 sizeof(rt->fib6_dst.addr),
5530 					 rt->fib6_dst.plen,
5531 					 MLXSW_SP_L3_PROTO_IPV6);
5532 	if (IS_ERR(fib_node))
5533 		return PTR_ERR(fib_node);
5534 
5535 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
5536 						nrt6);
5537 	if (IS_ERR(fib6_entry)) {
5538 		err = PTR_ERR(fib6_entry);
5539 		goto err_fib6_entry_create;
5540 	}
5541 
5542 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
5543 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5544 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5545 		return 0;
5546 	}
5547 
5548 	replaced = fib_node->fib_entry;
5549 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
5550 	if (err)
5551 		goto err_fib_node_entry_link;
5552 
5553 	/* Nothing to replace */
5554 	if (!replaced)
5555 		return 0;
5556 
5557 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
5558 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
5559 				     common);
5560 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
5561 
5562 	return 0;
5563 
5564 err_fib_node_entry_link:
5565 	fib_node->fib_entry = replaced;
5566 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5567 err_fib6_entry_create:
5568 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5569 	return err;
5570 }
5571 
5572 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
5573 				       struct fib6_info **rt_arr,
5574 				       unsigned int nrt6)
5575 {
5576 	struct mlxsw_sp_fib6_entry *fib6_entry;
5577 	struct mlxsw_sp_fib_node *fib_node;
5578 	struct fib6_info *rt = rt_arr[0];
5579 	int err;
5580 
5581 	if (mlxsw_sp->router->aborted)
5582 		return 0;
5583 
5584 	if (rt->fib6_src.plen)
5585 		return -EINVAL;
5586 
5587 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
5588 		return 0;
5589 
5590 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5591 					 &rt->fib6_dst.addr,
5592 					 sizeof(rt->fib6_dst.addr),
5593 					 rt->fib6_dst.plen,
5594 					 MLXSW_SP_L3_PROTO_IPV6);
5595 	if (IS_ERR(fib_node))
5596 		return PTR_ERR(fib_node);
5597 
5598 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
5599 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5600 		return -EINVAL;
5601 	}
5602 
5603 	fib6_entry = container_of(fib_node->fib_entry,
5604 				  struct mlxsw_sp_fib6_entry, common);
5605 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
5606 					      nrt6);
5607 	if (err)
5608 		goto err_fib6_entry_nexthop_add;
5609 
5610 	return 0;
5611 
5612 err_fib6_entry_nexthop_add:
5613 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5614 	return err;
5615 }
5616 
5617 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5618 				     struct fib6_info **rt_arr,
5619 				     unsigned int nrt6)
5620 {
5621 	struct mlxsw_sp_fib6_entry *fib6_entry;
5622 	struct mlxsw_sp_fib_node *fib_node;
5623 	struct fib6_info *rt = rt_arr[0];
5624 
5625 	if (mlxsw_sp->router->aborted)
5626 		return;
5627 
5628 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
5629 		return;
5630 
	/* Multipath routes are first added to the FIB trie and only then
	 * notified. If we vetoed the addition, we will get a delete
	 * notification for a route we do not have. Therefore, do not warn if
	 * the route was not found.
	 */
5636 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5637 	if (!fib6_entry)
5638 		return;
5639 
5640 	/* If not all the nexthops are deleted, then only reduce the nexthop
5641 	 * group.
5642 	 */
5643 	if (nrt6 != fib6_entry->nrt6) {
5644 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
5645 						nrt6);
5646 		return;
5647 	}
5648 
5649 	fib_node = fib6_entry->common.fib_node;
5650 
5651 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
5652 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5653 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5654 }
5655 
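/* Bind all virtual routers to a minimal LPM tree and install in each of
 * them a default route whose action is to send packets to the CPU, so
 * that after an abort all routing is performed by the kernel.
 */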
5656 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5657 					    enum mlxsw_reg_ralxx_protocol proto,
5658 					    u8 tree_id)
5659 {
5660 	char ralta_pl[MLXSW_REG_RALTA_LEN];
5661 	char ralst_pl[MLXSW_REG_RALST_LEN];
5662 	int i, err;
5663 
5664 	mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
5665 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5666 	if (err)
5667 		return err;
5668 
5669 	mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
5670 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5671 	if (err)
5672 		return err;
5673 
5674 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5675 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5676 		char raltb_pl[MLXSW_REG_RALTB_LEN];
5677 		char ralue_pl[MLXSW_REG_RALUE_LEN];
5678 
5679 		mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
5680 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5681 				      raltb_pl);
5682 		if (err)
5683 			return err;
5684 
5685 		mlxsw_reg_ralue_pack(ralue_pl, proto,
5686 				     MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
5687 		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5688 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5689 				      ralue_pl);
5690 		if (err)
5691 			return err;
5692 	}
5693 
5694 	return 0;
5695 }
5696 
5697 static struct mlxsw_sp_mr_table *
5698 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5699 {
5700 	if (family == RTNL_FAMILY_IPMR)
5701 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
5702 	else
5703 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
5704 }
5705 
5706 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5707 				     struct mfc_entry_notifier_info *men_info,
5708 				     bool replace)
5709 {
5710 	struct mlxsw_sp_mr_table *mrt;
5711 	struct mlxsw_sp_vr *vr;
5712 
5713 	if (mlxsw_sp->router->aborted)
5714 		return 0;
5715 
5716 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
5717 	if (IS_ERR(vr))
5718 		return PTR_ERR(vr);
5719 
5720 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5721 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
5722 }
5723 
5724 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5725 				      struct mfc_entry_notifier_info *men_info)
5726 {
5727 	struct mlxsw_sp_mr_table *mrt;
5728 	struct mlxsw_sp_vr *vr;
5729 
5730 	if (mlxsw_sp->router->aborted)
5731 		return;
5732 
5733 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5734 	if (WARN_ON(!vr))
5735 		return;
5736 
5737 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5738 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
5739 	mlxsw_sp_vr_put(mlxsw_sp, vr);
5740 }
5741 
5742 static int
5743 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5744 			      struct vif_entry_notifier_info *ven_info)
5745 {
5746 	struct mlxsw_sp_mr_table *mrt;
5747 	struct mlxsw_sp_rif *rif;
5748 	struct mlxsw_sp_vr *vr;
5749 
5750 	if (mlxsw_sp->router->aborted)
5751 		return 0;
5752 
5753 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
5754 	if (IS_ERR(vr))
5755 		return PTR_ERR(vr);
5756 
5757 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5758 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5759 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
5760 				   ven_info->vif_index,
5761 				   ven_info->vif_flags, rif);
5762 }
5763 
5764 static void
5765 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5766 			      struct vif_entry_notifier_info *ven_info)
5767 {
5768 	struct mlxsw_sp_mr_table *mrt;
5769 	struct mlxsw_sp_vr *vr;
5770 
5771 	if (mlxsw_sp->router->aborted)
5772 		return;
5773 
5774 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5775 	if (WARN_ON(!vr))
5776 		return;
5777 
5778 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5779 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
5780 	mlxsw_sp_vr_put(mlxsw_sp, vr);
5781 }
5782 
5783 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5784 {
5785 	enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5786 	int err;
5787 
5788 	err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5789 					       MLXSW_SP_LPM_TREE_MIN);
5790 	if (err)
5791 		return err;
5792 
	/* The multicast router code does not need an abort trap as, by
	 * default, packets that don't match any routes are trapped to the
	 * CPU.
	 */
5796 
5797 	proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5798 	return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5799 						MLXSW_SP_LPM_TREE_MIN + 1);
5800 }
5801 
5802 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5803 				     struct mlxsw_sp_fib_node *fib_node)
5804 {
5805 	struct mlxsw_sp_fib4_entry *fib4_entry;
5806 
5807 	fib4_entry = container_of(fib_node->fib_entry,
5808 				  struct mlxsw_sp_fib4_entry, common);
5809 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
5810 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5811 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5812 }
5813 
5814 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5815 				     struct mlxsw_sp_fib_node *fib_node)
5816 {
5817 	struct mlxsw_sp_fib6_entry *fib6_entry;
5818 
5819 	fib6_entry = container_of(fib_node->fib_entry,
5820 				  struct mlxsw_sp_fib6_entry, common);
5821 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
5822 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5823 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5824 }
5825 
5826 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5827 				    struct mlxsw_sp_fib_node *fib_node)
5828 {
5829 	switch (fib_node->fib->proto) {
5830 	case MLXSW_SP_L3_PROTO_IPV4:
5831 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5832 		break;
5833 	case MLXSW_SP_L3_PROTO_IPV6:
5834 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
5835 		break;
5836 	}
5837 }
5838 
5839 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5840 				  struct mlxsw_sp_vr *vr,
5841 				  enum mlxsw_sp_l3proto proto)
5842 {
5843 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5844 	struct mlxsw_sp_fib_node *fib_node, *tmp;
5845 
5846 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5847 		bool do_break = &tmp->list == &fib->node_list;
5848 
5849 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5850 		if (do_break)
5851 			break;
5852 	}
5853 }
5854 
5855 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
5856 {
5857 	int i, j;
5858 
5859 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5860 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5861 
5862 		if (!mlxsw_sp_vr_is_used(vr))
5863 			continue;
5864 
5865 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
5866 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
5867 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
5868 
		/* If the virtual router was only used for IPv4, then it's no
		 * longer in use.
		 */
5872 		if (!mlxsw_sp_vr_is_used(vr))
5873 			continue;
5874 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
5875 	}
5876 
	/* After flushing all the routes, it is no longer possible that anyone
	 * is still using the adjacency index that is discarding packets, so
	 * free it in case it was allocated.
	 */
5881 	if (!mlxsw_sp->router->adj_discard_index_valid)
5882 		return;
5883 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
5884 			   mlxsw_sp->router->adj_discard_index);
5885 	mlxsw_sp->router->adj_discard_index_valid = false;
5886 }
5887 
5888 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
5889 {
5890 	int err;
5891 
5892 	if (mlxsw_sp->router->aborted)
5893 		return;
5894 	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
5895 	mlxsw_sp_router_fib_flush(mlxsw_sp);
5896 	mlxsw_sp->router->aborted = true;
5897 	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5898 	if (err)
5899 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5900 }
5901 
5902 struct mlxsw_sp_fib6_event_work {
5903 	struct fib6_info **rt_arr;
5904 	unsigned int nrt6;
5905 };
5906 
5907 struct mlxsw_sp_fib_event_work {
5908 	struct work_struct work;
5909 	union {
5910 		struct mlxsw_sp_fib6_event_work fib6_work;
5911 		struct fib_entry_notifier_info fen_info;
5912 		struct fib_rule_notifier_info fr_info;
5913 		struct fib_nh_notifier_info fnh_info;
5914 		struct mfc_entry_notifier_info men_info;
5915 		struct vif_entry_notifier_info ven_info;
5916 	};
5917 	struct mlxsw_sp *mlxsw_sp;
5918 	unsigned long event;
5919 };
5920 
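/* Called in atomic (notifier) context, hence GFP_ATOMIC. Take a
 * reference on the route and each of its siblings so they do not go
 * away while the work item is queued.
 */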
5921 static int
5922 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
5923 			       struct fib6_entry_notifier_info *fen6_info)
5924 {
5925 	struct fib6_info *rt = fen6_info->rt;
5926 	struct fib6_info **rt_arr;
5927 	struct fib6_info *iter;
5928 	unsigned int nrt6;
5929 	int i = 0;
5930 
5931 	nrt6 = fen6_info->nsiblings + 1;
5932 
5933 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
5934 	if (!rt_arr)
5935 		return -ENOMEM;
5936 
5937 	fib6_work->rt_arr = rt_arr;
5938 	fib6_work->nrt6 = nrt6;
5939 
5940 	rt_arr[0] = rt;
5941 	fib6_info_hold(rt);
5942 
5943 	if (!fen6_info->nsiblings)
5944 		return 0;
5945 
5946 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
5947 		if (i == fen6_info->nsiblings)
5948 			break;
5949 
5950 		rt_arr[i + 1] = iter;
5951 		fib6_info_hold(iter);
5952 		i++;
5953 	}
5954 	WARN_ON_ONCE(i != fen6_info->nsiblings);
5955 
5956 	return 0;
5957 }
5958 
5959 static void
5960 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
5961 {
5962 	int i;
5963 
5964 	for (i = 0; i < fib6_work->nrt6; i++)
5965 		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
5966 	kfree(fib6_work->rt_arr);
5967 }
5968 
5969 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
5970 {
5971 	struct mlxsw_sp_fib_event_work *fib_work =
5972 		container_of(work, struct mlxsw_sp_fib_event_work, work);
5973 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5974 	int err;
5975 
5976 	mutex_lock(&mlxsw_sp->router->lock);
5977 	mlxsw_sp_span_respin(mlxsw_sp);
5978 
5979 	switch (fib_work->event) {
5980 	case FIB_EVENT_ENTRY_REPLACE:
5981 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
5982 						   &fib_work->fen_info);
5983 		if (err)
5984 			mlxsw_sp_router_fib_abort(mlxsw_sp);
5985 		fib_info_put(fib_work->fen_info.fi);
5986 		break;
5987 	case FIB_EVENT_ENTRY_DEL:
5988 		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5989 		fib_info_put(fib_work->fen_info.fi);
5990 		break;
5991 	case FIB_EVENT_NH_ADD: /* fall through */
5992 	case FIB_EVENT_NH_DEL:
5993 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5994 					fib_work->fnh_info.fib_nh);
5995 		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5996 		break;
5997 	}
5998 	mutex_unlock(&mlxsw_sp->router->lock);
5999 	kfree(fib_work);
6000 }
6001 
6002 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
6003 {
6004 	struct mlxsw_sp_fib_event_work *fib_work =
6005 		container_of(work, struct mlxsw_sp_fib_event_work, work);
6006 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6007 	int err;
6008 
6009 	mutex_lock(&mlxsw_sp->router->lock);
6010 	mlxsw_sp_span_respin(mlxsw_sp);
6011 
6012 	switch (fib_work->event) {
6013 	case FIB_EVENT_ENTRY_REPLACE:
6014 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
6015 						   fib_work->fib6_work.rt_arr,
6016 						   fib_work->fib6_work.nrt6);
6017 		if (err)
6018 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6019 		mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6020 		break;
6021 	case FIB_EVENT_ENTRY_APPEND:
6022 		err = mlxsw_sp_router_fib6_append(mlxsw_sp,
6023 						  fib_work->fib6_work.rt_arr,
6024 						  fib_work->fib6_work.nrt6);
6025 		if (err)
6026 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6027 		mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6028 		break;
6029 	case FIB_EVENT_ENTRY_DEL:
6030 		mlxsw_sp_router_fib6_del(mlxsw_sp,
6031 					 fib_work->fib6_work.rt_arr,
6032 					 fib_work->fib6_work.nrt6);
6033 		mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6034 		break;
6035 	}
6036 	mutex_unlock(&mlxsw_sp->router->lock);
6037 	kfree(fib_work);
6038 }
6039 
6040 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
6041 {
6042 	struct mlxsw_sp_fib_event_work *fib_work =
6043 		container_of(work, struct mlxsw_sp_fib_event_work, work);
6044 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6045 	bool replace;
6046 	int err;
6047 
6048 	rtnl_lock();
6049 	mutex_lock(&mlxsw_sp->router->lock);
6050 	switch (fib_work->event) {
6051 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6052 	case FIB_EVENT_ENTRY_ADD:
6053 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
6054 
6055 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
6056 						replace);
6057 		if (err)
6058 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6059 		mr_cache_put(fib_work->men_info.mfc);
6060 		break;
6061 	case FIB_EVENT_ENTRY_DEL:
6062 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
6063 		mr_cache_put(fib_work->men_info.mfc);
6064 		break;
6065 	case FIB_EVENT_VIF_ADD:
6066 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
6067 						    &fib_work->ven_info);
6068 		if (err)
6069 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6070 		dev_put(fib_work->ven_info.dev);
6071 		break;
6072 	case FIB_EVENT_VIF_DEL:
6073 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
6074 					      &fib_work->ven_info);
6075 		dev_put(fib_work->ven_info.dev);
6076 		break;
6077 	}
6078 	mutex_unlock(&mlxsw_sp->router->lock);
6079 	rtnl_unlock();
6080 	kfree(fib_work);
6081 }
6082 
6083 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
6084 				       struct fib_notifier_info *info)
6085 {
6086 	struct fib_entry_notifier_info *fen_info;
6087 	struct fib_nh_notifier_info *fnh_info;
6088 
6089 	switch (fib_work->event) {
6090 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6091 	case FIB_EVENT_ENTRY_DEL:
6092 		fen_info = container_of(info, struct fib_entry_notifier_info,
6093 					info);
6094 		fib_work->fen_info = *fen_info;
6095 		/* Take reference on fib_info to prevent it from being
6096 		 * freed while work is queued. Release it afterwards.
6097 		 */
6098 		fib_info_hold(fib_work->fen_info.fi);
6099 		break;
6100 	case FIB_EVENT_NH_ADD: /* fall through */
6101 	case FIB_EVENT_NH_DEL:
6102 		fnh_info = container_of(info, struct fib_nh_notifier_info,
6103 					info);
6104 		fib_work->fnh_info = *fnh_info;
6105 		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
6106 		break;
6107 	}
6108 }
6109 
6110 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
6111 				      struct fib_notifier_info *info)
6112 {
6113 	struct fib6_entry_notifier_info *fen6_info;
6114 	int err;
6115 
6116 	switch (fib_work->event) {
6117 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6118 	case FIB_EVENT_ENTRY_APPEND: /* fall through */
6119 	case FIB_EVENT_ENTRY_DEL:
6120 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
6121 					 info);
6122 		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
6123 						     fen6_info);
6124 		if (err)
6125 			return err;
6126 		break;
6127 	}
6128 
6129 	return 0;
6130 }
6131 
6132 static void
6133 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
6134 			    struct fib_notifier_info *info)
6135 {
6136 	switch (fib_work->event) {
6137 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6138 	case FIB_EVENT_ENTRY_ADD: /* fall through */
6139 	case FIB_EVENT_ENTRY_DEL:
6140 		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
6141 		mr_cache_hold(fib_work->men_info.mfc);
6142 		break;
6143 	case FIB_EVENT_VIF_ADD: /* fall through */
6144 	case FIB_EVENT_VIF_DEL:
6145 		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
6146 		dev_hold(fib_work->ven_info.dev);
6147 		break;
6148 	}
6149 }
6150 
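/* Only default FIB rules and l3mdev (VRF) rules can be reflected in the
 * device. Events for any other rule are vetoed with an extack message.
 */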
6151 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
6152 					  struct fib_notifier_info *info,
6153 					  struct mlxsw_sp *mlxsw_sp)
6154 {
6155 	struct netlink_ext_ack *extack = info->extack;
6156 	struct fib_rule_notifier_info *fr_info;
6157 	struct fib_rule *rule;
6158 	int err = 0;
6159 
6160 	/* nothing to do at the moment */
6161 	if (event == FIB_EVENT_RULE_DEL)
6162 		return 0;
6163 
6164 	if (mlxsw_sp->router->aborted)
6165 		return 0;
6166 
6167 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
6168 	rule = fr_info->rule;
6169 
6170 	/* Rule only affects locally generated traffic */
6171 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
6172 		return 0;
6173 
6174 	switch (info->family) {
6175 	case AF_INET:
6176 		if (!fib4_rule_default(rule) && !rule->l3mdev)
6177 			err = -EOPNOTSUPP;
6178 		break;
6179 	case AF_INET6:
6180 		if (!fib6_rule_default(rule) && !rule->l3mdev)
6181 			err = -EOPNOTSUPP;
6182 		break;
6183 	case RTNL_FAMILY_IPMR:
6184 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
6185 			err = -EOPNOTSUPP;
6186 		break;
6187 	case RTNL_FAMILY_IP6MR:
6188 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
6189 			err = -EOPNOTSUPP;
6190 		break;
6191 	}
6192 
6193 	if (err < 0)
6194 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
6195 
6196 	return err;
6197 }
6198 
6199 /* Called with rcu_read_lock() */
6200 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
6201 				     unsigned long event, void *ptr)
6202 {
6203 	struct mlxsw_sp_fib_event_work *fib_work;
6204 	struct fib_notifier_info *info = ptr;
6205 	struct mlxsw_sp_router *router;
6206 	int err;
6207 
	if (info->family != AF_INET && info->family != AF_INET6 &&
	    info->family != RTNL_FAMILY_IPMR &&
	    info->family != RTNL_FAMILY_IP6MR)
6211 		return NOTIFY_DONE;
6212 
6213 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6214 
6215 	switch (event) {
6216 	case FIB_EVENT_RULE_ADD: /* fall through */
6217 	case FIB_EVENT_RULE_DEL:
6218 		err = mlxsw_sp_router_fib_rule_event(event, info,
6219 						     router->mlxsw_sp);
6220 		return notifier_from_errno(err);
6221 	case FIB_EVENT_ENTRY_ADD: /* fall through */
6222 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6223 	case FIB_EVENT_ENTRY_APPEND:
6224 		if (router->aborted) {
6225 			NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
6226 			return notifier_from_errno(-EINVAL);
6227 		}
6228 		if (info->family == AF_INET) {
6229 			struct fib_entry_notifier_info *fen_info = ptr;
6230 
6231 			if (fen_info->fi->fib_nh_is_v6) {
6232 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
6233 				return notifier_from_errno(-EINVAL);
6234 			}
6235 			if (fen_info->fi->nh) {
6236 				NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
6237 				return notifier_from_errno(-EINVAL);
6238 			}
6239 		} else if (info->family == AF_INET6) {
6240 			struct fib6_entry_notifier_info *fen6_info;
6241 
6242 			fen6_info = container_of(info,
6243 						 struct fib6_entry_notifier_info,
6244 						 info);
6245 			if (fen6_info->rt->nh) {
6246 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
6247 				return notifier_from_errno(-EINVAL);
6248 			}
6249 		}
6250 		break;
6251 	}
6252 
6253 	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
6254 	if (WARN_ON(!fib_work))
6255 		return NOTIFY_BAD;
6256 
6257 	fib_work->mlxsw_sp = router->mlxsw_sp;
6258 	fib_work->event = event;
6259 
6260 	switch (info->family) {
6261 	case AF_INET:
6262 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
6263 		mlxsw_sp_router_fib4_event(fib_work, info);
6264 		break;
6265 	case AF_INET6:
6266 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
6267 		err = mlxsw_sp_router_fib6_event(fib_work, info);
6268 		if (err)
6269 			goto err_fib_event;
6270 		break;
6271 	case RTNL_FAMILY_IP6MR:
6272 	case RTNL_FAMILY_IPMR:
6273 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
6274 		mlxsw_sp_router_fibmr_event(fib_work, info);
6275 		break;
6276 	}
6277 
6278 	mlxsw_core_schedule_work(&fib_work->work);
6279 
6280 	return NOTIFY_DONE;
6281 
6282 err_fib_event:
6283 	kfree(fib_work);
6284 	return NOTIFY_BAD;
6285 }
6286 
6287 static struct mlxsw_sp_rif *
6288 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
6289 			 const struct net_device *dev)
6290 {
6291 	int i;
6292 
6293 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6294 		if (mlxsw_sp->router->rifs[i] &&
6295 		    mlxsw_sp->router->rifs[i]->dev == dev)
6296 			return mlxsw_sp->router->rifs[i];
6297 
6298 	return NULL;
6299 }
6300 
6301 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
6302 			 const struct net_device *dev)
6303 {
6304 	struct mlxsw_sp_rif *rif;
6305 
6306 	mutex_lock(&mlxsw_sp->router->lock);
6307 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6308 	mutex_unlock(&mlxsw_sp->router->lock);
6309 
	return rif != NULL;
6311 }
6312 
6313 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
6314 {
6315 	struct mlxsw_sp_rif *rif;
6316 	u16 vid = 0;
6317 
6318 	mutex_lock(&mlxsw_sp->router->lock);
6319 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6320 	if (!rif)
6321 		goto out;
6322 
6323 	/* We only return the VID for VLAN RIFs. Otherwise we return an
6324 	 * invalid value (0).
6325 	 */
6326 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
6327 		goto out;
6328 
6329 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6330 
6331 out:
6332 	mutex_unlock(&mlxsw_sp->router->lock);
6333 	return vid;
6334 }
6335 
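/* Disable a RIF in the device using a read-modify-write of the RITR
 * register, leaving its other parameters intact.
 */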
6336 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
6337 {
6338 	char ritr_pl[MLXSW_REG_RITR_LEN];
6339 	int err;
6340 
6341 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
6342 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6343 	if (err)
6344 		return err;
6345 
6346 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
6347 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6348 }
6349 
6350 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
6351 					  struct mlxsw_sp_rif *rif)
6352 {
6353 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6354 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6355 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
6356 }
6357 
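/* Decide whether an address event should (de)configure a RIF: on
 * NETDEV_UP, configure only if the netdev does not already have a RIF;
 * on NETDEV_DOWN, deconfigure only once the last IPv4 / IPv6 address
 * is gone.
 */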
6358 static bool
6359 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6360 			   unsigned long event)
6361 {
6362 	struct inet6_dev *inet6_dev;
6363 	bool addr_list_empty = true;
6364 	struct in_device *idev;
6365 
6366 	switch (event) {
6367 	case NETDEV_UP:
6368 		return rif == NULL;
6369 	case NETDEV_DOWN:
6370 		rcu_read_lock();
6371 		idev = __in_dev_get_rcu(dev);
6372 		if (idev && idev->ifa_list)
6373 			addr_list_empty = false;
6374 
6375 		inet6_dev = __in6_dev_get(dev);
6376 		if (addr_list_empty && inet6_dev &&
6377 		    !list_empty(&inet6_dev->addr_list))
6378 			addr_list_empty = false;
6379 		rcu_read_unlock();
6380 
		/* macvlans do not have a RIF, but rather piggyback on the
		 * RIF of their lower device.
		 */
6384 		if (netif_is_macvlan(dev) && addr_list_empty)
6385 			return true;
6386 
6387 		if (rif && addr_list_empty &&
6388 		    !netif_is_l3_slave(rif->dev))
6389 			return true;
6390 		/* It is possible we already removed the RIF ourselves
6391 		 * if it was assigned to a netdev that is now a bridge
6392 		 * or LAG slave.
6393 		 */
6394 		return false;
6395 	}
6396 
6397 	return false;
6398 }
6399 
6400 static enum mlxsw_sp_rif_type
6401 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6402 		      const struct net_device *dev)
6403 {
6404 	enum mlxsw_sp_fid_type type;
6405 
6406 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6407 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
6408 
6409 	/* Otherwise RIF type is derived from the type of the underlying FID. */
6410 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6411 		type = MLXSW_SP_FID_TYPE_8021Q;
6412 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6413 		type = MLXSW_SP_FID_TYPE_8021Q;
6414 	else if (netif_is_bridge_master(dev))
6415 		type = MLXSW_SP_FID_TYPE_8021D;
6416 	else
6417 		type = MLXSW_SP_FID_TYPE_RFID;
6418 
6419 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6420 }
6421 
6422 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
6423 {
6424 	int i;
6425 
6426 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6427 		if (!mlxsw_sp->router->rifs[i]) {
6428 			*p_rif_index = i;
6429 			return 0;
6430 		}
6431 	}
6432 
6433 	return -ENOBUFS;
6434 }
6435 
6436 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6437 					       u16 vr_id,
6438 					       struct net_device *l3_dev)
6439 {
6440 	struct mlxsw_sp_rif *rif;
6441 
6442 	rif = kzalloc(rif_size, GFP_KERNEL);
6443 	if (!rif)
6444 		return NULL;
6445 
6446 	INIT_LIST_HEAD(&rif->nexthop_list);
6447 	INIT_LIST_HEAD(&rif->neigh_list);
6448 	if (l3_dev) {
6449 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
6450 		rif->mtu = l3_dev->mtu;
6451 		rif->dev = l3_dev;
6452 	}
6453 	rif->vr_id = vr_id;
6454 	rif->rif_index = rif_index;
6455 
6456 	return rif;
6457 }
6458 
6459 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6460 					   u16 rif_index)
6461 {
6462 	return mlxsw_sp->router->rifs[rif_index];
6463 }
6464 
6465 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6466 {
6467 	return rif->rif_index;
6468 }
6469 
6470 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6471 {
6472 	return lb_rif->common.rif_index;
6473 }
6474 
6475 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6476 {
6477 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
6478 	struct mlxsw_sp_vr *ul_vr;
6479 
6480 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
6481 	if (WARN_ON(IS_ERR(ul_vr)))
6482 		return 0;
6483 
6484 	return ul_vr->id;
6485 }
6486 
6487 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6488 {
6489 	return lb_rif->ul_rif_id;
6490 }
6491 
6492 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6493 {
6494 	return rif->dev->ifindex;
6495 }
6496 
6497 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6498 {
6499 	return rif->dev;
6500 }
6501 
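/* Create a RIF: bind it to a virtual router, allocate a free RIF index,
 * take a reference on a FID if the RIF type requires one, configure the
 * device and register the RIF with the multicast routing tables.
 */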
6502 static struct mlxsw_sp_rif *
6503 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
6504 		    const struct mlxsw_sp_rif_params *params,
6505 		    struct netlink_ext_ack *extack)
6506 {
6507 	u32 tb_id = l3mdev_fib_table(params->dev);
6508 	const struct mlxsw_sp_rif_ops *ops;
6509 	struct mlxsw_sp_fid *fid = NULL;
6510 	enum mlxsw_sp_rif_type type;
6511 	struct mlxsw_sp_rif *rif;
6512 	struct mlxsw_sp_vr *vr;
6513 	u16 rif_index;
6514 	int i, err;
6515 
6516 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6517 	ops = mlxsw_sp->rif_ops_arr[type];
6518 
6519 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
6520 	if (IS_ERR(vr))
6521 		return ERR_CAST(vr);
6522 	vr->rif_count++;
6523 
6524 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
6525 	if (err) {
6526 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
6527 		goto err_rif_index_alloc;
6528 	}
6529 
6530 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
6531 	if (!rif) {
6532 		err = -ENOMEM;
6533 		goto err_rif_alloc;
6534 	}
6535 	dev_hold(rif->dev);
6536 	mlxsw_sp->router->rifs[rif_index] = rif;
6537 	rif->mlxsw_sp = mlxsw_sp;
6538 	rif->ops = ops;
6539 
6540 	if (ops->fid_get) {
6541 		fid = ops->fid_get(rif, extack);
6542 		if (IS_ERR(fid)) {
6543 			err = PTR_ERR(fid);
6544 			goto err_fid_get;
6545 		}
6546 		rif->fid = fid;
6547 	}
6548 
6549 	if (ops->setup)
6550 		ops->setup(rif, params);
6551 
6552 	err = ops->configure(rif);
6553 	if (err)
6554 		goto err_configure;
6555 
6556 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6557 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6558 		if (err)
6559 			goto err_mr_rif_add;
6560 	}
6561 
6562 	mlxsw_sp_rif_counters_alloc(rif);
6563 
6564 	return rif;
6565 
6566 err_mr_rif_add:
6567 	for (i--; i >= 0; i--)
6568 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6569 	ops->deconfigure(rif);
6570 err_configure:
6571 	if (fid)
6572 		mlxsw_sp_fid_put(fid);
6573 err_fid_get:
6574 	mlxsw_sp->router->rifs[rif_index] = NULL;
6575 	dev_put(rif->dev);
6576 	kfree(rif);
6577 err_rif_alloc:
6578 err_rif_index_alloc:
6579 	vr->rif_count--;
6580 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6581 	return ERR_PTR(err);
6582 }
6583 
6584 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6585 {
6586 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
6587 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6588 	struct mlxsw_sp_fid *fid = rif->fid;
6589 	struct mlxsw_sp_vr *vr;
6590 	int i;
6591 
6592 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
6593 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
6594 
6595 	mlxsw_sp_rif_counters_free(rif);
6596 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6597 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6598 	ops->deconfigure(rif);
6599 	if (fid)
6600 		/* Loopback RIFs are not associated with a FID. */
6601 		mlxsw_sp_fid_put(fid);
6602 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
6603 	dev_put(rif->dev);
6604 	kfree(rif);
6605 	vr->rif_count--;
6606 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6607 }
6608 
6609 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6610 				 struct net_device *dev)
6611 {
6612 	struct mlxsw_sp_rif *rif;
6613 
6614 	mutex_lock(&mlxsw_sp->router->lock);
6615 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6616 	if (!rif)
6617 		goto out;
6618 	mlxsw_sp_rif_destroy(rif);
6619 out:
6620 	mutex_unlock(&mlxsw_sp->router->lock);
6621 }
6622 
6623 static void
6624 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6625 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6626 {
6627 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6628 
6629 	params->vid = mlxsw_sp_port_vlan->vid;
6630 	params->lag = mlxsw_sp_port->lagged;
6631 	if (params->lag)
6632 		params->lag_id = mlxsw_sp_port->lag_id;
6633 	else
6634 		params->system_port = mlxsw_sp_port->local_port;
6635 }
6636 
6637 static struct mlxsw_sp_rif_subport *
6638 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
6639 {
6640 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
6641 }
6642 
6643 static struct mlxsw_sp_rif *
6644 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
6645 			 const struct mlxsw_sp_rif_params *params,
6646 			 struct netlink_ext_ack *extack)
6647 {
6648 	struct mlxsw_sp_rif_subport *rif_subport;
6649 	struct mlxsw_sp_rif *rif;
6650 
6651 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
6652 	if (!rif)
6653 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
6654 
6655 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
6656 	refcount_inc(&rif_subport->ref_count);
6657 	return rif;
6658 }
6659 
6660 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
6661 {
6662 	struct mlxsw_sp_rif_subport *rif_subport;
6663 
6664 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
6665 	if (!refcount_dec_and_test(&rif_subport->ref_count))
6666 		return;
6667 
6668 	mlxsw_sp_rif_destroy(rif);
6669 }
6670 
6671 static int
6672 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
6673 			       struct net_device *l3_dev,
6674 			       struct netlink_ext_ack *extack)
6675 {
6676 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6677 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
6678 	struct mlxsw_sp_rif_params params = {
6679 		.dev = l3_dev,
6680 	};
6681 	u16 vid = mlxsw_sp_port_vlan->vid;
6682 	struct mlxsw_sp_rif *rif;
6683 	struct mlxsw_sp_fid *fid;
6684 	int err;
6685 
6686 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
6687 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
6688 	if (IS_ERR(rif))
6689 		return PTR_ERR(rif);
6690 
6691 	/* FID was already created, just take a reference */
6692 	fid = rif->ops->fid_get(rif, extack);
6693 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6694 	if (err)
6695 		goto err_fid_port_vid_map;
6696 
6697 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
6698 	if (err)
6699 		goto err_port_vid_learning_set;
6700 
6701 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
6702 					BR_STATE_FORWARDING);
6703 	if (err)
6704 		goto err_port_vid_stp_set;
6705 
6706 	mlxsw_sp_port_vlan->fid = fid;
6707 
6708 	return 0;
6709 
6710 err_port_vid_stp_set:
6711 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6712 err_port_vid_learning_set:
6713 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6714 err_fid_port_vid_map:
6715 	mlxsw_sp_fid_put(fid);
6716 	mlxsw_sp_rif_subport_put(rif);
6717 	return err;
6718 }
6719 
6720 static void
6721 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6722 {
6723 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6724 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
6725 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
6726 	u16 vid = mlxsw_sp_port_vlan->vid;
6727 
6728 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6729 		return;
6730 
6731 	mlxsw_sp_port_vlan->fid = NULL;
6732 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6733 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6734 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6735 	mlxsw_sp_fid_put(fid);
6736 	mlxsw_sp_rif_subport_put(rif);
6737 }
6738 
6739 void
6740 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6741 {
6742 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
6743 
6744 	mutex_lock(&mlxsw_sp->router->lock);
6745 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
6746 	mutex_unlock(&mlxsw_sp->router->lock);
6747 }
6748 
6749 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6750 					     struct net_device *port_dev,
6751 					     unsigned long event, u16 vid,
6752 					     struct netlink_ext_ack *extack)
6753 {
6754 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
6755 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
6756 
6757 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
6758 	if (WARN_ON(!mlxsw_sp_port_vlan))
6759 		return -EINVAL;
6760 
6761 	switch (event) {
6762 	case NETDEV_UP:
6763 		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
6764 						      l3_dev, extack);
6765 	case NETDEV_DOWN:
6766 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
6767 		break;
6768 	}
6769 
6770 	return 0;
6771 }
6772 
6773 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
6774 					unsigned long event,
6775 					struct netlink_ext_ack *extack)
6776 {
6777 	if (netif_is_bridge_port(port_dev) ||
6778 	    netif_is_lag_port(port_dev) ||
6779 	    netif_is_ovs_port(port_dev))
6780 		return 0;
6781 
6782 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
6783 						 MLXSW_SP_DEFAULT_VID, extack);
6784 }
6785 
6786 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6787 					 struct net_device *lag_dev,
6788 					 unsigned long event, u16 vid,
6789 					 struct netlink_ext_ack *extack)
6790 {
6791 	struct net_device *port_dev;
6792 	struct list_head *iter;
6793 	int err;
6794 
6795 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6796 		if (mlxsw_sp_port_dev_check(port_dev)) {
6797 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6798 								port_dev,
6799 								event, vid,
6800 								extack);
6801 			if (err)
6802 				return err;
6803 		}
6804 	}
6805 
6806 	return 0;
6807 }
6808 
6809 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
6810 				       unsigned long event,
6811 				       struct netlink_ext_ack *extack)
6812 {
6813 	if (netif_is_bridge_port(lag_dev))
6814 		return 0;
6815 
6816 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
6817 					     MLXSW_SP_DEFAULT_VID, extack);
6818 }
6819 
6820 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
6821 					  struct net_device *l3_dev,
6822 					  unsigned long event,
6823 					  struct netlink_ext_ack *extack)
6824 {
6825 	struct mlxsw_sp_rif_params params = {
6826 		.dev = l3_dev,
6827 	};
6828 	struct mlxsw_sp_rif *rif;
6829 
6830 	switch (event) {
6831 	case NETDEV_UP:
6832 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
6833 		if (IS_ERR(rif))
6834 			return PTR_ERR(rif);
6835 		break;
6836 	case NETDEV_DOWN:
6837 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6838 		mlxsw_sp_rif_destroy(rif);
6839 		break;
6840 	}
6841 
6842 	return 0;
6843 }
6844 
6845 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
6846 					struct net_device *vlan_dev,
6847 					unsigned long event,
6848 					struct netlink_ext_ack *extack)
6849 {
6850 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
6851 	u16 vid = vlan_dev_vlan_id(vlan_dev);
6852 
6853 	if (netif_is_bridge_port(vlan_dev))
6854 		return 0;
6855 
6856 	if (mlxsw_sp_port_dev_check(real_dev))
6857 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
6858 							 event, vid, extack);
6859 	else if (netif_is_lag_master(real_dev))
6860 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
6861 						     vid, extack);
6862 	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
6863 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
6864 						      extack);
6865 
6866 	return 0;
6867 }
6868 
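/* VRRP virtual router MAC addresses (RFC 5798) are 00:00:5e:00:01:{VRID}
 * for IPv4 and 00:00:5e:00:02:{VRID} for IPv6. Only the last byte (the
 * VRID) varies, hence the zero byte in the mask.
 */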
6869 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
6870 {
6871 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
6872 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6873 
6874 	return ether_addr_equal_masked(mac, vrrp4, mask);
6875 }
6876 
6877 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
6878 {
6879 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
6880 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6881 
6882 	return ether_addr_equal_masked(mac, vrrp6, mask);
6883 }
6884 
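/* Program the VRID carried in the last byte of a VRRP virtual MAC into
 * the RIF, so that the router also answers for the virtual MAC. A VRID
 * of zero is written when the macvlan is removed.
 */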
6885 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
6886 				const u8 *mac, bool adding)
6887 {
6888 	char ritr_pl[MLXSW_REG_RITR_LEN];
6889 	u8 vrrp_id = adding ? mac[5] : 0;
6890 	int err;
6891 
6892 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
6893 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
6894 		return 0;
6895 
6896 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
6897 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6898 	if (err)
6899 		return err;
6900 
6901 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
6902 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
6903 	else
6904 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
6905 
6906 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6907 }
6908 
6909 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
6910 				    const struct net_device *macvlan_dev,
6911 				    struct netlink_ext_ack *extack)
6912 {
6913 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6914 	struct mlxsw_sp_rif *rif;
6915 	int err;
6916 
6917 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6918 	if (!rif) {
6919 		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
6920 		return -EOPNOTSUPP;
6921 	}
6922 
6923 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6924 				  mlxsw_sp_fid_index(rif->fid), true);
6925 	if (err)
6926 		return err;
6927 
6928 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
6929 				   macvlan_dev->dev_addr, true);
6930 	if (err)
6931 		goto err_rif_vrrp_add;
6932 
6933 	/* Make sure the bridge driver does not have this MAC pointing at
6934 	 * some other port.
6935 	 */
6936 	if (rif->ops->fdb_del)
6937 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
6938 
6939 	return 0;
6940 
6941 err_rif_vrrp_add:
6942 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6943 			    mlxsw_sp_fid_index(rif->fid), false);
6944 	return err;
6945 }
6946 
6947 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
6948 				       const struct net_device *macvlan_dev)
6949 {
6950 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6951 	struct mlxsw_sp_rif *rif;
6952 
6953 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6954 	/* If we do not have a RIF, then we already took care of
6955 	 * removing the macvlan's MAC during RIF deletion.
6956 	 */
6957 	if (!rif)
6958 		return;
6959 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
6960 			     false);
6961 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6962 			    mlxsw_sp_fid_index(rif->fid), false);
6963 }
6964 
6965 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
6966 			      const struct net_device *macvlan_dev)
6967 {
6968 	mutex_lock(&mlxsw_sp->router->lock);
6969 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
6970 	mutex_unlock(&mlxsw_sp->router->lock);
6971 }
6972 
6973 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
6974 					   struct net_device *macvlan_dev,
6975 					   unsigned long event,
6976 					   struct netlink_ext_ack *extack)
6977 {
6978 	switch (event) {
6979 	case NETDEV_UP:
6980 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
6981 	case NETDEV_DOWN:
6982 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
6983 		break;
6984 	}
6985 
6986 	return 0;
6987 }
6988 
6989 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
6990 					       struct net_device *dev,
6991 					       const unsigned char *dev_addr,
6992 					       struct netlink_ext_ack *extack)
6993 {
6994 	struct mlxsw_sp_rif *rif;
6995 	int i;
6996 
	/* A RIF is not created for macvlan netdevs. Their MAC is used to
	 * populate the FDB.
	 */
7000 	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
7001 		return 0;
7002 
7003 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
7004 		rif = mlxsw_sp->router->rifs[i];
7005 		if (rif && rif->ops &&
7006 		    rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
7007 			continue;
7008 		if (rif && rif->dev && rif->dev != dev &&
7009 		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
7010 					     mlxsw_sp->mac_mask)) {
7011 			NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
7012 			return -EINVAL;
7013 		}
7014 	}
7015 
7016 	return 0;
7017 }
7018 
7019 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
7020 				     struct net_device *dev,
7021 				     unsigned long event,
7022 				     struct netlink_ext_ack *extack)
7023 {
7024 	if (mlxsw_sp_port_dev_check(dev))
7025 		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
7026 	else if (netif_is_lag_master(dev))
7027 		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
7028 	else if (netif_is_bridge_master(dev))
7029 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
7030 						      extack);
7031 	else if (is_vlan_dev(dev))
7032 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
7033 						    extack);
7034 	else if (netif_is_macvlan(dev))
7035 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
7036 						       extack);
7037 	else
7038 		return 0;
7039 }
7040 
7041 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
7042 				   unsigned long event, void *ptr)
7043 {
7044 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
7045 	struct net_device *dev = ifa->ifa_dev->dev;
7046 	struct mlxsw_sp_router *router;
7047 	struct mlxsw_sp_rif *rif;
7048 	int err = 0;
7049 
7050 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
7051 	if (event == NETDEV_UP)
7052 		return NOTIFY_DONE;
7053 
7054 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
7055 	mutex_lock(&router->lock);
7056 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
7057 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7058 		goto out;
7059 
7060 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
7061 out:
7062 	mutex_unlock(&router->lock);
7063 	return notifier_from_errno(err);
7064 }
7065 
7066 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
7067 				  unsigned long event, void *ptr)
7068 {
7069 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
7070 	struct net_device *dev = ivi->ivi_dev->dev;
7071 	struct mlxsw_sp *mlxsw_sp;
7072 	struct mlxsw_sp_rif *rif;
7073 	int err = 0;
7074 
7075 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7076 	if (!mlxsw_sp)
7077 		return NOTIFY_DONE;
7078 
7079 	mutex_lock(&mlxsw_sp->router->lock);
7080 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7081 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7082 		goto out;
7083 
7084 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7085 						  ivi->extack);
7086 	if (err)
7087 		goto out;
7088 
7089 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
7090 out:
7091 	mutex_unlock(&mlxsw_sp->router->lock);
7092 	return notifier_from_errno(err);
7093 }
7094 
7095 struct mlxsw_sp_inet6addr_event_work {
7096 	struct work_struct work;
7097 	struct mlxsw_sp *mlxsw_sp;
7098 	struct net_device *dev;
7099 	unsigned long event;
7100 };
7101 
7102 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
7103 {
7104 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
7105 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
7106 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
7107 	struct net_device *dev = inet6addr_work->dev;
7108 	unsigned long event = inet6addr_work->event;
7109 	struct mlxsw_sp_rif *rif;
7110 
7111 	rtnl_lock();
7112 	mutex_lock(&mlxsw_sp->router->lock);
7113 
7114 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7115 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7116 		goto out;
7117 
7118 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
7119 out:
7120 	mutex_unlock(&mlxsw_sp->router->lock);
7121 	rtnl_unlock();
7122 	dev_put(dev);
7123 	kfree(inet6addr_work);
7124 }
7125 
7126 /* Called with rcu_read_lock() */
7127 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
7128 				    unsigned long event, void *ptr)
7129 {
7130 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
7131 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
7132 	struct net_device *dev = if6->idev->dev;
7133 	struct mlxsw_sp_router *router;
7134 
7135 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
7136 	if (event == NETDEV_UP)
7137 		return NOTIFY_DONE;
7138 
7139 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
7140 	if (!inet6addr_work)
7141 		return NOTIFY_BAD;
7142 
7143 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
7144 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
7145 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
7146 	inet6addr_work->dev = dev;
7147 	inet6addr_work->event = event;
7148 	dev_hold(dev);
7149 	mlxsw_core_schedule_work(&inet6addr_work->work);
7150 
7151 	return NOTIFY_DONE;
7152 }
7153 
7154 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
7155 				   unsigned long event, void *ptr)
7156 {
7157 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
7158 	struct net_device *dev = i6vi->i6vi_dev->dev;
7159 	struct mlxsw_sp *mlxsw_sp;
7160 	struct mlxsw_sp_rif *rif;
7161 	int err = 0;
7162 
7163 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7164 	if (!mlxsw_sp)
7165 		return NOTIFY_DONE;
7166 
7167 	mutex_lock(&mlxsw_sp->router->lock);
7168 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7169 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7170 		goto out;
7171 
7172 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7173 						  i6vi->extack);
7174 	if (err)
7175 		goto out;
7176 
7177 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
7178 out:
7179 	mutex_unlock(&mlxsw_sp->router->lock);
7180 	return notifier_from_errno(err);
7181 }
7182 
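/* Edit the MAC and MTU of an already created RIF: query the current RITR
 * entry from the device, update the relevant fields and write it back.
 */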
7183 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7184 			     const char *mac, int mtu)
7185 {
7186 	char ritr_pl[MLXSW_REG_RITR_LEN];
7187 	int err;
7188 
7189 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
7190 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7191 	if (err)
7192 		return err;
7193 
7194 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
7195 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
7196 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
7197 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7198 }
7199 
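/* Handle a change in the MAC or MTU of a RIF's netdev: replace the FDB
 * entry directing the old MAC to the router with one for the new MAC,
 * update the RITR entry and, since multicast routing tables cache the
 * RIF MTU, update the relevant mr_table instances as well.
 */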
7200 static int
7201 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
7202 				  struct mlxsw_sp_rif *rif)
7203 {
7204 	struct net_device *dev = rif->dev;
7205 	u16 fid_index;
7206 	int err;
7207 
7208 	fid_index = mlxsw_sp_fid_index(rif->fid);
7209 
7210 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
7211 	if (err)
7212 		return err;
7213 
7214 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
7215 				dev->mtu);
7216 	if (err)
7217 		goto err_rif_edit;
7218 
7219 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
7220 	if (err)
7221 		goto err_rif_fdb_op;
7222 
7223 	if (rif->mtu != dev->mtu) {
7224 		struct mlxsw_sp_vr *vr;
7225 		int i;
7226 
		/* The RIF is relevant only to its own mr_table instance, since
		 * unlike in unicast routing, a RIF in multicast routing cannot
		 * be shared between several multicast routing tables.
		 */
7231 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
7232 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
7233 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
7234 						   rif, dev->mtu);
7235 	}
7236 
7237 	ether_addr_copy(rif->addr, dev->dev_addr);
7238 	rif->mtu = dev->mtu;
7239 
7240 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
7241 
7242 	return 0;
7243 
7244 err_rif_fdb_op:
7245 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
7246 err_rif_edit:
7247 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
7248 	return err;
7249 }
7250 
7251 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
7252 			    struct netdev_notifier_pre_changeaddr_info *info)
7253 {
7254 	struct netlink_ext_ack *extack;
7255 
7256 	extack = netdev_notifier_info_to_extack(&info->info);
7257 	return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
7258 						   info->dev_addr, extack);
7259 }
7260 
7261 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
7262 					 unsigned long event, void *ptr)
7263 {
7264 	struct mlxsw_sp *mlxsw_sp;
7265 	struct mlxsw_sp_rif *rif;
7266 	int err = 0;
7267 
7268 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7269 	if (!mlxsw_sp)
7270 		return 0;
7271 
7272 	mutex_lock(&mlxsw_sp->router->lock);
7273 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7274 	if (!rif)
7275 		goto out;
7276 
7277 	switch (event) {
	case NETDEV_CHANGEMTU:
7279 	case NETDEV_CHANGEADDR:
7280 		err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
7281 		break;
7282 	case NETDEV_PRE_CHANGEADDR:
7283 		err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
7284 		break;
7285 	}
7286 
7287 out:
7288 	mutex_unlock(&mlxsw_sp->router->lock);
7289 	return err;
7290 }
7291 
7292 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
7293 				  struct net_device *l3_dev,
7294 				  struct netlink_ext_ack *extack)
7295 {
7296 	struct mlxsw_sp_rif *rif;
7297 
7298 	/* If netdev is already associated with a RIF, then we need to
7299 	 * destroy it and create a new one with the new virtual router ID.
7300 	 */
7301 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7302 	if (rif)
7303 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
7304 					  extack);
7305 
7306 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
7307 }
7308 
7309 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
7310 				    struct net_device *l3_dev)
7311 {
7312 	struct mlxsw_sp_rif *rif;
7313 
7314 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7315 	if (!rif)
7316 		return;
7317 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
7318 }
7319 
7320 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
7321 				 struct netdev_notifier_changeupper_info *info)
7322 {
7323 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
7324 	int err = 0;
7325 
7326 	/* We do not create a RIF for a macvlan, but only use it to
7327 	 * direct more MAC addresses to the router.
7328 	 */
7329 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
7330 		return 0;
7331 
7332 	mutex_lock(&mlxsw_sp->router->lock);
7333 	switch (event) {
7334 	case NETDEV_PRECHANGEUPPER:
7335 		break;
7336 	case NETDEV_CHANGEUPPER:
7337 		if (info->linking) {
7338 			struct netlink_ext_ack *extack;
7339 
7340 			extack = netdev_notifier_info_to_extack(&info->info);
7341 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
7342 		} else {
7343 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
7344 		}
7345 		break;
7346 	}
7347 	mutex_unlock(&mlxsw_sp->router->lock);
7348 
7349 	return err;
7350 }
7351 
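/* When a RIF is destroyed, the FDB entries directing the MAC addresses
 * of its macvlan uppers to the router become stale. Walk all upper
 * devices and remove these entries from the device's FDB.
 */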
7352 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
7353 {
7354 	struct mlxsw_sp_rif *rif = data;
7355 
7356 	if (!netif_is_macvlan(dev))
7357 		return 0;
7358 
7359 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
7360 				   mlxsw_sp_fid_index(rif->fid), false);
7361 }
7362 
7363 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
7364 {
7365 	if (!netif_is_macvlan_port(rif->dev))
7366 		return 0;
7367 
7368 	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
7369 	return netdev_walk_all_upper_dev_rcu(rif->dev,
7370 					     __mlxsw_sp_rif_macvlan_flush, rif);
7371 }
7372 
7373 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
7374 				       const struct mlxsw_sp_rif_params *params)
7375 {
7376 	struct mlxsw_sp_rif_subport *rif_subport;
7377 
7378 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
7379 	refcount_set(&rif_subport->ref_count, 1);
7380 	rif_subport->vid = params->vid;
7381 	rif_subport->lag = params->lag;
7382 	if (params->lag)
7383 		rif_subport->lag_id = params->lag_id;
7384 	else
7385 		rif_subport->system_port = params->system_port;
7386 }
7387 
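/* Enable or disable the RITR entry of a sub-port RIF, which is bound to a
 * {system port or LAG ID, VID} pair.
 */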
7388 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
7389 {
7390 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7391 	struct mlxsw_sp_rif_subport *rif_subport;
7392 	char ritr_pl[MLXSW_REG_RITR_LEN];
7393 
7394 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
7395 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
7396 			    rif->rif_index, rif->vr_id, rif->dev->mtu);
7397 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7398 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
7399 				  rif_subport->lag ? rif_subport->lag_id :
7400 						     rif_subport->system_port,
7401 				  rif_subport->vid);
7402 
7403 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7404 }
7405 
7406 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
7407 {
7408 	int err;
7409 
7410 	err = mlxsw_sp_rif_subport_op(rif, true);
7411 	if (err)
7412 		return err;
7413 
7414 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7415 				  mlxsw_sp_fid_index(rif->fid), true);
7416 	if (err)
7417 		goto err_rif_fdb_op;
7418 
7419 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7420 	return 0;
7421 
7422 err_rif_fdb_op:
7423 	mlxsw_sp_rif_subport_op(rif, false);
7424 	return err;
7425 }
7426 
7427 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
7428 {
7429 	struct mlxsw_sp_fid *fid = rif->fid;
7430 
7431 	mlxsw_sp_fid_rif_set(fid, NULL);
7432 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7433 			    mlxsw_sp_fid_index(fid), false);
7434 	mlxsw_sp_rif_macvlan_flush(rif);
7435 	mlxsw_sp_rif_subport_op(rif, false);
7436 }
7437 
7438 static struct mlxsw_sp_fid *
7439 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
7440 			     struct netlink_ext_ack *extack)
7441 {
7442 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
7443 }
7444 
7445 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
7446 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
7447 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
7448 	.setup			= mlxsw_sp_rif_subport_setup,
7449 	.configure		= mlxsw_sp_rif_subport_configure,
7450 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
7451 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
7452 };
7453 
7454 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
7455 				    enum mlxsw_reg_ritr_if_type type,
7456 				    u16 vid_fid, bool enable)
7457 {
7458 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7459 	char ritr_pl[MLXSW_REG_RITR_LEN];
7460 
7461 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
7462 			    rif->dev->mtu);
7463 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7464 	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
7465 
7466 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7467 }
7468 
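/* The router port is a virtual port immediately following the last front
 * panel port. It is used below as the flood destination through which
 * broadcast and multicast packets reach the router.
 */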
7469 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
7470 {
7471 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
7472 }
7473 
7474 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
7475 {
7476 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7477 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7478 	int err;
7479 
7480 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
7481 	if (err)
7482 		return err;
7483 
7484 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7485 				     mlxsw_sp_router_port(mlxsw_sp), true);
7486 	if (err)
7487 		goto err_fid_mc_flood_set;
7488 
7489 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7490 				     mlxsw_sp_router_port(mlxsw_sp), true);
7491 	if (err)
7492 		goto err_fid_bc_flood_set;
7493 
7494 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7495 				  mlxsw_sp_fid_index(rif->fid), true);
7496 	if (err)
7497 		goto err_rif_fdb_op;
7498 
7499 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7500 	return 0;
7501 
7502 err_rif_fdb_op:
7503 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7504 			       mlxsw_sp_router_port(mlxsw_sp), false);
7505 err_fid_bc_flood_set:
7506 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7507 			       mlxsw_sp_router_port(mlxsw_sp), false);
7508 err_fid_mc_flood_set:
7509 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
7510 	return err;
7511 }
7512 
7513 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
7514 {
7515 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7516 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7517 	struct mlxsw_sp_fid *fid = rif->fid;
7518 
7519 	mlxsw_sp_fid_rif_set(fid, NULL);
7520 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7521 			    mlxsw_sp_fid_index(fid), false);
7522 	mlxsw_sp_rif_macvlan_flush(rif);
7523 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7524 			       mlxsw_sp_router_port(mlxsw_sp), false);
7525 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7526 			       mlxsw_sp_router_port(mlxsw_sp), false);
7527 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
7528 }
7529 
7530 static struct mlxsw_sp_fid *
7531 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
7532 			  struct netlink_ext_ack *extack)
7533 {
7534 	struct net_device *br_dev = rif->dev;
7535 	u16 vid;
7536 	int err;
7537 
7538 	if (is_vlan_dev(rif->dev)) {
7539 		vid = vlan_dev_vlan_id(rif->dev);
7540 		br_dev = vlan_dev_real_dev(rif->dev);
7541 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
7542 			return ERR_PTR(-EINVAL);
7543 	} else {
7544 		err = br_vlan_get_pvid(rif->dev, &vid);
7545 		if (err < 0 || !vid) {
7546 			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
7547 			return ERR_PTR(-EINVAL);
7548 		}
7549 	}
7550 
7551 	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
7552 }
7553 
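/* Notify the bridge driver that the FDB entry directing 'mac' to the
 * router is gone: find the bridge port behind which the address was
 * learned and emit SWITCHDEV_FDB_DEL_TO_BRIDGE towards it.
 */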
7554 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7555 {
7556 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7557 	struct switchdev_notifier_fdb_info info;
7558 	struct net_device *br_dev;
7559 	struct net_device *dev;
7560 
7561 	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
7562 	dev = br_fdb_find_port(br_dev, mac, vid);
7563 	if (!dev)
7564 		return;
7565 
7566 	info.addr = mac;
7567 	info.vid = vid;
7568 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7569 				 NULL);
7570 }
7571 
7572 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
7573 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
7574 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7575 	.configure		= mlxsw_sp_rif_vlan_configure,
7576 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
7577 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
7578 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
7579 };
7580 
7581 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
7582 {
7583 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7584 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7585 	int err;
7586 
7587 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
7588 				       true);
7589 	if (err)
7590 		return err;
7591 
7592 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7593 				     mlxsw_sp_router_port(mlxsw_sp), true);
7594 	if (err)
7595 		goto err_fid_mc_flood_set;
7596 
7597 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7598 				     mlxsw_sp_router_port(mlxsw_sp), true);
7599 	if (err)
7600 		goto err_fid_bc_flood_set;
7601 
7602 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7603 				  mlxsw_sp_fid_index(rif->fid), true);
7604 	if (err)
7605 		goto err_rif_fdb_op;
7606 
7607 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7608 	return 0;
7609 
7610 err_rif_fdb_op:
7611 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7612 			       mlxsw_sp_router_port(mlxsw_sp), false);
7613 err_fid_bc_flood_set:
7614 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7615 			       mlxsw_sp_router_port(mlxsw_sp), false);
7616 err_fid_mc_flood_set:
7617 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7618 	return err;
7619 }
7620 
7621 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
7622 {
7623 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7624 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7625 	struct mlxsw_sp_fid *fid = rif->fid;
7626 
7627 	mlxsw_sp_fid_rif_set(fid, NULL);
7628 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7629 			    mlxsw_sp_fid_index(fid), false);
7630 	mlxsw_sp_rif_macvlan_flush(rif);
7631 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7632 			       mlxsw_sp_router_port(mlxsw_sp), false);
7633 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7634 			       mlxsw_sp_router_port(mlxsw_sp), false);
7635 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7636 }
7637 
7638 static struct mlxsw_sp_fid *
7639 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
7640 			 struct netlink_ext_ack *extack)
7641 {
7642 	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
7643 }
7644 
7645 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7646 {
7647 	struct switchdev_notifier_fdb_info info;
7648 	struct net_device *dev;
7649 
7650 	dev = br_fdb_find_port(rif->dev, mac, 0);
7651 	if (!dev)
7652 		return;
7653 
7654 	info.addr = mac;
7655 	info.vid = 0;
7656 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7657 				 NULL);
7658 }
7659 
7660 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
7661 	.type			= MLXSW_SP_RIF_TYPE_FID,
7662 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7663 	.configure		= mlxsw_sp_rif_fid_configure,
7664 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
7665 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
7666 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
7667 };
7668 
7669 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
7670 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
7671 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7672 	.configure		= mlxsw_sp_rif_fid_configure,
7673 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
7674 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
7675 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
7676 };
7677 
7678 static struct mlxsw_sp_rif_ipip_lb *
7679 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
7680 {
7681 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
7682 }
7683 
7684 static void
7685 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
7686 			   const struct mlxsw_sp_rif_params *params)
7687 {
7688 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
7689 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
7690 
7691 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
7692 				 common);
7693 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
7694 	rif_lb->lb_config = params_lb->lb_config;
7695 }
7696 
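/* On Spectrum-1, the loopback RIF of an IP-in-IP tunnel is bound directly
 * to the underlay virtual router (ul_vr_id). On Spectrum-2 it is instead
 * bound to an underlay RIF; see mlxsw_sp2_rif_ipip_lb_configure() below.
 */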
7697 static int
7698 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7699 {
7700 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7701 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7702 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7703 	struct mlxsw_sp_vr *ul_vr;
7704 	int err;
7705 
7706 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
7707 	if (IS_ERR(ul_vr))
7708 		return PTR_ERR(ul_vr);
7709 
7710 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
7711 	if (err)
7712 		goto err_loopback_op;
7713 
7714 	lb_rif->ul_vr_id = ul_vr->id;
7715 	lb_rif->ul_rif_id = 0;
7716 	++ul_vr->rif_count;
7717 	return 0;
7718 
7719 err_loopback_op:
7720 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7721 	return err;
7722 }
7723 
7724 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7725 {
7726 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7727 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7728 	struct mlxsw_sp_vr *ul_vr;
7729 
7730 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
7731 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
7732 
7733 	--ul_vr->rif_count;
7734 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7735 }
7736 
7737 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
7738 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
7739 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup			= mlxsw_sp_rif_ipip_lb_setup,
7741 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
7742 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
7743 };
7744 
7745 const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
7746 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
7747 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
7748 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
7749 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
7750 };
7751 
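/* Write the RITR entry of an underlay RIF: a generic loopback interface
 * in the underlay virtual router, to which Spectrum-2 IP-in-IP loopback
 * RIFs are bound.
 */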
7752 static int
7753 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
7754 {
7755 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7756 	char ritr_pl[MLXSW_REG_RITR_LEN];
7757 
7758 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
7759 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
7760 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
7761 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
7762 
7763 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7764 }
7765 
7766 static struct mlxsw_sp_rif *
7767 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
7768 		       struct netlink_ext_ack *extack)
7769 {
7770 	struct mlxsw_sp_rif *ul_rif;
7771 	u16 rif_index;
7772 	int err;
7773 
7774 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
7775 	if (err) {
7776 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
7777 		return ERR_PTR(err);
7778 	}
7779 
7780 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
7781 	if (!ul_rif)
7782 		return ERR_PTR(-ENOMEM);
7783 
7784 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
7785 	ul_rif->mlxsw_sp = mlxsw_sp;
7786 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
7787 	if (err)
7788 		goto ul_rif_op_err;
7789 
7790 	return ul_rif;
7791 
7792 ul_rif_op_err:
7793 	mlxsw_sp->router->rifs[rif_index] = NULL;
7794 	kfree(ul_rif);
7795 	return ERR_PTR(err);
7796 }
7797 
7798 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
7799 {
7800 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7801 
7802 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
7803 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
7804 	kfree(ul_rif);
7805 }
7806 
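/* Underlay RIFs are shared per virtual router and reference counted: the
 * first getter creates the RIF, subsequent getters only bump
 * ul_rif_refcnt.
 */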
7807 static struct mlxsw_sp_rif *
7808 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
7809 		    struct netlink_ext_ack *extack)
7810 {
7811 	struct mlxsw_sp_vr *vr;
7812 	int err;
7813 
7814 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
7815 	if (IS_ERR(vr))
7816 		return ERR_CAST(vr);
7817 
7818 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
7819 		return vr->ul_rif;
7820 
7821 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
7822 	if (IS_ERR(vr->ul_rif)) {
7823 		err = PTR_ERR(vr->ul_rif);
7824 		goto err_ul_rif_create;
7825 	}
7826 
7827 	vr->rif_count++;
7828 	refcount_set(&vr->ul_rif_refcnt, 1);
7829 
7830 	return vr->ul_rif;
7831 
7832 err_ul_rif_create:
7833 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7834 	return ERR_PTR(err);
7835 }
7836 
7837 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
7838 {
7839 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7840 	struct mlxsw_sp_vr *vr;
7841 
7842 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
7843 
7844 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
7845 		return;
7846 
7847 	vr->rif_count--;
7848 	mlxsw_sp_ul_rif_destroy(ul_rif);
7849 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7850 }
7851 
7852 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
7853 			       u16 *ul_rif_index)
7854 {
7855 	struct mlxsw_sp_rif *ul_rif;
7856 	int err = 0;
7857 
7858 	mutex_lock(&mlxsw_sp->router->lock);
7859 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
7860 	if (IS_ERR(ul_rif)) {
7861 		err = PTR_ERR(ul_rif);
7862 		goto out;
7863 	}
7864 	*ul_rif_index = ul_rif->rif_index;
7865 out:
7866 	mutex_unlock(&mlxsw_sp->router->lock);
7867 	return err;
7868 }
7869 
7870 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
7871 {
7872 	struct mlxsw_sp_rif *ul_rif;
7873 
7874 	mutex_lock(&mlxsw_sp->router->lock);
7875 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
7876 	if (WARN_ON(!ul_rif))
7877 		goto out;
7878 
7879 	mlxsw_sp_ul_rif_put(ul_rif);
7880 out:
7881 	mutex_unlock(&mlxsw_sp->router->lock);
7882 }
7883 
7884 static int
7885 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7886 {
7887 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7888 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7889 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7890 	struct mlxsw_sp_rif *ul_rif;
7891 	int err;
7892 
7893 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
7894 	if (IS_ERR(ul_rif))
7895 		return PTR_ERR(ul_rif);
7896 
7897 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
7898 	if (err)
7899 		goto err_loopback_op;
7900 
7901 	lb_rif->ul_vr_id = 0;
7902 	lb_rif->ul_rif_id = ul_rif->rif_index;
7903 
7904 	return 0;
7905 
7906 err_loopback_op:
7907 	mlxsw_sp_ul_rif_put(ul_rif);
7908 	return err;
7909 }
7910 
7911 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7912 {
7913 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7914 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7915 	struct mlxsw_sp_rif *ul_rif;
7916 
7917 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
7918 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
7919 	mlxsw_sp_ul_rif_put(ul_rif);
7920 }
7921 
7922 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
7923 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
7924 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup			= mlxsw_sp_rif_ipip_lb_setup,
7926 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
7927 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
7928 };
7929 
7930 const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
7931 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
7932 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
7933 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
7934 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
7935 };
7936 
7937 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
7938 {
7939 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7940 
7941 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
7942 					 sizeof(struct mlxsw_sp_rif *),
7943 					 GFP_KERNEL);
7944 	if (!mlxsw_sp->router->rifs)
7945 		return -ENOMEM;
7946 
7947 	return 0;
7948 }
7949 
7950 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
7951 {
7952 	int i;
7953 
7954 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7955 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
7956 
7957 	kfree(mlxsw_sp->router->rifs);
7958 }
7959 
7960 static int
7961 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
7962 {
7963 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
7964 
7965 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
7966 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
7967 }
7968 
7969 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
7970 {
7971 	int err;
7972 
7973 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
7974 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
7975 
7976 	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
7977 	if (err)
7978 		return err;
7979 	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
7980 	if (err)
7981 		return err;
7982 
7983 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
7984 }
7985 
7986 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
7987 {
7988 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
7989 }
7990 
7991 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
7992 {
7993 	struct mlxsw_sp_router *router;
7994 
7995 	/* Flush pending FIB notifications and then flush the device's
7996 	 * table before requesting another dump. The FIB notification
7997 	 * block is unregistered, so no need to take RTNL.
7998 	 */
7999 	mlxsw_core_flush_owq();
8000 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
8001 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
8002 }
8003 
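/* The fields feeding the ECMP hash are selected through the RECR2
 * register. The selection mirrors the kernel's multipath hash policy
 * sysctls: hash on L3 fields only, or additionally on L4 fields.
 */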
8004 #ifdef CONFIG_IP_ROUTE_MULTIPATH
8005 static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
8006 {
8007 	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
8008 }
8009 
8010 static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
8011 {
8012 	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
8013 }
8014 
8015 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
8016 {
8017 	struct net *net = mlxsw_sp_net(mlxsw_sp);
8018 	bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
8019 
8020 	mlxsw_sp_mp_hash_header_set(recr2_pl,
8021 				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
8022 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
8023 	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
8024 	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
8025 	if (only_l3)
8026 		return;
8027 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
8028 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
8029 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
8030 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
8031 }
8032 
8033 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
8034 {
8035 	bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
8036 
8037 	mlxsw_sp_mp_hash_header_set(recr2_pl,
8038 				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
8039 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
8040 	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
8041 	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
8042 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
8043 	if (only_l3) {
8044 		mlxsw_sp_mp_hash_field_set(recr2_pl,
8045 					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
8046 	} else {
8047 		mlxsw_sp_mp_hash_header_set(recr2_pl,
8048 					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
8049 		mlxsw_sp_mp_hash_field_set(recr2_pl,
8050 					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
8051 		mlxsw_sp_mp_hash_field_set(recr2_pl,
8052 					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
8053 	}
8054 }
8055 
8056 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8057 {
8058 	char recr2_pl[MLXSW_REG_RECR2_LEN];
8059 	u32 seed;
8060 
8061 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
8062 	mlxsw_reg_recr2_pack(recr2_pl, seed);
8063 	mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
8064 	mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
8065 
8066 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
8067 }
8068 #else
8069 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8070 {
8071 	return 0;
8072 }
8073 #endif
8074 
8075 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
8076 {
8077 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
8078 	unsigned int i;
8079 
8080 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
8081 
	/* HW determines the switch priority based on the DSCP bits, but the
	 * kernel still does so based on the ToS. The two fields are offset by
	 * the two least-significant ECN bits, so translate each DSCP value to
	 * the priority the kernel would derive from the corresponding ToS,
	 * i.e., the DSCP shifted past the ECN bits.
	 */
8087 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
8088 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
8089 
8090 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
8091 }
8092 
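/* Enable the router and set its global properties through the RGCR
 * register: the maximum number of RIFs and whether packet priority should
 * be updated after forwarding, as controlled by the
 * ip_fwd_update_priority sysctl.
 */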
8093 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
8094 {
8095 	struct net *net = mlxsw_sp_net(mlxsw_sp);
8096 	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
8097 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8112 }
8113 
8114 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8115 {
8116 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
8117 
8118 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
8119 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8120 }
8121 
8122 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
8123 			 struct netlink_ext_ack *extack)
8124 {
8125 	struct mlxsw_sp_router *router;
8126 	int err;
8127 
	router = kzalloc(sizeof(*router), GFP_KERNEL);
8129 	if (!router)
8130 		return -ENOMEM;
8131 	mutex_init(&router->lock);
8132 	mlxsw_sp->router = router;
8133 	router->mlxsw_sp = mlxsw_sp;
8134 
8135 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
8136 	err = register_inetaddr_notifier(&router->inetaddr_nb);
8137 	if (err)
8138 		goto err_register_inetaddr_notifier;
8139 
8140 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
8141 	err = register_inet6addr_notifier(&router->inet6addr_nb);
8142 	if (err)
8143 		goto err_register_inet6addr_notifier;
8144 
8145 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
8146 	err = __mlxsw_sp_router_init(mlxsw_sp);
8147 	if (err)
8148 		goto err_router_init;
8149 
8150 	err = mlxsw_sp_rifs_init(mlxsw_sp);
8151 	if (err)
8152 		goto err_rifs_init;
8153 
8154 	err = mlxsw_sp_ipips_init(mlxsw_sp);
8155 	if (err)
8156 		goto err_ipips_init;
8157 
8158 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
8159 			      &mlxsw_sp_nexthop_ht_params);
8160 	if (err)
8161 		goto err_nexthop_ht_init;
8162 
8163 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
8164 			      &mlxsw_sp_nexthop_group_ht_params);
8165 	if (err)
8166 		goto err_nexthop_group_ht_init;
8167 
8168 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
8169 	err = mlxsw_sp_lpm_init(mlxsw_sp);
8170 	if (err)
8171 		goto err_lpm_init;
8172 
8173 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
8174 	if (err)
8175 		goto err_mr_init;
8176 
8177 	err = mlxsw_sp_vrs_init(mlxsw_sp);
8178 	if (err)
8179 		goto err_vrs_init;
8180 
8181 	err = mlxsw_sp_neigh_init(mlxsw_sp);
8182 	if (err)
8183 		goto err_neigh_init;
8184 
8185 	mlxsw_sp->router->netevent_nb.notifier_call =
8186 		mlxsw_sp_router_netevent_event;
8187 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8188 	if (err)
8189 		goto err_register_netevent_notifier;
8190 
8191 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
8192 	if (err)
8193 		goto err_mp_hash_init;
8194 
8195 	err = mlxsw_sp_dscp_init(mlxsw_sp);
8196 	if (err)
8197 		goto err_dscp_init;
8198 
8199 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
8200 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8201 				    &mlxsw_sp->router->fib_nb,
8202 				    mlxsw_sp_router_fib_dump_flush, extack);
8203 	if (err)
8204 		goto err_register_fib_notifier;
8205 
8206 	return 0;
8207 
8208 err_register_fib_notifier:
8209 err_dscp_init:
8210 err_mp_hash_init:
8211 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8212 err_register_netevent_notifier:
8213 	mlxsw_sp_neigh_fini(mlxsw_sp);
8214 err_neigh_init:
8215 	mlxsw_sp_vrs_fini(mlxsw_sp);
8216 err_vrs_init:
8217 	mlxsw_sp_mr_fini(mlxsw_sp);
8218 err_mr_init:
8219 	mlxsw_sp_lpm_fini(mlxsw_sp);
8220 err_lpm_init:
8221 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8222 err_nexthop_group_ht_init:
8223 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8224 err_nexthop_ht_init:
8225 	mlxsw_sp_ipips_fini(mlxsw_sp);
8226 err_ipips_init:
8227 	mlxsw_sp_rifs_fini(mlxsw_sp);
8228 err_rifs_init:
8229 	__mlxsw_sp_router_fini(mlxsw_sp);
8230 err_router_init:
8231 	unregister_inet6addr_notifier(&router->inet6addr_nb);
8232 err_register_inet6addr_notifier:
8233 	unregister_inetaddr_notifier(&router->inetaddr_nb);
8234 err_register_inetaddr_notifier:
8235 	mutex_destroy(&mlxsw_sp->router->lock);
8236 	kfree(mlxsw_sp->router);
8237 	return err;
8238 }
8239 
8240 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8241 {
8242 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8243 				&mlxsw_sp->router->fib_nb);
8244 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8245 	mlxsw_sp_neigh_fini(mlxsw_sp);
8246 	mlxsw_sp_vrs_fini(mlxsw_sp);
8247 	mlxsw_sp_mr_fini(mlxsw_sp);
8248 	mlxsw_sp_lpm_fini(mlxsw_sp);
8249 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8250 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8251 	mlxsw_sp_ipips_fini(mlxsw_sp);
8252 	mlxsw_sp_rifs_fini(mlxsw_sp);
8253 	__mlxsw_sp_router_fini(mlxsw_sp);
8254 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
8255 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
8256 	mutex_destroy(&mlxsw_sp->router->lock);
8257 	kfree(mlxsw_sp->router);
8258 }
8259