1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
8 #include <linux/in6.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <net/netevent.h>
22 #include <net/neighbour.h>
23 #include <net/arp.h>
24 #include <net/ip_fib.h>
25 #include <net/ip6_fib.h>
26 #include <net/nexthop.h>
27 #include <net/fib_rules.h>
28 #include <net/ip_tunnels.h>
29 #include <net/l3mdev.h>
30 #include <net/addrconf.h>
31 #include <net/ndisc.h>
32 #include <net/ipv6.h>
33 #include <net/fib_notifier.h>
34 #include <net/switchdev.h>
35 
36 #include "spectrum.h"
37 #include "core.h"
38 #include "reg.h"
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_ipip.h"
42 #include "spectrum_mr.h"
43 #include "spectrum_mr_tcam.h"
44 #include "spectrum_router.h"
45 #include "spectrum_span.h"
46 
47 struct mlxsw_sp_fib;
48 struct mlxsw_sp_vr;
49 struct mlxsw_sp_lpm_tree;
50 struct mlxsw_sp_rif_ops;
51 
52 struct mlxsw_sp_rif {
53 	struct list_head nexthop_list;
54 	struct list_head neigh_list;
55 	struct net_device *dev; /* NULL for underlay RIF */
56 	struct mlxsw_sp_fid *fid;
57 	unsigned char addr[ETH_ALEN];
58 	int mtu;
59 	u16 rif_index;
60 	u16 vr_id;
61 	const struct mlxsw_sp_rif_ops *ops;
62 	struct mlxsw_sp *mlxsw_sp;
63 
64 	unsigned int counter_ingress;
65 	bool counter_ingress_valid;
66 	unsigned int counter_egress;
67 	bool counter_egress_valid;
68 };
69 
70 struct mlxsw_sp_rif_params {
71 	struct net_device *dev;
72 	union {
73 		u16 system_port;
74 		u16 lag_id;
75 	};
76 	u16 vid;
77 	bool lag;
78 };
79 
80 struct mlxsw_sp_rif_subport {
81 	struct mlxsw_sp_rif common;
82 	refcount_t ref_count;
83 	union {
84 		u16 system_port;
85 		u16 lag_id;
86 	};
87 	u16 vid;
88 	bool lag;
89 };
90 
91 struct mlxsw_sp_rif_ipip_lb {
92 	struct mlxsw_sp_rif common;
93 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
94 	u16 ul_vr_id; /* Reserved for Spectrum-2. */
95 	u16 ul_rif_id; /* Reserved for Spectrum. */
96 };
97 
98 struct mlxsw_sp_rif_params_ipip_lb {
99 	struct mlxsw_sp_rif_params common;
100 	struct mlxsw_sp_rif_ipip_lb_config lb_config;
101 };
102 
103 struct mlxsw_sp_rif_ops {
104 	enum mlxsw_sp_rif_type type;
105 	size_t rif_size;
106 
107 	void (*setup)(struct mlxsw_sp_rif *rif,
108 		      const struct mlxsw_sp_rif_params *params);
109 	int (*configure)(struct mlxsw_sp_rif *rif);
110 	void (*deconfigure)(struct mlxsw_sp_rif *rif);
111 	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
112 					 struct netlink_ext_ack *extack);
113 	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
114 };
115 
116 static struct mlxsw_sp_rif *
117 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
118 			 const struct net_device *dev);
119 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
120 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
121 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
122 				  struct mlxsw_sp_lpm_tree *lpm_tree);
123 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
124 				     const struct mlxsw_sp_fib *fib,
125 				     u8 tree_id);
126 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
127 				       const struct mlxsw_sp_fib *fib);
128 
129 static unsigned int *
130 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
131 			   enum mlxsw_sp_rif_counter_dir dir)
132 {
133 	switch (dir) {
134 	case MLXSW_SP_RIF_COUNTER_EGRESS:
135 		return &rif->counter_egress;
136 	case MLXSW_SP_RIF_COUNTER_INGRESS:
137 		return &rif->counter_ingress;
138 	}
139 	return NULL;
140 }
141 
142 static bool
143 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
144 			       enum mlxsw_sp_rif_counter_dir dir)
145 {
146 	switch (dir) {
147 	case MLXSW_SP_RIF_COUNTER_EGRESS:
148 		return rif->counter_egress_valid;
149 	case MLXSW_SP_RIF_COUNTER_INGRESS:
150 		return rif->counter_ingress_valid;
151 	}
152 	return false;
153 }
154 
155 static void
156 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
157 			       enum mlxsw_sp_rif_counter_dir dir,
158 			       bool valid)
159 {
160 	switch (dir) {
161 	case MLXSW_SP_RIF_COUNTER_EGRESS:
162 		rif->counter_egress_valid = valid;
163 		break;
164 	case MLXSW_SP_RIF_COUNTER_INGRESS:
165 		rif->counter_ingress_valid = valid;
166 		break;
167 	}
168 }
169 
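/* Bind or unbind a flow counter to the RIF in the given direction. RITR is
 * handled read-modify-write: the current RIF configuration is queried first
 * and then written back with only the counter binding changed.
 */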
170 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
171 				     unsigned int counter_index, bool enable,
172 				     enum mlxsw_sp_rif_counter_dir dir)
173 {
174 	char ritr_pl[MLXSW_REG_RITR_LEN];
175 	bool is_egress = false;
176 	int err;
177 
178 	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
179 		is_egress = true;
180 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
181 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
182 	if (err)
183 		return err;
184 
185 	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
186 				    is_egress);
187 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
188 }
189 
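/* Read the current value of a RIF counter. The counter must have been
 * allocated and bound beforehand (see mlxsw_sp_rif_counter_alloc() below);
 * the RICNT query uses the NOP opcode, i.e. it reads without clearing, and
 * only the good unicast packet count is extracted from the payload.
 * A minimal usage sketch, assuming a RIF with a valid egress counter:
 *
 *	u64 packets;
 *	int err;
 *
 *	err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *					     MLXSW_SP_RIF_COUNTER_EGRESS,
 *					     &packets);
 *	if (!err)
 *		... use packets ...
 */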
190 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
191 				   struct mlxsw_sp_rif *rif,
192 				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
193 {
194 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
195 	unsigned int *p_counter_index;
196 	bool valid;
197 	int err;
198 
199 	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
200 	if (!valid)
201 		return -EINVAL;
202 
203 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
204 	if (!p_counter_index)
205 		return -EINVAL;
206 	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
207 			     MLXSW_REG_RICNT_OPCODE_NOP);
208 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
209 	if (err)
210 		return err;
211 	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
212 	return 0;
213 }
214 
215 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
216 				      unsigned int counter_index)
217 {
218 	char ricnt_pl[MLXSW_REG_RICNT_LEN];
219 
220 	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
221 			     MLXSW_REG_RICNT_OPCODE_CLEAR);
222 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
223 }
224 
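/* Allocate a counter from the RIF sub-pool, clear it so that counting starts
 * from zero, and bind it to the RIF in the given direction. All steps are
 * rolled back on failure.
 */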
225 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
226 			       struct mlxsw_sp_rif *rif,
227 			       enum mlxsw_sp_rif_counter_dir dir)
228 {
229 	unsigned int *p_counter_index;
230 	int err;
231 
232 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
233 	if (!p_counter_index)
234 		return -EINVAL;
235 	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
236 				     p_counter_index);
237 	if (err)
238 		return err;
239 
240 	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
241 	if (err)
242 		goto err_counter_clear;
243 
244 	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
245 					*p_counter_index, true, dir);
246 	if (err)
247 		goto err_counter_edit;
248 	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
249 	return 0;
250 
251 err_counter_edit:
252 err_counter_clear:
253 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
254 			      *p_counter_index);
255 	return err;
256 }
257 
258 void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
259 			       struct mlxsw_sp_rif *rif,
260 			       enum mlxsw_sp_rif_counter_dir dir)
261 {
262 	unsigned int *p_counter_index;
263 
264 	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
265 		return;
266 
267 	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
268 	if (WARN_ON(!p_counter_index))
269 		return;
270 	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
271 				  *p_counter_index, false, dir);
272 	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
273 			      *p_counter_index);
274 	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
275 }
276 
277 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
278 {
279 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
280 	struct devlink *devlink;
281 
282 	devlink = priv_to_devlink(mlxsw_sp->core);
283 	if (!devlink_dpipe_table_counter_enabled(devlink,
284 						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
285 		return;
286 	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
287 }
288 
289 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
290 {
291 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
292 
293 	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
294 }
295 
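/* One bit for each possible prefix length, /0 through /128 for IPv6:
 * 16 bytes * 8 bits + 1 = 129. IPv4 prefix lengths are a subset.
 */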
296 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
297 
298 struct mlxsw_sp_prefix_usage {
299 	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
300 };
301 
302 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
303 	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
304 
305 static bool
306 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
307 			 struct mlxsw_sp_prefix_usage *prefix_usage2)
308 {
309 	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
310 }
311 
312 static void
313 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
314 			  struct mlxsw_sp_prefix_usage *prefix_usage2)
315 {
316 	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
317 }
318 
319 static void
320 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
321 			  unsigned char prefix_len)
322 {
323 	set_bit(prefix_len, prefix_usage->b);
324 }
325 
326 static void
327 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
328 			    unsigned char prefix_len)
329 {
330 	clear_bit(prefix_len, prefix_usage->b);
331 }
332 
333 struct mlxsw_sp_fib_key {
334 	unsigned char addr[sizeof(struct in6_addr)];
335 	unsigned char prefix_len;
336 };
337 
338 enum mlxsw_sp_fib_entry_type {
339 	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
340 	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
341 	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
342 	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
343 	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
344 
345 	/* This is a special case of local delivery, where a packet should be
346 	 * decapsulated on reception. Note that there is no corresponding ENCAP,
347 	 * because that's a type of next hop, not of FIB entry. (There can be
348 	 * several next hops in a REMOTE entry, and some of them may be
349 	 * encapsulating entries.)
350 	 */
351 	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
352 	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
353 };
354 
355 struct mlxsw_sp_nexthop_group;
356 struct mlxsw_sp_fib_entry;
357 
358 struct mlxsw_sp_fib_node {
359 	struct mlxsw_sp_fib_entry *fib_entry;
360 	struct list_head list;
361 	struct rhash_head ht_node;
362 	struct mlxsw_sp_fib *fib;
363 	struct mlxsw_sp_fib_key key;
364 };
365 
366 struct mlxsw_sp_fib_entry_decap {
367 	struct mlxsw_sp_ipip_entry *ipip_entry;
368 	u32 tunnel_index;
369 };
370 
371 struct mlxsw_sp_fib_entry {
372 	struct mlxsw_sp_fib_node *fib_node;
373 	enum mlxsw_sp_fib_entry_type type;
374 	struct list_head nexthop_group_node;
375 	struct mlxsw_sp_nexthop_group *nh_group;
376 	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
377 };
378 
379 struct mlxsw_sp_fib4_entry {
380 	struct mlxsw_sp_fib_entry common;
381 	u32 tb_id;
382 	u32 prio;
383 	u8 tos;
384 	u8 type;
385 };
386 
387 struct mlxsw_sp_fib6_entry {
388 	struct mlxsw_sp_fib_entry common;
389 	struct list_head rt6_list;
390 	unsigned int nrt6;
391 };
392 
393 struct mlxsw_sp_rt6 {
394 	struct list_head list;
395 	struct fib6_info *rt;
396 };
397 
398 struct mlxsw_sp_lpm_tree {
399 	u8 id; /* tree ID */
400 	unsigned int ref_count;
401 	enum mlxsw_sp_l3proto proto;
402 	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
403 	struct mlxsw_sp_prefix_usage prefix_usage;
404 };
405 
406 struct mlxsw_sp_fib {
407 	struct rhashtable ht;
408 	struct list_head node_list;
409 	struct mlxsw_sp_vr *vr;
410 	struct mlxsw_sp_lpm_tree *lpm_tree;
411 	enum mlxsw_sp_l3proto proto;
412 };
413 
414 struct mlxsw_sp_vr {
415 	u16 id; /* virtual router ID */
416 	u32 tb_id; /* kernel fib table id */
417 	unsigned int rif_count;
418 	struct mlxsw_sp_fib *fib4;
419 	struct mlxsw_sp_fib *fib6;
420 	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
421 	struct mlxsw_sp_rif *ul_rif;
422 	refcount_t ul_rif_refcnt;
423 };
424 
425 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
426 
427 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
428 						struct mlxsw_sp_vr *vr,
429 						enum mlxsw_sp_l3proto proto)
430 {
431 	struct mlxsw_sp_lpm_tree *lpm_tree;
432 	struct mlxsw_sp_fib *fib;
433 	int err;
434 
435 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
436 	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
437 	if (!fib)
438 		return ERR_PTR(-ENOMEM);
439 	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
440 	if (err)
441 		goto err_rhashtable_init;
442 	INIT_LIST_HEAD(&fib->node_list);
443 	fib->proto = proto;
444 	fib->vr = vr;
445 	fib->lpm_tree = lpm_tree;
446 	mlxsw_sp_lpm_tree_hold(lpm_tree);
447 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
448 	if (err)
449 		goto err_lpm_tree_bind;
450 	return fib;
451 
452 err_lpm_tree_bind:
453 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
454 err_rhashtable_init:
455 	kfree(fib);
456 	return ERR_PTR(err);
457 }
458 
459 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
460 				 struct mlxsw_sp_fib *fib)
461 {
462 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
463 	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
464 	WARN_ON(!list_empty(&fib->node_list));
465 	rhashtable_destroy(&fib->ht);
466 	kfree(fib);
467 }
468 
469 static struct mlxsw_sp_lpm_tree *
470 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
471 {
	struct mlxsw_sp_lpm_tree *lpm_tree;

473 	int i;
474 
475 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
476 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
477 		if (lpm_tree->ref_count == 0)
478 			return lpm_tree;
479 	}
480 	return NULL;
481 }
482 
483 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
484 				   struct mlxsw_sp_lpm_tree *lpm_tree)
485 {
486 	char ralta_pl[MLXSW_REG_RALTA_LEN];
487 
488 	mlxsw_reg_ralta_pack(ralta_pl, true,
489 			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
490 			     lpm_tree->id);
491 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
492 }
493 
494 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
495 				   struct mlxsw_sp_lpm_tree *lpm_tree)
496 {
497 	char ralta_pl[MLXSW_REG_RALTA_LEN];
498 
499 	mlxsw_reg_ralta_pack(ralta_pl, false,
500 			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
501 			     lpm_tree->id);
502 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
503 }
504 
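/* Program the tree layout into RALST. Used prefix lengths are visited in
 * ascending order, so root_bin ends up as the longest one; every used length
 * except /0 is then packed as a bin whose left child is the previously packed
 * (next shorter) length, yielding a left-deep chain rooted at the longest
 * used prefix.
 */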
505 static int
506 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
507 				  struct mlxsw_sp_prefix_usage *prefix_usage,
508 				  struct mlxsw_sp_lpm_tree *lpm_tree)
509 {
510 	char ralst_pl[MLXSW_REG_RALST_LEN];
511 	u8 root_bin = 0;
512 	u8 prefix;
513 	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
514 
515 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
516 		root_bin = prefix;
517 
518 	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
519 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
520 		if (prefix == 0)
521 			continue;
522 		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
523 					 MLXSW_REG_RALST_BIN_NO_CHILD);
524 		last_prefix = prefix;
525 	}
526 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
527 }
528 
529 static struct mlxsw_sp_lpm_tree *
530 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
531 			 struct mlxsw_sp_prefix_usage *prefix_usage,
532 			 enum mlxsw_sp_l3proto proto)
533 {
534 	struct mlxsw_sp_lpm_tree *lpm_tree;
535 	int err;
536 
537 	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
538 	if (!lpm_tree)
539 		return ERR_PTR(-EBUSY);
540 	lpm_tree->proto = proto;
541 	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
542 	if (err)
543 		return ERR_PTR(err);
544 
545 	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
546 						lpm_tree);
547 	if (err)
548 		goto err_left_struct_set;
549 	memcpy(&lpm_tree->prefix_usage, prefix_usage,
550 	       sizeof(lpm_tree->prefix_usage));
551 	memset(&lpm_tree->prefix_ref_count, 0,
552 	       sizeof(lpm_tree->prefix_ref_count));
553 	lpm_tree->ref_count = 1;
554 	return lpm_tree;
555 
556 err_left_struct_set:
557 	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
558 	return ERR_PTR(err);
559 }
560 
561 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
562 				      struct mlxsw_sp_lpm_tree *lpm_tree)
563 {
564 	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
565 }
566 
567 static struct mlxsw_sp_lpm_tree *
568 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
569 		      struct mlxsw_sp_prefix_usage *prefix_usage,
570 		      enum mlxsw_sp_l3proto proto)
571 {
572 	struct mlxsw_sp_lpm_tree *lpm_tree;
573 	int i;
574 
575 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
576 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
577 		if (lpm_tree->ref_count != 0 &&
578 		    lpm_tree->proto == proto &&
579 		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
580 					     prefix_usage)) {
581 			mlxsw_sp_lpm_tree_hold(lpm_tree);
582 			return lpm_tree;
583 		}
584 	}
585 	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
586 }
587 
588 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
589 {
590 	lpm_tree->ref_count++;
591 }
592 
593 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
594 				  struct mlxsw_sp_lpm_tree *lpm_tree)
595 {
596 	if (--lpm_tree->ref_count == 0)
597 		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
598 }
599 
600 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
601 
602 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
603 {
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 }};
605 	struct mlxsw_sp_lpm_tree *lpm_tree;
606 	u64 max_trees;
607 	int err, i;
608 
609 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
610 		return -EIO;
611 
612 	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
613 	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
614 	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
615 					     sizeof(struct mlxsw_sp_lpm_tree),
616 					     GFP_KERNEL);
617 	if (!mlxsw_sp->router->lpm.trees)
618 		return -ENOMEM;
619 
620 	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
621 		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
622 		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
623 	}
624 
625 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
626 					 MLXSW_SP_L3_PROTO_IPV4);
627 	if (IS_ERR(lpm_tree)) {
628 		err = PTR_ERR(lpm_tree);
629 		goto err_ipv4_tree_get;
630 	}
631 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
632 
633 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
634 					 MLXSW_SP_L3_PROTO_IPV6);
635 	if (IS_ERR(lpm_tree)) {
636 		err = PTR_ERR(lpm_tree);
637 		goto err_ipv6_tree_get;
638 	}
639 	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
640 
641 	return 0;
642 
643 err_ipv6_tree_get:
644 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
645 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
646 err_ipv4_tree_get:
647 	kfree(mlxsw_sp->router->lpm.trees);
648 	return err;
649 }
650 
651 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
652 {
653 	struct mlxsw_sp_lpm_tree *lpm_tree;
654 
655 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
656 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
657 
658 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
659 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
660 
661 	kfree(mlxsw_sp->router->lpm.trees);
662 }
663 
664 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
665 {
666 	return !!vr->fib4 || !!vr->fib6 ||
667 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
668 	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
669 }
670 
671 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
672 {
673 	struct mlxsw_sp_vr *vr;
674 	int i;
675 
676 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
677 		vr = &mlxsw_sp->router->vrs[i];
678 		if (!mlxsw_sp_vr_is_used(vr))
679 			return vr;
680 	}
681 	return NULL;
682 }
683 
684 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
685 				     const struct mlxsw_sp_fib *fib, u8 tree_id)
686 {
687 	char raltb_pl[MLXSW_REG_RALTB_LEN];
688 
689 	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
690 			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
691 			     tree_id);
692 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
693 }
694 
695 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
696 				       const struct mlxsw_sp_fib *fib)
697 {
698 	char raltb_pl[MLXSW_REG_RALTB_LEN];
699 
700 	/* Bind to tree 0 which is default */
701 	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
702 			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
703 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
704 }
705 
706 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
707 {
708 	/* For our purpose, squash main, default and local tables into one */
709 	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
710 		tb_id = RT_TABLE_MAIN;
711 	return tb_id;
712 }
713 
714 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
715 					    u32 tb_id)
716 {
717 	struct mlxsw_sp_vr *vr;
718 	int i;
719 
720 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
721 
722 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
723 		vr = &mlxsw_sp->router->vrs[i];
724 		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
725 			return vr;
726 	}
727 	return NULL;
728 }
729 
730 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
731 				u16 *vr_id)
732 {
733 	struct mlxsw_sp_vr *vr;
734 	int err = 0;
735 
736 	mutex_lock(&mlxsw_sp->router->lock);
737 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
738 	if (!vr) {
739 		err = -ESRCH;
740 		goto out;
741 	}
742 	*vr_id = vr->id;
743 out:
744 	mutex_unlock(&mlxsw_sp->router->lock);
745 	return err;
746 }
747 
748 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
749 					    enum mlxsw_sp_l3proto proto)
750 {
751 	switch (proto) {
752 	case MLXSW_SP_L3_PROTO_IPV4:
753 		return vr->fib4;
754 	case MLXSW_SP_L3_PROTO_IPV6:
755 		return vr->fib6;
756 	}
757 	return NULL;
758 }
759 
760 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
761 					      u32 tb_id,
762 					      struct netlink_ext_ack *extack)
763 {
764 	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
765 	struct mlxsw_sp_fib *fib4;
766 	struct mlxsw_sp_fib *fib6;
767 	struct mlxsw_sp_vr *vr;
768 	int err;
769 
770 	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
771 	if (!vr) {
772 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
773 		return ERR_PTR(-EBUSY);
774 	}
775 	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
776 	if (IS_ERR(fib4))
777 		return ERR_CAST(fib4);
778 	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
779 	if (IS_ERR(fib6)) {
780 		err = PTR_ERR(fib6);
781 		goto err_fib6_create;
782 	}
783 	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
784 					     MLXSW_SP_L3_PROTO_IPV4);
785 	if (IS_ERR(mr4_table)) {
786 		err = PTR_ERR(mr4_table);
787 		goto err_mr4_table_create;
788 	}
789 	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
790 					     MLXSW_SP_L3_PROTO_IPV6);
791 	if (IS_ERR(mr6_table)) {
792 		err = PTR_ERR(mr6_table);
793 		goto err_mr6_table_create;
794 	}
795 
796 	vr->fib4 = fib4;
797 	vr->fib6 = fib6;
798 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
799 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
800 	vr->tb_id = tb_id;
801 	return vr;
802 
803 err_mr6_table_create:
804 	mlxsw_sp_mr_table_destroy(mr4_table);
805 err_mr4_table_create:
806 	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
807 err_fib6_create:
808 	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
809 	return ERR_PTR(err);
810 }
811 
812 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
813 				struct mlxsw_sp_vr *vr)
814 {
815 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
816 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
817 	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
818 	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
819 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
820 	vr->fib6 = NULL;
821 	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
822 	vr->fib4 = NULL;
823 }
824 
825 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
826 					   struct netlink_ext_ack *extack)
827 {
828 	struct mlxsw_sp_vr *vr;
829 
830 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
831 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
832 	if (!vr)
833 		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
834 	return vr;
835 }
836 
837 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
838 {
839 	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
840 	    list_empty(&vr->fib6->node_list) &&
841 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
842 	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
843 		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
844 }
845 
846 static bool
847 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
848 				    enum mlxsw_sp_l3proto proto, u8 tree_id)
849 {
850 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
851 
852 	if (!mlxsw_sp_vr_is_used(vr))
853 		return false;
854 	if (fib->lpm_tree->id == tree_id)
855 		return true;
856 	return false;
857 }
858 
859 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
860 					struct mlxsw_sp_fib *fib,
861 					struct mlxsw_sp_lpm_tree *new_tree)
862 {
863 	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
864 	int err;
865 
866 	fib->lpm_tree = new_tree;
867 	mlxsw_sp_lpm_tree_hold(new_tree);
868 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
869 	if (err)
870 		goto err_tree_bind;
871 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
872 	return 0;
873 
874 err_tree_bind:
875 	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
876 	fib->lpm_tree = old_tree;
877 	return err;
878 }
879 
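/* Rebind every virtual router that is currently bound to the default tree for
 * this protocol to @new_tree, and make @new_tree the new default. On failure,
 * the routers already moved are rebound to the old tree.
 */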
880 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
881 					 struct mlxsw_sp_fib *fib,
882 					 struct mlxsw_sp_lpm_tree *new_tree)
883 {
884 	enum mlxsw_sp_l3proto proto = fib->proto;
885 	struct mlxsw_sp_lpm_tree *old_tree;
886 	u8 old_id, new_id = new_tree->id;
887 	struct mlxsw_sp_vr *vr;
888 	int i, err;
889 
890 	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
891 	old_id = old_tree->id;
892 
893 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
894 		vr = &mlxsw_sp->router->vrs[i];
895 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
896 			continue;
897 		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
898 						   mlxsw_sp_vr_fib(vr, proto),
899 						   new_tree);
900 		if (err)
901 			goto err_tree_replace;
902 	}
903 
904 	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
905 	       sizeof(new_tree->prefix_ref_count));
906 	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
907 	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
908 
909 	return 0;
910 
911 err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
913 		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
914 			continue;
915 		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
916 					     mlxsw_sp_vr_fib(vr, proto),
917 					     old_tree);
918 	}
919 	return err;
920 }
921 
922 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
923 {
924 	struct mlxsw_sp_vr *vr;
925 	u64 max_vrs;
926 	int i;
927 
928 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
929 		return -EIO;
930 
931 	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
932 	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
933 					GFP_KERNEL);
934 	if (!mlxsw_sp->router->vrs)
935 		return -ENOMEM;
936 
937 	for (i = 0; i < max_vrs; i++) {
938 		vr = &mlxsw_sp->router->vrs[i];
939 		vr->id = i;
940 	}
941 
942 	return 0;
943 }
944 
945 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
946 
947 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
948 {
949 	/* At this stage we're guaranteed not to have new incoming
950 	 * FIB notifications and the work queue is free from FIBs
951 	 * sitting on top of mlxsw netdevs. However, we can still
952 	 * have other FIBs queued. Flush the queue before flushing
953 	 * the device's tables. No need for locks, as we're the only
954 	 * writer.
955 	 */
956 	mlxsw_core_flush_owq();
957 	mlxsw_sp_router_fib_flush(mlxsw_sp);
958 	kfree(mlxsw_sp->router->vrs);
959 }
960 
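/* Return the underlay device that the tunnel is bound to via its "link"
 * parameter. Must be called under the RCU read lock, as required by
 * dev_get_by_index_rcu().
 */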
961 static struct net_device *
962 __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
963 {
964 	struct ip_tunnel *tun = netdev_priv(ol_dev);
965 	struct net *net = dev_net(ol_dev);
966 
967 	return dev_get_by_index_rcu(net, tun->parms.link);
968 }
969 
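/* The underlay FIB table of a tunnel is the table of the VRF that its bound
 * underlay device is enslaved to, or the main table when there is none.
 */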
970 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
971 {
972 	struct net_device *d;
973 	u32 tb_id;
974 
975 	rcu_read_lock();
976 	d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
977 	if (d)
978 		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
979 	else
980 		tb_id = RT_TABLE_MAIN;
981 	rcu_read_unlock();
982 
983 	return tb_id;
984 }
985 
986 static struct mlxsw_sp_rif *
987 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
988 		    const struct mlxsw_sp_rif_params *params,
989 		    struct netlink_ext_ack *extack);
990 
991 static struct mlxsw_sp_rif_ipip_lb *
992 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
993 				enum mlxsw_sp_ipip_type ipipt,
994 				struct net_device *ol_dev,
995 				struct netlink_ext_ack *extack)
996 {
997 	struct mlxsw_sp_rif_params_ipip_lb lb_params;
998 	const struct mlxsw_sp_ipip_ops *ipip_ops;
999 	struct mlxsw_sp_rif *rif;
1000 
1001 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1002 	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1003 		.common.dev = ol_dev,
1004 		.common.lag = false,
1005 		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1006 	};
1007 
1008 	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1009 	if (IS_ERR(rif))
1010 		return ERR_CAST(rif);
1011 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1012 }
1013 
1014 static struct mlxsw_sp_ipip_entry *
1015 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1016 			  enum mlxsw_sp_ipip_type ipipt,
1017 			  struct net_device *ol_dev)
1018 {
1019 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1020 	struct mlxsw_sp_ipip_entry *ipip_entry;
1021 	struct mlxsw_sp_ipip_entry *ret = NULL;
1022 
1023 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1024 	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1025 	if (!ipip_entry)
1026 		return ERR_PTR(-ENOMEM);
1027 
1028 	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1029 							    ol_dev, NULL);
1030 	if (IS_ERR(ipip_entry->ol_lb)) {
1031 		ret = ERR_CAST(ipip_entry->ol_lb);
1032 		goto err_ol_ipip_lb_create;
1033 	}
1034 
1035 	ipip_entry->ipipt = ipipt;
1036 	ipip_entry->ol_dev = ol_dev;
1037 
1038 	switch (ipip_ops->ul_proto) {
1039 	case MLXSW_SP_L3_PROTO_IPV4:
1040 		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
1041 		break;
1042 	case MLXSW_SP_L3_PROTO_IPV6:
1043 		WARN_ON(1);
1044 		break;
1045 	}
1046 
1047 	return ipip_entry;
1048 
1049 err_ol_ipip_lb_create:
1050 	kfree(ipip_entry);
1051 	return ret;
1052 }
1053 
1054 static void
1055 mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
1056 {
1057 	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1058 	kfree(ipip_entry);
1059 }
1060 
1061 static bool
1062 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1063 				  const enum mlxsw_sp_l3proto ul_proto,
1064 				  union mlxsw_sp_l3addr saddr,
1065 				  u32 ul_tb_id,
1066 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1067 {
1068 	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1069 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1070 	union mlxsw_sp_l3addr tun_saddr;
1071 
1072 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1073 		return false;
1074 
1075 	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1076 	return tun_ul_tb_id == ul_tb_id &&
1077 	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1078 }
1079 
1080 static int
1081 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1082 			      struct mlxsw_sp_fib_entry *fib_entry,
1083 			      struct mlxsw_sp_ipip_entry *ipip_entry)
1084 {
1085 	u32 tunnel_index;
1086 	int err;
1087 
1088 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1089 				  1, &tunnel_index);
1090 	if (err)
1091 		return err;
1092 
1093 	ipip_entry->decap_fib_entry = fib_entry;
1094 	fib_entry->decap.ipip_entry = ipip_entry;
1095 	fib_entry->decap.tunnel_index = tunnel_index;
1096 	return 0;
1097 }
1098 
1099 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1100 					  struct mlxsw_sp_fib_entry *fib_entry)
1101 {
1102 	/* Unlink this node from the IPIP entry that it's the decap entry of. */
1103 	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1104 	fib_entry->decap.ipip_entry = NULL;
1105 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1106 			   1, fib_entry->decap.tunnel_index);
1107 }
1108 
1109 static struct mlxsw_sp_fib_node *
1110 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1111 			 size_t addr_len, unsigned char prefix_len);
1112 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1113 				     struct mlxsw_sp_fib_entry *fib_entry);
1114 
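/* Turn the IPIP entry's decap route back into an ordinary trap entry and
 * update it in the hardware.
 */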
1115 static void
1116 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1117 				 struct mlxsw_sp_ipip_entry *ipip_entry)
1118 {
1119 	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1120 
1121 	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1122 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1123 
1124 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1125 }
1126 
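/* Promote @decap_fib_entry so that matching packets are decapsulated by the
 * IPIP entry. If updating the hardware fails, demote the route back to a
 * trap entry.
 */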
1127 static void
1128 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1129 				  struct mlxsw_sp_ipip_entry *ipip_entry,
1130 				  struct mlxsw_sp_fib_entry *decap_fib_entry)
1131 {
1132 	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1133 					  ipip_entry))
1134 		return;
1135 	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1136 
1137 	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1138 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1139 }
1140 
1141 static struct mlxsw_sp_fib_entry *
1142 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1143 				     enum mlxsw_sp_l3proto proto,
1144 				     const union mlxsw_sp_l3addr *addr,
1145 				     enum mlxsw_sp_fib_entry_type type)
1146 {
1147 	struct mlxsw_sp_fib_node *fib_node;
1148 	unsigned char addr_prefix_len;
1149 	struct mlxsw_sp_fib *fib;
1150 	struct mlxsw_sp_vr *vr;
1151 	const void *addrp;
1152 	size_t addr_len;
1153 	u32 addr4;
1154 
1155 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1156 	if (!vr)
1157 		return NULL;
1158 	fib = mlxsw_sp_vr_fib(vr, proto);
1159 
1160 	switch (proto) {
1161 	case MLXSW_SP_L3_PROTO_IPV4:
1162 		addr4 = be32_to_cpu(addr->addr4);
1163 		addrp = &addr4;
1164 		addr_len = 4;
1165 		addr_prefix_len = 32;
1166 		break;
1167 	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
1168 	default:
1169 		WARN_ON(1);
1170 		return NULL;
1171 	}
1172 
1173 	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1174 					    addr_prefix_len);
1175 	if (!fib_node || fib_node->fib_entry->type != type)
1176 		return NULL;
1177 
1178 	return fib_node->fib_entry;
1179 }
1180 
1181 /* Given an IPIP entry, find the corresponding decap route. */
1182 static struct mlxsw_sp_fib_entry *
1183 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1184 			       struct mlxsw_sp_ipip_entry *ipip_entry)
1185 {
	struct mlxsw_sp_fib_node *fib_node;
1187 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1188 	unsigned char saddr_prefix_len;
1189 	union mlxsw_sp_l3addr saddr;
1190 	struct mlxsw_sp_fib *ul_fib;
1191 	struct mlxsw_sp_vr *ul_vr;
1192 	const void *saddrp;
1193 	size_t saddr_len;
1194 	u32 ul_tb_id;
1195 	u32 saddr4;
1196 
1197 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1198 
1199 	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1200 	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1201 	if (!ul_vr)
1202 		return NULL;
1203 
1204 	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1205 	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1206 					   ipip_entry->ol_dev);
1207 
1208 	switch (ipip_ops->ul_proto) {
1209 	case MLXSW_SP_L3_PROTO_IPV4:
1210 		saddr4 = be32_to_cpu(saddr.addr4);
1211 		saddrp = &saddr4;
1212 		saddr_len = 4;
1213 		saddr_prefix_len = 32;
1214 		break;
1215 	default:
1216 		WARN_ON(1);
1217 		return NULL;
1218 	}
1219 
1220 	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1221 					    saddr_prefix_len);
1222 	if (!fib_node ||
1223 	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1224 		return NULL;
1225 
1226 	return fib_node->fib_entry;
1227 }
1228 
1229 static struct mlxsw_sp_ipip_entry *
1230 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1231 			   enum mlxsw_sp_ipip_type ipipt,
1232 			   struct net_device *ol_dev)
1233 {
1234 	struct mlxsw_sp_ipip_entry *ipip_entry;
1235 
1236 	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1237 	if (IS_ERR(ipip_entry))
1238 		return ipip_entry;
1239 
1240 	list_add_tail(&ipip_entry->ipip_list_node,
1241 		      &mlxsw_sp->router->ipip_list);
1242 
1243 	return ipip_entry;
1244 }
1245 
1246 static void
1247 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1248 			    struct mlxsw_sp_ipip_entry *ipip_entry)
1249 {
1250 	list_del(&ipip_entry->ipip_list_node);
1251 	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
1252 }
1253 
1254 static bool
1255 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1256 				  const struct net_device *ul_dev,
1257 				  enum mlxsw_sp_l3proto ul_proto,
1258 				  union mlxsw_sp_l3addr ul_dip,
1259 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1260 {
1261 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1262 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1263 
1264 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1265 		return false;
1266 
1267 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1268 						 ul_tb_id, ipip_entry);
1269 }
1270 
1271 /* Given decap parameters, find the corresponding IPIP entry. */
1272 static struct mlxsw_sp_ipip_entry *
1273 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1274 				  const struct net_device *ul_dev,
1275 				  enum mlxsw_sp_l3proto ul_proto,
1276 				  union mlxsw_sp_l3addr ul_dip)
1277 {
1278 	struct mlxsw_sp_ipip_entry *ipip_entry;
1279 
1280 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1281 			    ipip_list_node)
1282 		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1283 						      ul_proto, ul_dip,
1284 						      ipip_entry))
1285 			return ipip_entry;
1286 
1287 	return NULL;
1288 }
1289 
1290 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1291 				      const struct net_device *dev,
1292 				      enum mlxsw_sp_ipip_type *p_type)
1293 {
1294 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1295 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1296 	enum mlxsw_sp_ipip_type ipipt;
1297 
1298 	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1299 		ipip_ops = router->ipip_ops_arr[ipipt];
1300 		if (dev->type == ipip_ops->dev_type) {
1301 			if (p_type)
1302 				*p_type = ipipt;
1303 			return true;
1304 		}
1305 	}
1306 	return false;
1307 }
1308 
1309 bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1310 				const struct net_device *dev)
1311 {
1312 	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1313 }
1314 
1315 static struct mlxsw_sp_ipip_entry *
1316 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1317 				   const struct net_device *ol_dev)
1318 {
1319 	struct mlxsw_sp_ipip_entry *ipip_entry;
1320 
1321 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1322 			    ipip_list_node)
1323 		if (ipip_entry->ol_dev == ol_dev)
1324 			return ipip_entry;
1325 
1326 	return NULL;
1327 }
1328 
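/* Find the next IPIP entry whose underlay device is @ul_dev, continuing the
 * walk after @start, or from the beginning of the list when @start is NULL.
 */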
1329 static struct mlxsw_sp_ipip_entry *
1330 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1331 				   const struct net_device *ul_dev,
1332 				   struct mlxsw_sp_ipip_entry *start)
1333 {
1334 	struct mlxsw_sp_ipip_entry *ipip_entry;
1335 
1336 	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1337 					ipip_list_node);
1338 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1339 				     ipip_list_node) {
1340 		struct net_device *ol_dev = ipip_entry->ol_dev;
1341 		struct net_device *ipip_ul_dev;
1342 
1343 		rcu_read_lock();
1344 		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1345 		rcu_read_unlock();
1346 
1347 		if (ipip_ul_dev == ul_dev)
1348 			return ipip_entry;
1349 	}
1350 
1351 	return NULL;
1352 }
1353 
1354 bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1355 				const struct net_device *dev)
1356 {
1357 	bool is_ipip_ul;
1358 
1359 	mutex_lock(&mlxsw_sp->router->lock);
1360 	is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1361 	mutex_unlock(&mlxsw_sp->router->lock);
1362 
1363 	return is_ipip_ul;
1364 }
1365 
1366 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1367 						const struct net_device *ol_dev,
1368 						enum mlxsw_sp_ipip_type ipipt)
1369 {
1370 	const struct mlxsw_sp_ipip_ops *ops
1371 		= mlxsw_sp->router->ipip_ops_arr[ipipt];
1372 
1373 	/* For deciding whether decap should be offloaded, we don't care about
1374 	 * overlay protocol, so ask whether either one is supported.
1375 	 */
1376 	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
1377 	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
1378 }
1379 
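/* On registration of an offloadable tunnel netdevice, create an IPIP entry
 * for it, unless another offloaded tunnel already uses the same local address
 * in the same underlay table; in that case the conflicting tunnel is demoted
 * and the new one stays in the slow path.
 */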
1380 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1381 						struct net_device *ol_dev)
1382 {
1383 	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1384 	struct mlxsw_sp_ipip_entry *ipip_entry;
1385 	enum mlxsw_sp_l3proto ul_proto;
1386 	union mlxsw_sp_l3addr saddr;
1387 	u32 ul_tb_id;
1388 
1389 	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1390 	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1391 		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1392 		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1393 		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1394 		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1395 							  saddr, ul_tb_id,
1396 							  NULL)) {
1397 			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1398 								ol_dev);
1399 			if (IS_ERR(ipip_entry))
1400 				return PTR_ERR(ipip_entry);
1401 		}
1402 	}
1403 
1404 	return 0;
1405 }
1406 
1407 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1408 						   struct net_device *ol_dev)
1409 {
1410 	struct mlxsw_sp_ipip_entry *ipip_entry;
1411 
1412 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1413 	if (ipip_entry)
1414 		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1415 }
1416 
1417 static void
1418 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1419 				struct mlxsw_sp_ipip_entry *ipip_entry)
1420 {
1421 	struct mlxsw_sp_fib_entry *decap_fib_entry;
1422 
1423 	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1424 	if (decap_fib_entry)
1425 		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1426 						  decap_fib_entry);
1427 }
1428 
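/* Program the loopback RIF that backs the tunnel. Only IPv4 underlays are
 * supported here; an IPv6 underlay yields -EAFNOSUPPORT.
 */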
1429 static int
1430 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1431 			u16 ul_rif_id, bool enable)
1432 {
1433 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1434 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1435 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1436 	char ritr_pl[MLXSW_REG_RITR_LEN];
1437 	u32 saddr4;
1438 
1439 	switch (lb_cf.ul_protocol) {
1440 	case MLXSW_SP_L3_PROTO_IPV4:
1441 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1442 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1443 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1444 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1445 			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1446 			    ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
1447 		break;
1448 
1449 	case MLXSW_SP_L3_PROTO_IPV6:
1450 		return -EAFNOSUPPORT;
1451 	}
1452 
1453 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1454 }
1455 
1456 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1457 						 struct net_device *ol_dev)
1458 {
1459 	struct mlxsw_sp_ipip_entry *ipip_entry;
1460 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1461 	int err = 0;
1462 
1463 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1464 	if (ipip_entry) {
1465 		lb_rif = ipip_entry->ol_lb;
1466 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1467 					      lb_rif->ul_rif_id, true);
1468 		if (err)
1469 			goto out;
1470 		lb_rif->common.mtu = ol_dev->mtu;
1471 	}
1472 
1473 out:
1474 	return err;
1475 }
1476 
1477 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1478 						struct net_device *ol_dev)
1479 {
1480 	struct mlxsw_sp_ipip_entry *ipip_entry;
1481 
1482 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1483 	if (ipip_entry)
1484 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1485 }
1486 
1487 static void
1488 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1489 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1490 {
1491 	if (ipip_entry->decap_fib_entry)
1492 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1493 }
1494 
1495 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1496 						  struct net_device *ol_dev)
1497 {
1498 	struct mlxsw_sp_ipip_entry *ipip_entry;
1499 
1500 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1501 	if (ipip_entry)
1502 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1503 }
1504 
1505 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1506 					 struct mlxsw_sp_rif *old_rif,
1507 					 struct mlxsw_sp_rif *new_rif);
1508 static int
1509 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1510 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1511 				 bool keep_encap,
1512 				 struct netlink_ext_ack *extack)
1513 {
1514 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1515 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1516 
1517 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1518 						     ipip_entry->ipipt,
1519 						     ipip_entry->ol_dev,
1520 						     extack);
1521 	if (IS_ERR(new_lb_rif))
1522 		return PTR_ERR(new_lb_rif);
1523 	ipip_entry->ol_lb = new_lb_rif;
1524 
1525 	if (keep_encap)
1526 		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1527 					     &new_lb_rif->common);
1528 
1529 	mlxsw_sp_rif_destroy(&old_lb_rif->common);
1530 
1531 	return 0;
1532 }
1533 
1534 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1535 					struct mlxsw_sp_rif *rif);
1536 
1537 /**
1538  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1539  * @mlxsw_sp: mlxsw_sp.
1540  * @ipip_entry: IPIP entry.
1541  * @recreate_loopback: Recreates the associated loopback RIF.
1542  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1543  *              relevant when recreate_loopback is true.
1544  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1545  *                   is only relevant when recreate_loopback is false.
1546  * @extack: extack.
1547  *
1548  * Return: Non-zero value on failure.
1549  */
1550 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1551 					struct mlxsw_sp_ipip_entry *ipip_entry,
1552 					bool recreate_loopback,
1553 					bool keep_encap,
1554 					bool update_nexthops,
1555 					struct netlink_ext_ack *extack)
1556 {
1557 	int err;
1558 
1559 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1560 	 * recreate it. That creates a window of opportunity where RALUE and
1561 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1562 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1563 	 * of RALUE, demote the decap route back.
1564 	 */
1565 	if (ipip_entry->decap_fib_entry)
1566 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1567 
1568 	if (recreate_loopback) {
1569 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1570 						       keep_encap, extack);
1571 		if (err)
1572 			return err;
1573 	} else if (update_nexthops) {
1574 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1575 					    &ipip_entry->ol_lb->common);
1576 	}
1577 
1578 	if (ipip_entry->ol_dev->flags & IFF_UP)
1579 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1580 
1581 	return 0;
1582 }
1583 
1584 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1585 						struct net_device *ol_dev,
1586 						struct netlink_ext_ack *extack)
1587 {
1588 	struct mlxsw_sp_ipip_entry *ipip_entry =
1589 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1590 
1591 	if (!ipip_entry)
1592 		return 0;
1593 
1594 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1595 						   true, false, false, extack);
1596 }
1597 
1598 static int
1599 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1600 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1601 				     struct net_device *ul_dev,
1602 				     bool *demote_this,
1603 				     struct netlink_ext_ack *extack)
1604 {
1605 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1606 	enum mlxsw_sp_l3proto ul_proto;
1607 	union mlxsw_sp_l3addr saddr;
1608 
1609 	/* Moving underlay to a different VRF might cause local address
1610 	 * conflict, and the conflicting tunnels need to be demoted.
1611 	 */
1612 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1613 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1614 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1615 						 saddr, ul_tb_id,
1616 						 ipip_entry)) {
1617 		*demote_this = true;
1618 		return 0;
1619 	}
1620 
1621 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1622 						   true, true, false, extack);
1623 }
1624 
1625 static int
1626 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1627 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1628 				    struct net_device *ul_dev)
1629 {
1630 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1631 						   false, false, true, NULL);
1632 }
1633 
1634 static int
1635 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1636 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1637 				      struct net_device *ul_dev)
1638 {
1639 	/* A down underlay device causes encapsulated packets to not be
1640 	 * forwarded, but decap still works. So refresh next hops without
1641 	 * touching anything else.
1642 	 */
1643 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1644 						   false, false, true, NULL);
1645 }
1646 
1647 static int
1648 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1649 					struct net_device *ol_dev,
1650 					struct netlink_ext_ack *extack)
1651 {
1652 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1653 	struct mlxsw_sp_ipip_entry *ipip_entry;
1654 	int err;
1655 
1656 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1657 	if (!ipip_entry)
1658 		/* A change might make a tunnel eligible for offloading, but
1659 		 * that is currently not implemented. What falls to slow path
1660 		 * stays there.
1661 		 */
1662 		return 0;
1663 
1664 	/* A change might make a tunnel not eligible for offloading. */
1665 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1666 						 ipip_entry->ipipt)) {
1667 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1668 		return 0;
1669 	}
1670 
1671 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1672 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1673 	return err;
1674 }
1675 
1676 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1677 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1678 {
1679 	struct net_device *ol_dev = ipip_entry->ol_dev;
1680 
1681 	if (ol_dev->flags & IFF_UP)
1682 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1683 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1684 }
1685 
1686 /* The configuration where several tunnels have the same local address in the
1687  * same underlay table needs special treatment in the HW. That is currently not
1688  * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
1691  */
1692 bool
1693 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1694 				     enum mlxsw_sp_l3proto ul_proto,
1695 				     union mlxsw_sp_l3addr saddr,
1696 				     u32 ul_tb_id,
1697 				     const struct mlxsw_sp_ipip_entry *except)
1698 {
1699 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1700 
1701 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1702 				 ipip_list_node) {
1703 		if (ipip_entry != except &&
1704 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1705 						      ul_tb_id, ipip_entry)) {
1706 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1707 			return true;
1708 		}
1709 	}
1710 
1711 	return false;
1712 }
1713 
1714 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1715 						     struct net_device *ul_dev)
1716 {
1717 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1718 
1719 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1720 				 ipip_list_node) {
1721 		struct net_device *ol_dev = ipip_entry->ol_dev;
1722 		struct net_device *ipip_ul_dev;
1723 
1724 		rcu_read_lock();
1725 		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1726 		rcu_read_unlock();
1727 		if (ipip_ul_dev == ul_dev)
1728 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1729 	}
1730 }
1731 
1732 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1733 				     struct net_device *ol_dev,
1734 				     unsigned long event,
1735 				     struct netdev_notifier_info *info)
1736 {
1737 	struct netdev_notifier_changeupper_info *chup;
1738 	struct netlink_ext_ack *extack;
1739 	int err = 0;
1740 
1741 	mutex_lock(&mlxsw_sp->router->lock);
1742 	switch (event) {
1743 	case NETDEV_REGISTER:
1744 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1745 		break;
1746 	case NETDEV_UNREGISTER:
1747 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1748 		break;
1749 	case NETDEV_UP:
1750 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1751 		break;
1752 	case NETDEV_DOWN:
1753 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1754 		break;
1755 	case NETDEV_CHANGEUPPER:
1756 		chup = container_of(info, typeof(*chup), info);
1757 		extack = info->extack;
1758 		if (netif_is_l3_master(chup->upper_dev))
1759 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1760 								   ol_dev,
1761 								   extack);
1762 		break;
1763 	case NETDEV_CHANGE:
1764 		extack = info->extack;
1765 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1766 							      ol_dev, extack);
1767 		break;
1768 	case NETDEV_CHANGEMTU:
1769 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1770 		break;
1771 	}
1772 	mutex_unlock(&mlxsw_sp->router->lock);
1773 	return err;
1774 }
1775 
1776 static int
1777 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1778 				   struct mlxsw_sp_ipip_entry *ipip_entry,
1779 				   struct net_device *ul_dev,
1780 				   bool *demote_this,
1781 				   unsigned long event,
1782 				   struct netdev_notifier_info *info)
1783 {
1784 	struct netdev_notifier_changeupper_info *chup;
1785 	struct netlink_ext_ack *extack;
1786 
1787 	switch (event) {
1788 	case NETDEV_CHANGEUPPER:
1789 		chup = container_of(info, typeof(*chup), info);
1790 		extack = info->extack;
1791 		if (netif_is_l3_master(chup->upper_dev))
1792 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1793 								    ipip_entry,
1794 								    ul_dev,
1795 								    demote_this,
1796 								    extack);
1797 		break;
1798 
1799 	case NETDEV_UP:
1800 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1801 							   ul_dev);
1802 	case NETDEV_DOWN:
1803 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1804 							     ipip_entry,
1805 							     ul_dev);
1806 	}
1807 	return 0;
1808 }
1809 
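/* Deliver the event to every tunnel whose underlay device is ul_dev. When a
 * handler asks to demote the current entry, iteration resumes from its
 * predecessor, which is re-fetched from the list because the demoted entry
 * is unlinked. If a handler fails, all tunnels over ul_dev are demoted.
 */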
1810 int
1811 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1812 				 struct net_device *ul_dev,
1813 				 unsigned long event,
1814 				 struct netdev_notifier_info *info)
1815 {
1816 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1817 	int err = 0;
1818 
1819 	mutex_lock(&mlxsw_sp->router->lock);
1820 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1821 								ul_dev,
1822 								ipip_entry))) {
1823 		struct mlxsw_sp_ipip_entry *prev;
1824 		bool demote_this = false;
1825 
1826 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1827 							 ul_dev, &demote_this,
1828 							 event, info);
1829 		if (err) {
1830 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1831 								 ul_dev);
1832 			break;
1833 		}
1834 
1835 		if (demote_this) {
1836 			if (list_is_first(&ipip_entry->ipip_list_node,
1837 					  &mlxsw_sp->router->ipip_list))
1838 				prev = NULL;
1839 			else
				/* This can't be cached from the previous
				 * iteration, because that entry could be
				 * gone by now.
				 */
1843 				prev = list_prev_entry(ipip_entry,
1844 						       ipip_list_node);
1845 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1846 			ipip_entry = prev;
1847 		}
1848 	}
1849 	mutex_unlock(&mlxsw_sp->router->lock);
1850 
1851 	return err;
1852 }
1853 
1854 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1855 				      enum mlxsw_sp_l3proto ul_proto,
1856 				      const union mlxsw_sp_l3addr *ul_sip,
1857 				      u32 tunnel_index)
1858 {
1859 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1860 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1861 	struct mlxsw_sp_fib_entry *fib_entry;
1862 	int err = 0;
1863 
1864 	mutex_lock(&mlxsw_sp->router->lock);
1865 
1866 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
1867 		err = -EINVAL;
1868 		goto out;
1869 	}
1870 
1871 	router->nve_decap_config.ul_tb_id = ul_tb_id;
1872 	router->nve_decap_config.tunnel_index = tunnel_index;
1873 	router->nve_decap_config.ul_proto = ul_proto;
1874 	router->nve_decap_config.ul_sip = *ul_sip;
1875 	router->nve_decap_config.valid = true;
1876 
	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface.
	 */
1880 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1881 							 ul_proto, ul_sip,
1882 							 type);
1883 	if (!fib_entry)
1884 		goto out;
1885 
1886 	fib_entry->decap.tunnel_index = tunnel_index;
1887 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1888 
1889 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1890 	if (err)
1891 		goto err_fib_entry_update;
1892 
1893 	goto out;
1894 
1895 err_fib_entry_update:
1896 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1897 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1898 out:
1899 	mutex_unlock(&mlxsw_sp->router->lock);
1900 	return err;
1901 }
1902 
1903 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1904 				      enum mlxsw_sp_l3proto ul_proto,
1905 				      const union mlxsw_sp_l3addr *ul_sip)
1906 {
1907 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1908 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1909 	struct mlxsw_sp_fib_entry *fib_entry;
1910 
1911 	mutex_lock(&mlxsw_sp->router->lock);
1912 
1913 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
1914 		goto out;
1915 
1916 	router->nve_decap_config.valid = false;
1917 
1918 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1919 							 ul_proto, ul_sip,
1920 							 type);
1921 	if (!fib_entry)
1922 		goto out;
1923 
1924 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1925 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1926 out:
1927 	mutex_unlock(&mlxsw_sp->router->lock);
1928 }
1929 
1930 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
1931 					 u32 ul_tb_id,
1932 					 enum mlxsw_sp_l3proto ul_proto,
1933 					 const union mlxsw_sp_l3addr *ul_sip)
1934 {
1935 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1936 
1937 	return router->nve_decap_config.valid &&
1938 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
1939 	       router->nve_decap_config.ul_proto == ul_proto &&
1940 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
1941 		       sizeof(*ul_sip));
1942 }
1943 
1944 struct mlxsw_sp_neigh_key {
1945 	struct neighbour *n;
1946 };
1947 
1948 struct mlxsw_sp_neigh_entry {
1949 	struct list_head rif_list_node;
1950 	struct rhash_head ht_node;
1951 	struct mlxsw_sp_neigh_key key;
1952 	u16 rif;
1953 	bool connected;
1954 	unsigned char ha[ETH_ALEN];
1955 	struct list_head nexthop_list; /* list of nexthops using
1956 					* this neigh entry
1957 					*/
1958 	struct list_head nexthop_neighs_list_node;
1959 	unsigned int counter_index;
1960 	bool counter_valid;
1961 };
1962 
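/* Neighbour entries are hashed by the kernel's struct neighbour pointer
 * itself, so mapping a netevent notification to driver state is a single
 * pointer-keyed lookup.
 */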
1963 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1964 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1965 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1966 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
1967 };
1968 
1969 struct mlxsw_sp_neigh_entry *
1970 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1971 			struct mlxsw_sp_neigh_entry *neigh_entry)
1972 {
1973 	if (!neigh_entry) {
1974 		if (list_empty(&rif->neigh_list))
1975 			return NULL;
1976 		else
1977 			return list_first_entry(&rif->neigh_list,
1978 						typeof(*neigh_entry),
1979 						rif_list_node);
1980 	}
1981 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
1982 		return NULL;
1983 	return list_next_entry(neigh_entry, rif_list_node);
1984 }
1985 
1986 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1987 {
1988 	return neigh_entry->key.n->tbl->family;
1989 }
1990 
1991 unsigned char *
1992 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1993 {
1994 	return neigh_entry->ha;
1995 }
1996 
1997 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1998 {
1999 	struct neighbour *n;
2000 
2001 	n = neigh_entry->key.n;
2002 	return ntohl(*((__be32 *) n->primary_key));
2003 }
2004 
2005 struct in6_addr *
2006 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2007 {
2008 	struct neighbour *n;
2009 
2010 	n = neigh_entry->key.n;
2011 	return (struct in6_addr *) &n->primary_key;
2012 }
2013 
2014 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2015 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2016 			       u64 *p_counter)
2017 {
2018 	if (!neigh_entry->counter_valid)
2019 		return -EINVAL;
2020 
2021 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2022 					 p_counter, NULL);
2023 }
2024 
2025 static struct mlxsw_sp_neigh_entry *
2026 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2027 			   u16 rif)
2028 {
2029 	struct mlxsw_sp_neigh_entry *neigh_entry;
2030 
2031 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2032 	if (!neigh_entry)
2033 		return NULL;
2034 
2035 	neigh_entry->key.n = n;
2036 	neigh_entry->rif = rif;
2037 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2038 
2039 	return neigh_entry;
2040 }
2041 
2042 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2043 {
2044 	kfree(neigh_entry);
2045 }
2046 
2047 static int
2048 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2049 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2050 {
2051 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2052 				      &neigh_entry->ht_node,
2053 				      mlxsw_sp_neigh_ht_params);
2054 }
2055 
2056 static void
2057 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2058 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2059 {
2060 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2061 			       &neigh_entry->ht_node,
2062 			       mlxsw_sp_neigh_ht_params);
2063 }
2064 
2065 static bool
2066 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2067 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2068 {
2069 	struct devlink *devlink;
2070 	const char *table_name;
2071 
2072 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2073 	case AF_INET:
2074 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2075 		break;
2076 	case AF_INET6:
2077 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2078 		break;
2079 	default:
2080 		WARN_ON(1);
2081 		return false;
2082 	}
2083 
2084 	devlink = priv_to_devlink(mlxsw_sp->core);
2085 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2086 }
2087 
2088 static void
2089 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2090 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2091 {
2092 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2093 		return;
2094 
2095 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2096 		return;
2097 
2098 	neigh_entry->counter_valid = true;
2099 }
2100 
2101 static void
2102 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2103 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2104 {
2105 	if (!neigh_entry->counter_valid)
2106 		return;
2107 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2108 				   neigh_entry->counter_index);
2109 	neigh_entry->counter_valid = false;
2110 }
2111 
2112 static struct mlxsw_sp_neigh_entry *
2113 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2114 {
2115 	struct mlxsw_sp_neigh_entry *neigh_entry;
2116 	struct mlxsw_sp_rif *rif;
2117 	int err;
2118 
2119 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2120 	if (!rif)
2121 		return ERR_PTR(-EINVAL);
2122 
2123 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2124 	if (!neigh_entry)
2125 		return ERR_PTR(-ENOMEM);
2126 
2127 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2128 	if (err)
2129 		goto err_neigh_entry_insert;
2130 
2131 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2132 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2133 
2134 	return neigh_entry;
2135 
2136 err_neigh_entry_insert:
2137 	mlxsw_sp_neigh_entry_free(neigh_entry);
2138 	return ERR_PTR(err);
2139 }
2140 
2141 static void
2142 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2143 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2144 {
2145 	list_del(&neigh_entry->rif_list_node);
2146 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2147 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2148 	mlxsw_sp_neigh_entry_free(neigh_entry);
2149 }
2150 
2151 static struct mlxsw_sp_neigh_entry *
2152 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2153 {
2154 	struct mlxsw_sp_neigh_key key;
2155 
2156 	key.n = n;
2157 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2158 				      &key, mlxsw_sp_neigh_ht_params);
2159 }
2160 
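/* Derive the neighbour activity polling interval from the kernel's probe
 * timing. Illustrative example: assuming the default DELAY_PROBE_TIME of
 * 5 seconds on both the ARP and ND tables, the interval computes to
 * 5000 ms. The 5 second figure is an assumption about the running kernel's
 * configuration, not something this driver controls.
 */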
2161 static void
2162 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2163 {
2164 	unsigned long interval;
2165 
2166 #if IS_ENABLED(CONFIG_IPV6)
2167 	interval = min_t(unsigned long,
2168 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2169 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2170 #else
2171 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2172 #endif
2173 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2174 }
2175 
2176 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2177 						   char *rauhtd_pl,
2178 						   int ent_index)
2179 {
2180 	struct net_device *dev;
2181 	struct neighbour *n;
2182 	__be32 dipn;
2183 	u32 dip;
2184 	u16 rif;
2185 
2186 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2187 
2188 	if (!mlxsw_sp->router->rifs[rif]) {
2189 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2190 		return;
2191 	}
2192 
2193 	dipn = htonl(dip);
2194 	dev = mlxsw_sp->router->rifs[rif]->dev;
2195 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2196 	if (!n)
2197 		return;
2198 
2199 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2200 	neigh_event_send(n, NULL);
2201 	neigh_release(n);
2202 }
2203 
2204 #if IS_ENABLED(CONFIG_IPV6)
2205 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2206 						   char *rauhtd_pl,
2207 						   int rec_index)
2208 {
2209 	struct net_device *dev;
2210 	struct neighbour *n;
2211 	struct in6_addr dip;
2212 	u16 rif;
2213 
2214 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2215 					 (char *) &dip);
2216 
2217 	if (!mlxsw_sp->router->rifs[rif]) {
2218 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2219 		return;
2220 	}
2221 
2222 	dev = mlxsw_sp->router->rifs[rif]->dev;
2223 	n = neigh_lookup(&nd_tbl, &dip, dev);
2224 	if (!n)
2225 		return;
2226 
2227 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2228 	neigh_event_send(n, NULL);
2229 	neigh_release(n);
2230 }
2231 #else
2232 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2233 						   char *rauhtd_pl,
2234 						   int rec_index)
2235 {
2236 }
2237 #endif
2238 
2239 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2240 						   char *rauhtd_pl,
2241 						   int rec_index)
2242 {
2243 	u8 num_entries;
2244 	int i;
2245 
2246 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2247 								rec_index);
2248 	/* Hardware starts counting at 0, so add 1. */
2249 	num_entries++;
2250 
2251 	/* Each record consists of several neighbour entries. */
2252 	for (i = 0; i < num_entries; i++) {
2253 		int ent_index;
2254 
2255 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2256 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2257 						       ent_index);
2258 	}
2259 
2260 }
2261 
2262 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2263 						   char *rauhtd_pl,
2264 						   int rec_index)
2265 {
2266 	/* One record contains one entry. */
2267 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2268 					       rec_index);
2269 }
2270 
2271 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2272 					      char *rauhtd_pl, int rec_index)
2273 {
2274 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2275 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2276 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2277 						       rec_index);
2278 		break;
2279 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2280 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2281 						       rec_index);
2282 		break;
2283 	}
2284 }
2285 
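/* A RAUHTD response is treated as exhausted unless it carries the maximum
 * number of records and its last record is completely full: an IPv6 record
 * always holds a single entry (and so is always full), while an IPv4 record
 * is full only when it carries MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC entries.
 * In that case another query may be needed to drain the remaining records.
 */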
2286 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2287 {
2288 	u8 num_rec, last_rec_index, num_entries;
2289 
2290 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2291 	last_rec_index = num_rec - 1;
2292 
2293 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2294 		return false;
2295 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2296 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2297 		return true;
2298 
2299 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2300 								last_rec_index);
2301 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2302 		return true;
2303 	return false;
2304 }
2305 
2306 static int
2307 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2308 				       char *rauhtd_pl,
2309 				       enum mlxsw_reg_rauhtd_type type)
2310 {
2311 	int i, num_rec;
2312 	int err;
2313 
2314 	/* Ensure the RIF we read from the device does not change mid-dump. */
2315 	mutex_lock(&mlxsw_sp->router->lock);
2316 	do {
2317 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2318 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2319 				      rauhtd_pl);
2320 		if (err) {
2321 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2322 			break;
2323 		}
2324 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2325 		for (i = 0; i < num_rec; i++)
2326 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2327 							  i);
2328 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2329 	mutex_unlock(&mlxsw_sp->router->lock);
2330 
2331 	return err;
2332 }
2333 
2334 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2335 {
2336 	enum mlxsw_reg_rauhtd_type type;
2337 	char *rauhtd_pl;
2338 	int err;
2339 
2340 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2341 	if (!rauhtd_pl)
2342 		return -ENOMEM;
2343 
2344 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2345 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2346 	if (err)
2347 		goto out;
2348 
2349 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2350 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2351 out:
2352 	kfree(rauhtd_pl);
2353 	return err;
2354 }
2355 
2356 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2357 {
2358 	struct mlxsw_sp_neigh_entry *neigh_entry;
2359 
2360 	mutex_lock(&mlxsw_sp->router->lock);
2361 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2362 			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of traffic.
		 */
2366 		neigh_event_send(neigh_entry->key.n, NULL);
2367 	mutex_unlock(&mlxsw_sp->router->lock);
2368 }
2369 
2370 static void
2371 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2372 {
2373 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2374 
2375 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2376 			       msecs_to_jiffies(interval));
2377 }
2378 
2379 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2380 {
2381 	struct mlxsw_sp_router *router;
2382 	int err;
2383 
2384 	router = container_of(work, struct mlxsw_sp_router,
2385 			      neighs_update.dw.work);
2386 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2387 	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2389 
2390 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2391 
2392 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2393 }
2394 
2395 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2396 {
2397 	struct mlxsw_sp_neigh_entry *neigh_entry;
2398 	struct mlxsw_sp_router *router;
2399 
2400 	router = container_of(work, struct mlxsw_sp_router,
2401 			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find the unresolved ones and
	 * send ARP probes for them. This solves the chicken-and-egg problem:
	 * a nexthop is not offloaded until its neighbour is resolved, but
	 * the neighbour would never be resolved if traffic keeps flowing in
	 * HW via a different nexthop.
	 */
2408 	mutex_lock(&router->lock);
2409 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2410 			    nexthop_neighs_list_node)
2411 		if (!neigh_entry->connected)
2412 			neigh_event_send(neigh_entry->key.n, NULL);
2413 	mutex_unlock(&router->lock);
2414 
2415 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2416 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2417 }
2418 
2419 static void
2420 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2421 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2422 			      bool removing, bool dead);
2423 
2424 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2425 {
2426 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2427 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2428 }
2429 
2430 static int
2431 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2432 				struct mlxsw_sp_neigh_entry *neigh_entry,
2433 				enum mlxsw_reg_rauht_op op)
2434 {
2435 	struct neighbour *n = neigh_entry->key.n;
2436 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2437 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2438 
2439 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2440 			      dip);
2441 	if (neigh_entry->counter_valid)
2442 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2443 					     neigh_entry->counter_index);
2444 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2445 }
2446 
2447 static int
2448 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2449 				struct mlxsw_sp_neigh_entry *neigh_entry,
2450 				enum mlxsw_reg_rauht_op op)
2451 {
2452 	struct neighbour *n = neigh_entry->key.n;
2453 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2454 	const char *dip = n->primary_key;
2455 
2456 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2457 			      dip);
2458 	if (neigh_entry->counter_valid)
2459 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2460 					     neigh_entry->counter_index);
2461 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2462 }
2463 
2464 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2465 {
2466 	struct neighbour *n = neigh_entry->key.n;
2467 
2468 	/* Packets with a link-local destination address are trapped
2469 	 * after LPM lookup and never reach the neighbour table, so
2470 	 * there is no need to program such neighbours to the device.
2471 	 */
2472 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2473 	    IPV6_ADDR_LINKLOCAL)
2474 		return true;
2475 	return false;
2476 }
2477 
2478 static void
2479 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2480 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2481 			    bool adding)
2482 {
2483 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2484 	int err;
2485 
2486 	if (!adding && !neigh_entry->connected)
2487 		return;
2488 	neigh_entry->connected = adding;
2489 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2490 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2491 						      op);
2492 		if (err)
2493 			return;
2494 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2495 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2496 			return;
2497 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2498 						      op);
2499 		if (err)
2500 			return;
2501 	} else {
2502 		WARN_ON_ONCE(1);
2503 		return;
2504 	}
2505 
2506 	if (adding)
2507 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2508 	else
2509 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2510 }
2511 
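/* Allocate or free the flow counter of a neighbour entry and rewrite the
 * entry so that the new counter binding takes effect in the device.
 */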
2512 void
2513 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2514 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2515 				    bool adding)
2516 {
2517 	if (adding)
2518 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2519 	else
2520 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2521 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2522 }
2523 
2524 struct mlxsw_sp_netevent_work {
2525 	struct work_struct work;
2526 	struct mlxsw_sp *mlxsw_sp;
2527 	struct neighbour *n;
2528 };
2529 
2530 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2531 {
2532 	struct mlxsw_sp_netevent_work *net_work =
2533 		container_of(work, struct mlxsw_sp_netevent_work, work);
2534 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2535 	struct mlxsw_sp_neigh_entry *neigh_entry;
2536 	struct neighbour *n = net_work->n;
2537 	unsigned char ha[ETH_ALEN];
2538 	bool entry_connected;
2539 	u8 nud_state, dead;
2540 
2541 	/* If these parameters are changed after we release the lock,
2542 	 * then we are guaranteed to receive another event letting us
2543 	 * know about it.
2544 	 */
2545 	read_lock_bh(&n->lock);
2546 	memcpy(ha, n->ha, ETH_ALEN);
2547 	nud_state = n->nud_state;
2548 	dead = n->dead;
2549 	read_unlock_bh(&n->lock);
2550 
2551 	mutex_lock(&mlxsw_sp->router->lock);
2552 	mlxsw_sp_span_respin(mlxsw_sp);
2553 
2554 	entry_connected = nud_state & NUD_VALID && !dead;
2555 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2556 	if (!entry_connected && !neigh_entry)
2557 		goto out;
2558 	if (!neigh_entry) {
2559 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2560 		if (IS_ERR(neigh_entry))
2561 			goto out;
2562 	}
2563 
2564 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2565 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2566 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2567 				      dead);
2568 
2569 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2570 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2571 
2572 out:
2573 	mutex_unlock(&mlxsw_sp->router->lock);
2574 	neigh_release(n);
2575 	kfree(net_work);
2576 }
2577 
2578 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2579 
2580 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2581 {
2582 	struct mlxsw_sp_netevent_work *net_work =
2583 		container_of(work, struct mlxsw_sp_netevent_work, work);
2584 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2585 
2586 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2587 	kfree(net_work);
2588 }
2589 
2590 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2591 
2592 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2593 {
2594 	struct mlxsw_sp_netevent_work *net_work =
2595 		container_of(work, struct mlxsw_sp_netevent_work, work);
2596 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2597 
2598 	__mlxsw_sp_router_init(mlxsw_sp);
2599 	kfree(net_work);
2600 }
2601 
2602 static int mlxsw_sp_router_schedule_work(struct net *net,
2603 					 struct notifier_block *nb,
2604 					 void (*cb)(struct work_struct *))
2605 {
2606 	struct mlxsw_sp_netevent_work *net_work;
2607 	struct mlxsw_sp_router *router;
2608 
2609 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2610 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2611 		return NOTIFY_DONE;
2612 
2613 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2614 	if (!net_work)
2615 		return NOTIFY_BAD;
2616 
2617 	INIT_WORK(&net_work->work, cb);
2618 	net_work->mlxsw_sp = router->mlxsw_sp;
2619 	mlxsw_core_schedule_work(&net_work->work);
2620 	return NOTIFY_DONE;
2621 }
2622 
2623 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2624 					  unsigned long event, void *ptr)
2625 {
2626 	struct mlxsw_sp_netevent_work *net_work;
2627 	struct mlxsw_sp_port *mlxsw_sp_port;
2628 	struct mlxsw_sp *mlxsw_sp;
2629 	unsigned long interval;
2630 	struct neigh_parms *p;
2631 	struct neighbour *n;
2632 
2633 	switch (event) {
2634 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2635 		p = ptr;
2636 
2637 		/* We don't care about changes in the default table. */
2638 		if (!p->dev || (p->tbl->family != AF_INET &&
2639 				p->tbl->family != AF_INET6))
2640 			return NOTIFY_DONE;
2641 
		/* We are in atomic context and can't take the RTNL mutex,
		 * so use the RCU variant to walk the device chain.
		 */
2645 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2646 		if (!mlxsw_sp_port)
2647 			return NOTIFY_DONE;
2648 
2649 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2650 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2651 		mlxsw_sp->router->neighs_update.interval = interval;
2652 
2653 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2654 		break;
2655 	case NETEVENT_NEIGH_UPDATE:
2656 		n = ptr;
2657 
2658 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2659 			return NOTIFY_DONE;
2660 
2661 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2662 		if (!mlxsw_sp_port)
2663 			return NOTIFY_DONE;
2664 
2665 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2666 		if (!net_work) {
2667 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
2668 			return NOTIFY_BAD;
2669 		}
2670 
2671 		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2672 		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2673 		net_work->n = n;
2674 
		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in the
		 * delayed work.
		 */
2679 		neigh_clone(n);
2680 		mlxsw_core_schedule_work(&net_work->work);
2681 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2682 		break;
2683 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2684 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2685 		return mlxsw_sp_router_schedule_work(ptr, nb,
2686 				mlxsw_sp_router_mp_hash_event_work);
2687 
2688 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2689 		return mlxsw_sp_router_schedule_work(ptr, nb,
2690 				mlxsw_sp_router_update_priority_work);
2691 	}
2692 
2693 	return NOTIFY_DONE;
2694 }
2695 
2696 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2697 {
2698 	int err;
2699 
2700 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2701 			      &mlxsw_sp_neigh_ht_params);
2702 	if (err)
2703 		return err;
2704 
2705 	/* Initialize the polling interval according to the default
2706 	 * table.
2707 	 */
2708 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2709 
	/* Create the delayed works for neighbour activity update and
	 * unresolved nexthop probing.
	 */
2711 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2712 			  mlxsw_sp_router_neighs_update_work);
2713 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2714 			  mlxsw_sp_router_probe_unresolved_nexthops);
2715 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2716 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2717 	return 0;
2718 }
2719 
2720 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2721 {
2722 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2723 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2724 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2725 }
2726 
2727 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2728 					 struct mlxsw_sp_rif *rif)
2729 {
2730 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2731 
2732 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2733 				 rif_list_node) {
2734 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2735 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2736 	}
2737 }
2738 
2739 enum mlxsw_sp_nexthop_type {
2740 	MLXSW_SP_NEXTHOP_TYPE_ETH,
2741 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
2742 };
2743 
2744 struct mlxsw_sp_nexthop_key {
2745 	struct fib_nh *fib_nh;
2746 };
2747 
2748 struct mlxsw_sp_nexthop {
2749 	struct list_head neigh_list_node; /* member of neigh entry list */
2750 	struct list_head rif_list_node;
2751 	struct list_head router_list_node;
2752 	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2753 						* this belongs to
2754 						*/
2755 	struct rhash_head ht_node;
2756 	struct mlxsw_sp_nexthop_key key;
2757 	unsigned char gw_addr[sizeof(struct in6_addr)];
2758 	int ifindex;
2759 	int nh_weight;
2760 	int norm_nh_weight;
2761 	int num_adj_entries;
2762 	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put into the KVD linear area of
			      * this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into the
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that the MAC of this neigh should
		      * be updated in HW
		      */
2772 	enum mlxsw_sp_nexthop_type type;
2773 	union {
2774 		struct mlxsw_sp_neigh_entry *neigh_entry;
2775 		struct mlxsw_sp_ipip_entry *ipip_entry;
2776 	};
2777 	unsigned int counter_index;
2778 	bool counter_valid;
2779 };
2780 
2781 struct mlxsw_sp_nexthop_group {
2782 	void *priv;
2783 	struct rhash_head ht_node;
2784 	struct list_head fib_list; /* list of fib entries that use this group */
2785 	struct neigh_table *neigh_tbl;
2786 	u8 adj_index_valid:1,
2787 	   gateway:1; /* routes using the group use a gateway */
2788 	u32 adj_index;
2789 	u16 ecmp_size;
2790 	u16 count;
2791 	int sum_norm_weight;
	struct mlxsw_sp_nexthop nexthops[];
2793 #define nh_rif	nexthops[0].rif
2794 };
2795 
2796 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2797 				    struct mlxsw_sp_nexthop *nh)
2798 {
2799 	struct devlink *devlink;
2800 
2801 	devlink = priv_to_devlink(mlxsw_sp->core);
2802 	if (!devlink_dpipe_table_counter_enabled(devlink,
2803 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2804 		return;
2805 
2806 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2807 		return;
2808 
2809 	nh->counter_valid = true;
2810 }
2811 
2812 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2813 				   struct mlxsw_sp_nexthop *nh)
2814 {
2815 	if (!nh->counter_valid)
2816 		return;
2817 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2818 	nh->counter_valid = false;
2819 }
2820 
2821 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2822 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2823 {
2824 	if (!nh->counter_valid)
2825 		return -EINVAL;
2826 
2827 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2828 					 p_counter, NULL);
2829 }
2830 
2831 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2832 					       struct mlxsw_sp_nexthop *nh)
2833 {
2834 	if (!nh) {
2835 		if (list_empty(&router->nexthop_list))
2836 			return NULL;
2837 		else
2838 			return list_first_entry(&router->nexthop_list,
2839 						typeof(*nh), router_list_node);
2840 	}
2841 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2842 		return NULL;
2843 	return list_next_entry(nh, router_list_node);
2844 }
2845 
2846 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2847 {
2848 	return nh->offloaded;
2849 }
2850 
2851 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2852 {
2853 	if (!nh->offloaded)
2854 		return NULL;
2855 	return nh->neigh_entry->ha;
2856 }
2857 
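/* Report where a nexthop sits in the device's adjacency table. The hash
 * index is the sum of the adjacency entries of the offloaded nexthops that
 * precede it in the group. Illustrative example: if the preceding offloaded
 * nexthops occupy 3 and 5 entries, the queried nexthop starts at hash
 * index 8 within the group.
 */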
2858 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
2859 			     u32 *p_adj_size, u32 *p_adj_hash_index)
2860 {
2861 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2862 	u32 adj_hash_index = 0;
2863 	int i;
2864 
2865 	if (!nh->offloaded || !nh_grp->adj_index_valid)
2866 		return -EINVAL;
2867 
2868 	*p_adj_index = nh_grp->adj_index;
2869 	*p_adj_size = nh_grp->ecmp_size;
2870 
2871 	for (i = 0; i < nh_grp->count; i++) {
2872 		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2873 
2874 		if (nh_iter == nh)
2875 			break;
2876 		if (nh_iter->offloaded)
2877 			adj_hash_index += nh_iter->num_adj_entries;
2878 	}
2879 
2880 	*p_adj_hash_index = adj_hash_index;
2881 	return 0;
2882 }
2883 
2884 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2885 {
2886 	return nh->rif;
2887 }
2888 
2889 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2890 {
2891 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2892 	int i;
2893 
2894 	for (i = 0; i < nh_grp->count; i++) {
2895 		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2896 
2897 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2898 			return true;
2899 	}
2900 	return false;
2901 }
2902 
2903 static struct fib_info *
2904 mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2905 {
2906 	return nh_grp->priv;
2907 }
2908 
2909 struct mlxsw_sp_nexthop_group_cmp_arg {
2910 	enum mlxsw_sp_l3proto proto;
2911 	union {
2912 		struct fib_info *fi;
2913 		struct mlxsw_sp_fib6_entry *fib6_entry;
2914 	};
2915 };
2916 
2917 static bool
2918 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2919 				    const struct in6_addr *gw, int ifindex,
2920 				    int weight)
2921 {
2922 	int i;
2923 
2924 	for (i = 0; i < nh_grp->count; i++) {
2925 		const struct mlxsw_sp_nexthop *nh;
2926 
2927 		nh = &nh_grp->nexthops[i];
2928 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
2929 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2930 			return true;
2931 	}
2932 
2933 	return false;
2934 }
2935 
2936 static bool
2937 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2938 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
2939 {
2940 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2941 
2942 	if (nh_grp->count != fib6_entry->nrt6)
2943 		return false;
2944 
2945 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2946 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
2947 		struct in6_addr *gw;
2948 		int ifindex, weight;
2949 
2950 		ifindex = fib6_nh->fib_nh_dev->ifindex;
2951 		weight = fib6_nh->fib_nh_weight;
2952 		gw = &fib6_nh->fib_nh_gw6;
2953 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2954 							 weight))
2955 			return false;
2956 	}
2957 
2958 	return true;
2959 }
2960 
2961 static int
2962 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2963 {
2964 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2965 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2966 
2967 	switch (cmp_arg->proto) {
2968 	case MLXSW_SP_L3_PROTO_IPV4:
2969 		return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2970 	case MLXSW_SP_L3_PROTO_IPV6:
2971 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2972 						    cmp_arg->fib6_entry);
2973 	default:
2974 		WARN_ON(1);
2975 		return 1;
2976 	}
2977 }
2978 
2979 static int
2980 mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2981 {
2982 	return nh_grp->neigh_tbl->family;
2983 }
2984 
2985 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2986 {
2987 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
2988 	const struct mlxsw_sp_nexthop *nh;
2989 	struct fib_info *fi;
2990 	unsigned int val;
2991 	int i;
2992 
2993 	switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2994 	case AF_INET:
2995 		fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2996 		return jhash(&fi, sizeof(fi), seed);
2997 	case AF_INET6:
2998 		val = nh_grp->count;
2999 		for (i = 0; i < nh_grp->count; i++) {
3000 			nh = &nh_grp->nexthops[i];
3001 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3002 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3003 		}
3004 		return jhash(&val, sizeof(val), seed);
3005 	default:
3006 		WARN_ON(1);
3007 		return 0;
3008 	}
3009 }
3010 
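/* The per-nexthop hashes below are combined with XOR, so the resulting group
 * hash does not depend on nexthop order. This matches
 * mlxsw_sp_nexthop6_group_cmp(), which also compares groups irrespective of
 * nexthop ordering.
 */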
3011 static u32
3012 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3013 {
3014 	unsigned int val = fib6_entry->nrt6;
3015 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3016 
3017 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3018 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3019 		struct net_device *dev = fib6_nh->fib_nh_dev;
3020 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3021 
3022 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3023 		val ^= jhash(gw, sizeof(*gw), seed);
3024 	}
3025 
3026 	return jhash(&val, sizeof(val), seed);
3027 }
3028 
3029 static u32
3030 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3031 {
3032 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3033 
3034 	switch (cmp_arg->proto) {
3035 	case MLXSW_SP_L3_PROTO_IPV4:
3036 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3037 	case MLXSW_SP_L3_PROTO_IPV6:
3038 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3039 	default:
3040 		WARN_ON(1);
3041 		return 0;
3042 	}
3043 }
3044 
3045 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3046 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3047 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3048 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3049 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3050 };
3051 
3052 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3053 					 struct mlxsw_sp_nexthop_group *nh_grp)
3054 {
3055 	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
3056 	    !nh_grp->gateway)
3057 		return 0;
3058 
3059 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3060 				      &nh_grp->ht_node,
3061 				      mlxsw_sp_nexthop_group_ht_params);
3062 }
3063 
3064 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3065 					  struct mlxsw_sp_nexthop_group *nh_grp)
3066 {
3067 	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
3068 	    !nh_grp->gateway)
3069 		return;
3070 
3071 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3072 			       &nh_grp->ht_node,
3073 			       mlxsw_sp_nexthop_group_ht_params);
3074 }
3075 
3076 static struct mlxsw_sp_nexthop_group *
3077 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3078 			       struct fib_info *fi)
3079 {
3080 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3081 
3082 	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
3083 	cmp_arg.fi = fi;
3084 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3085 				      &cmp_arg,
3086 				      mlxsw_sp_nexthop_group_ht_params);
3087 }
3088 
3089 static struct mlxsw_sp_nexthop_group *
3090 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3091 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3092 {
3093 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3094 
3095 	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
3096 	cmp_arg.fib6_entry = fib6_entry;
3097 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3098 				      &cmp_arg,
3099 				      mlxsw_sp_nexthop_group_ht_params);
3100 }
3101 
3102 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3103 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3104 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3105 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3106 };
3107 
3108 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3109 				   struct mlxsw_sp_nexthop *nh)
3110 {
3111 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3112 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3113 }
3114 
3115 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3116 				    struct mlxsw_sp_nexthop *nh)
3117 {
3118 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3119 			       mlxsw_sp_nexthop_ht_params);
3120 }
3121 
3122 static struct mlxsw_sp_nexthop *
3123 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3124 			struct mlxsw_sp_nexthop_key key)
3125 {
3126 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3127 				      mlxsw_sp_nexthop_ht_params);
3128 }
3129 
3130 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3131 					     const struct mlxsw_sp_fib *fib,
3132 					     u32 adj_index, u16 ecmp_size,
3133 					     u32 new_adj_index,
3134 					     u16 new_ecmp_size)
3135 {
3136 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3137 
3138 	mlxsw_reg_raleu_pack(raleu_pl,
3139 			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
3140 			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
3141 			     new_ecmp_size);
3142 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3143 }
3144 
3145 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3146 					  struct mlxsw_sp_nexthop_group *nh_grp,
3147 					  u32 old_adj_index, u16 old_ecmp_size)
3148 {
3149 	struct mlxsw_sp_fib_entry *fib_entry;
3150 	struct mlxsw_sp_fib *fib = NULL;
3151 	int err;
3152 
3153 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3154 		if (fib == fib_entry->fib_node->fib)
3155 			continue;
3156 		fib = fib_entry->fib_node->fib;
3157 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
3158 							old_adj_index,
3159 							old_ecmp_size,
3160 							nh_grp->adj_index,
3161 							nh_grp->ecmp_size);
3162 		if (err)
3163 			return err;
3164 	}
3165 	return 0;
3166 }
3167 
3168 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3169 				     struct mlxsw_sp_nexthop *nh)
3170 {
3171 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3172 	char ratr_pl[MLXSW_REG_RATR_LEN];
3173 
3174 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
3175 			    true, MLXSW_REG_RATR_TYPE_ETHERNET,
3176 			    adj_index, neigh_entry->rif);
3177 	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3178 	if (nh->counter_valid)
3179 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3180 	else
3181 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3182 
3183 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3184 }
3185 
3186 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3187 			    struct mlxsw_sp_nexthop *nh)
3188 {
3189 	int i;
3190 
3191 	for (i = 0; i < nh->num_adj_entries; i++) {
3192 		int err;
3193 
3194 		err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3195 		if (err)
3196 			return err;
3197 	}
3198 
3199 	return 0;
3200 }
3201 
3202 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3203 					  u32 adj_index,
3204 					  struct mlxsw_sp_nexthop *nh)
3205 {
3206 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3207 
3208 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3209 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3210 }
3211 
3212 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3213 					u32 adj_index,
3214 					struct mlxsw_sp_nexthop *nh)
3215 {
3216 	int i;
3217 
3218 	for (i = 0; i < nh->num_adj_entries; i++) {
3219 		int err;
3220 
3221 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3222 						     nh);
3223 		if (err)
3224 			return err;
3225 	}
3226 
3227 	return 0;
3228 }
3229 
3230 static int
3231 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3232 			      struct mlxsw_sp_nexthop_group *nh_grp,
3233 			      bool reallocate)
3234 {
3235 	u32 adj_index = nh_grp->adj_index; /* base */
3236 	struct mlxsw_sp_nexthop *nh;
3237 	int i;
3238 
3239 	for (i = 0; i < nh_grp->count; i++) {
3240 		nh = &nh_grp->nexthops[i];
3241 
3242 		if (!nh->should_offload) {
3243 			nh->offloaded = 0;
3244 			continue;
3245 		}
3246 
3247 		if (nh->update || reallocate) {
3248 			int err = 0;
3249 
3250 			switch (nh->type) {
3251 			case MLXSW_SP_NEXTHOP_TYPE_ETH:
3252 				err = mlxsw_sp_nexthop_update
3253 					    (mlxsw_sp, adj_index, nh);
3254 				break;
3255 			case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3256 				err = mlxsw_sp_nexthop_ipip_update
3257 					    (mlxsw_sp, adj_index, nh);
3258 				break;
3259 			}
3260 			if (err)
3261 				return err;
3262 			nh->update = 0;
3263 			nh->offloaded = 1;
3264 		}
3265 		adj_index += nh->num_adj_entries;
3266 	}
3267 	return 0;
3268 }
3269 
3270 static int
3271 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3272 				    struct mlxsw_sp_nexthop_group *nh_grp)
3273 {
3274 	struct mlxsw_sp_fib_entry *fib_entry;
3275 	int err;
3276 
3277 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3278 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3279 		if (err)
3280 			return err;
3281 	}
3282 	return 0;
3283 }
3284 
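/* Illustrative example: a requested size of 70 is not a valid adjacency
 * group size, so the helper below rounds it up to 512.
 */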
3285 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3286 {
3287 	/* Valid sizes for an adjacency group are:
3288 	 * 1-64, 512, 1024, 2048 and 4096.
3289 	 */
3290 	if (*p_adj_grp_size <= 64)
3291 		return;
3292 	else if (*p_adj_grp_size <= 512)
3293 		*p_adj_grp_size = 512;
3294 	else if (*p_adj_grp_size <= 1024)
3295 		*p_adj_grp_size = 1024;
3296 	else if (*p_adj_grp_size <= 2048)
3297 		*p_adj_grp_size = 2048;
3298 	else
3299 		*p_adj_grp_size = 4096;
3300 }
3301 
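/* Illustrative example: if the allocator can provide 3000 contiguous
 * entries, the helper below settles on 2048, the largest valid group size
 * that fits.
 */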
3302 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3303 					     unsigned int alloc_size)
3304 {
3305 	if (alloc_size >= 4096)
3306 		*p_adj_grp_size = 4096;
3307 	else if (alloc_size >= 2048)
3308 		*p_adj_grp_size = 2048;
3309 	else if (alloc_size >= 1024)
3310 		*p_adj_grp_size = 1024;
3311 	else if (alloc_size >= 512)
3312 		*p_adj_grp_size = 512;
3313 }
3314 
3315 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3316 				     u16 *p_adj_grp_size)
3317 {
3318 	unsigned int alloc_size;
3319 	int err;
3320 
3321 	/* Round up the requested group size to the next size supported
3322 	 * by the device and make sure the request can be satisfied.
3323 	 */
3324 	mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3325 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3326 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3327 					      *p_adj_grp_size, &alloc_size);
3328 	if (err)
3329 		return err;
	/* It is possible the allocation results in more allocated
	 * entries than requested. Try to use as many of them as
	 * possible.
	 */
3334 	mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3335 
3336 	return 0;
3337 }
3338 
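/* Normalize nexthop weights by their greatest common divisor. Illustrative
 * example: offloadable nexthops with weights {3, 6} have g = 3, yielding
 * normalized weights {1, 2} and sum_norm_weight = 3.
 */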
3339 static void
3340 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3341 {
3342 	int i, g = 0, sum_norm_weight = 0;
3343 	struct mlxsw_sp_nexthop *nh;
3344 
3345 	for (i = 0; i < nh_grp->count; i++) {
3346 		nh = &nh_grp->nexthops[i];
3347 
3348 		if (!nh->should_offload)
3349 			continue;
3350 		if (g > 0)
3351 			g = gcd(nh->nh_weight, g);
3352 		else
3353 			g = nh->nh_weight;
3354 	}
3355 
3356 	for (i = 0; i < nh_grp->count; i++) {
3357 		nh = &nh_grp->nexthops[i];
3358 
3359 		if (!nh->should_offload)
3360 			continue;
3361 		nh->norm_nh_weight = nh->nh_weight / g;
3362 		sum_norm_weight += nh->norm_nh_weight;
3363 	}
3364 
3365 	nh_grp->sum_norm_weight = sum_norm_weight;
3366 }
3367 
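/* Distribute the group's adjacency entries among the offloadable nexthops in
 * proportion to their normalized weights. Illustrative example: with
 * sum_norm_weight = 100, ecmp_size = 512 and two nexthops of normalized
 * weights {1, 99}, DIV_ROUND_CLOSEST() assigns them {5, 507} adjacency
 * entries, respectively.
 */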
3368 static void
3369 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3370 {
3371 	int total = nh_grp->sum_norm_weight;
3372 	u16 ecmp_size = nh_grp->ecmp_size;
3373 	int i, weight = 0, lower_bound = 0;
3374 
3375 	for (i = 0; i < nh_grp->count; i++) {
3376 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3377 		int upper_bound;
3378 
3379 		if (!nh->should_offload)
3380 			continue;
3381 		weight += nh->norm_nh_weight;
3382 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3383 		nh->num_adj_entries = upper_bound - lower_bound;
3384 		lower_bound = upper_bound;
3385 	}
3386 }
3387 
3388 static struct mlxsw_sp_nexthop *
3389 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3390 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3391 
3392 static void
3393 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3394 					struct mlxsw_sp_nexthop_group *nh_grp)
3395 {
3396 	int i;
3397 
3398 	for (i = 0; i < nh_grp->count; i++) {
3399 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3400 
3401 		if (nh->offloaded)
3402 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3403 		else
3404 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3405 	}
3406 }
3407 
3408 static void
3409 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3410 					  struct mlxsw_sp_fib6_entry *fib6_entry)
3411 {
3412 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3413 
3414 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3415 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3416 		struct mlxsw_sp_nexthop *nh;
3417 
3418 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3419 		if (nh && nh->offloaded)
3420 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3421 		else
3422 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3423 	}
3424 }
3425 
3426 static void
3427 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3428 					struct mlxsw_sp_nexthop_group *nh_grp)
3429 {
3430 	struct mlxsw_sp_fib6_entry *fib6_entry;
3431 
3432 	/* Unfortunately, in IPv6 the route and the nexthop are described by
3433 	 * the same struct, so we need to iterate over all the routes using the
3434 	 * nexthop group and set / clear the offload indication for them.
3435 	 */
3436 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3437 			    common.nexthop_group_node)
3438 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3439 }
3440 
3441 static void
3442 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3443 				       struct mlxsw_sp_nexthop_group *nh_grp)
3444 {
3445 	switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
3446 	case AF_INET:
3447 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3448 		break;
3449 	case AF_INET6:
3450 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3451 		break;
3452 	}
3453 }
3454 
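/* Re-resolve the device's view of the group after a nexthop change. Gateway
 * groups with no change in offloadability only get their adjacency entries
 * rewritten in place. Otherwise weights are renormalized, a fresh KVD linear
 * area is allocated and populated, and routes are switched over to it either
 * by updating the fib entries or by a mass adjacency-index update; any
 * failure makes the group fall back to trapping traffic to the kernel.
 */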
3455 static void
3456 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3457 			       struct mlxsw_sp_nexthop_group *nh_grp)
3458 {
3459 	u16 ecmp_size, old_ecmp_size;
3460 	struct mlxsw_sp_nexthop *nh;
3461 	bool offload_change = false;
3462 	u32 adj_index;
3463 	bool old_adj_index_valid;
3464 	u32 old_adj_index;
3465 	int i;
3466 	int err;
3467 
3468 	if (!nh_grp->gateway) {
3469 		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3470 		return;
3471 	}
3472 
3473 	for (i = 0; i < nh_grp->count; i++) {
3474 		nh = &nh_grp->nexthops[i];
3475 
3476 		if (nh->should_offload != nh->offloaded) {
3477 			offload_change = true;
3478 			if (nh->should_offload)
3479 				nh->update = 1;
3480 		}
3481 	}
3482 	if (!offload_change) {
3483 		/* Nothing was added or removed, so no need to reallocate. Just
3484 		 * update MAC on existing adjacency indexes.
3485 		 */
3486 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
3487 		if (err) {
3488 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3489 			goto set_trap;
3490 		}
3491 		return;
3492 	}
3493 	mlxsw_sp_nexthop_group_normalize(nh_grp);
3494 	if (!nh_grp->sum_norm_weight)
		/* No neigh of this group is connected, so we just set
		 * the trap and let everything flow through the kernel.
		 */
3498 		goto set_trap;
3499 
3500 	ecmp_size = nh_grp->sum_norm_weight;
3501 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3502 	if (err)
3503 		/* No valid allocation size available. */
3504 		goto set_trap;
3505 
3506 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3507 				  ecmp_size, &adj_index);
3508 	if (err) {
		/* We ran out of KVD linear space, so just set the
		 * trap and let everything flow through the kernel.
		 */
3512 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3513 		goto set_trap;
3514 	}
3515 	old_adj_index_valid = nh_grp->adj_index_valid;
3516 	old_adj_index = nh_grp->adj_index;
3517 	old_ecmp_size = nh_grp->ecmp_size;
3518 	nh_grp->adj_index_valid = 1;
3519 	nh_grp->adj_index = adj_index;
3520 	nh_grp->ecmp_size = ecmp_size;
3521 	mlxsw_sp_nexthop_group_rebalance(nh_grp);
3522 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
3523 	if (err) {
3524 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3525 		goto set_trap;
3526 	}
3527 
3528 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3529 
3530 	if (!old_adj_index_valid) {
		/* The trap was set for the fib entries, so we have to call
		 * fib entry update to unset it and use the adjacency index.
		 */
3534 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3535 		if (err) {
3536 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3537 			goto set_trap;
3538 		}
3539 		return;
3540 	}
3541 
3542 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3543 					     old_adj_index, old_ecmp_size);
3544 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3545 			   old_ecmp_size, old_adj_index);
3546 	if (err) {
3547 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3548 		goto set_trap;
3549 	}
3550 
3551 	return;
3552 
3553 set_trap:
3554 	old_adj_index_valid = nh_grp->adj_index_valid;
3555 	nh_grp->adj_index_valid = 0;
3556 	for (i = 0; i < nh_grp->count; i++) {
3557 		nh = &nh_grp->nexthops[i];
3558 		nh->offloaded = 0;
3559 	}
3560 	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3561 	if (err)
3562 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3563 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3564 	if (old_adj_index_valid)
3565 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3566 				   nh_grp->ecmp_size, nh_grp->adj_index);
3567 }
3568 
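/* Update the nexthop's offload eligibility according to whether its
 * neighbour is being removed, and mark it for update. The change only
 * takes effect in hardware on the next nexthop group refresh.
 */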
3569 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3570 					    bool removing)
3571 {
3572 	if (!removing)
3573 		nh->should_offload = 1;
3574 	else
3575 		nh->should_offload = 0;
3576 	nh->update = 1;
3577 }
3578 
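/* The kernel neighbour backing this neigh entry died. Look up - or
 * create - a live neighbour for the same gateway address, re-insert the
 * entry keyed on it and migrate the references of all nexthops using
 * the entry from the dead neighbour to the new one.
 */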
3579 static int
3580 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3581 				    struct mlxsw_sp_neigh_entry *neigh_entry)
3582 {
3583 	struct neighbour *n, *old_n = neigh_entry->key.n;
3584 	struct mlxsw_sp_nexthop *nh;
3585 	bool entry_connected;
3586 	u8 nud_state, dead;
3587 	int err;
3588 
3589 	nh = list_first_entry(&neigh_entry->nexthop_list,
3590 			      struct mlxsw_sp_nexthop, neigh_list_node);
3591 
3592 	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3593 	if (!n) {
3594 		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3595 				 nh->rif->dev);
3596 		if (IS_ERR(n))
3597 			return PTR_ERR(n);
3598 		neigh_event_send(n, NULL);
3599 	}
3600 
3601 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3602 	neigh_entry->key.n = n;
3603 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3604 	if (err)
3605 		goto err_neigh_entry_insert;
3606 
3607 	read_lock_bh(&n->lock);
3608 	nud_state = n->nud_state;
3609 	dead = n->dead;
3610 	read_unlock_bh(&n->lock);
3611 	entry_connected = nud_state & NUD_VALID && !dead;
3612 
3613 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
3614 			    neigh_list_node) {
3615 		neigh_release(old_n);
3616 		neigh_clone(n);
3617 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
3618 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3619 	}
3620 
3621 	neigh_release(n);
3622 
3623 	return 0;
3624 
3625 err_neigh_entry_insert:
3626 	neigh_entry->key.n = old_n;
3627 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3628 	neigh_release(n);
3629 	return err;
3630 }
3631 
3632 static void
3633 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3634 			      struct mlxsw_sp_neigh_entry *neigh_entry,
3635 			      bool removing, bool dead)
3636 {
3637 	struct mlxsw_sp_nexthop *nh;
3638 
3639 	if (list_empty(&neigh_entry->nexthop_list))
3640 		return;
3641 
3642 	if (dead) {
3643 		int err;
3644 
3645 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
3646 							  neigh_entry);
3647 		if (err)
3648 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
3649 		return;
3650 	}
3651 
3652 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
3653 			    neigh_list_node) {
3654 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
3655 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3656 	}
3657 }
3658 
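/* Link the nexthop to the RIF, so that RIF events (update, migration
 * and removal) can be propagated to all nexthops using it.
 */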
3659 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3660 				      struct mlxsw_sp_rif *rif)
3661 {
3662 	if (nh->rif)
3663 		return;
3664 
3665 	nh->rif = rif;
3666 	list_add(&nh->rif_list_node, &rif->nexthop_list);
3667 }
3668 
3669 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3670 {
3671 	if (!nh->rif)
3672 		return;
3673 
3674 	list_del(&nh->rif_list_node);
3675 	nh->rif = NULL;
3676 }
3677 
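/* Resolve the neighbour used by the nexthop's gateway address, taking
 * a reference on it, link the nexthop to the matching neigh entry and
 * set the initial offload indication according to the neighbour's
 * state.
 */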
3678 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3679 				       struct mlxsw_sp_nexthop *nh)
3680 {
3681 	struct mlxsw_sp_neigh_entry *neigh_entry;
3682 	struct neighbour *n;
3683 	u8 nud_state, dead;
3684 	int err;
3685 
3686 	if (!nh->nh_grp->gateway || nh->neigh_entry)
3687 		return 0;
3688 
	/* Take a reference on the neighbour, ensuring it is not
	 * destroyed before the nexthop entry is finished with it.
	 * The reference is taken either in neigh_lookup() or, if the
	 * neighbour is not found, in neigh_create().
	 */
3694 	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3695 	if (!n) {
3696 		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3697 				 nh->rif->dev);
3698 		if (IS_ERR(n))
3699 			return PTR_ERR(n);
3700 		neigh_event_send(n, NULL);
3701 	}
3702 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3703 	if (!neigh_entry) {
3704 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3705 		if (IS_ERR(neigh_entry)) {
3706 			err = -EINVAL;
3707 			goto err_neigh_entry_create;
3708 		}
3709 	}
3710 
	/* If this is the first nexthop connected to this neigh, add it
	 * to the nexthop_neighs_list.
	 */
3714 	if (list_empty(&neigh_entry->nexthop_list))
3715 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
3716 			      &mlxsw_sp->router->nexthop_neighs_list);
3717 
3718 	nh->neigh_entry = neigh_entry;
3719 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3720 	read_lock_bh(&n->lock);
3721 	nud_state = n->nud_state;
3722 	dead = n->dead;
3723 	read_unlock_bh(&n->lock);
3724 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
3725 
3726 	return 0;
3727 
3728 err_neigh_entry_create:
3729 	neigh_release(n);
3730 	return err;
3731 }
3732 
3733 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3734 					struct mlxsw_sp_nexthop *nh)
3735 {
3736 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3737 	struct neighbour *n;
3738 
3739 	if (!neigh_entry)
3740 		return;
3741 	n = neigh_entry->key.n;
3742 
3743 	__mlxsw_sp_nexthop_neigh_update(nh, true);
3744 	list_del(&nh->neigh_list_node);
3745 	nh->neigh_entry = NULL;
3746 
	/* If this is the last nexthop connected to this neigh, remove it
	 * from the nexthop_neighs_list.
	 */
3750 	if (list_empty(&neigh_entry->nexthop_list))
3751 		list_del(&neigh_entry->nexthop_neighs_list_node);
3752 
3753 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3754 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3755 
3756 	neigh_release(n);
3757 }
3758 
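/* Return true if the tunnel's underlay device is up, or if the tunnel
 * is not bound to an underlay device at all.
 */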
3759 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3760 {
3761 	struct net_device *ul_dev;
3762 	bool is_up;
3763 
3764 	rcu_read_lock();
3765 	ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3766 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
3767 	rcu_read_unlock();
3768 
3769 	return is_up;
3770 }
3771 
3772 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3773 				       struct mlxsw_sp_nexthop *nh,
3774 				       struct mlxsw_sp_ipip_entry *ipip_entry)
3775 {
3776 	bool removing;
3777 
3778 	if (!nh->nh_grp->gateway || nh->ipip_entry)
3779 		return;
3780 
3781 	nh->ipip_entry = ipip_entry;
3782 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
3783 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
3784 	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
3785 }
3786 
3787 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3788 				       struct mlxsw_sp_nexthop *nh)
3789 {
3790 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3791 
3792 	if (!ipip_entry)
3793 		return;
3794 
3795 	__mlxsw_sp_nexthop_neigh_update(nh, true);
3796 	nh->ipip_entry = NULL;
3797 }
3798 
3799 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3800 					const struct fib_nh *fib_nh,
3801 					enum mlxsw_sp_ipip_type *p_ipipt)
3802 {
3803 	struct net_device *dev = fib_nh->fib_nh_dev;
3804 
3805 	return dev &&
3806 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3807 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3808 }
3809 
3810 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3811 				       struct mlxsw_sp_nexthop *nh)
3812 {
3813 	switch (nh->type) {
3814 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
3815 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3816 		mlxsw_sp_nexthop_rif_fini(nh);
3817 		break;
3818 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3819 		mlxsw_sp_nexthop_rif_fini(nh);
3820 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3821 		break;
3822 	}
3823 }
3824 
3825 static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3826 				       struct mlxsw_sp_nexthop *nh,
3827 				       struct fib_nh *fib_nh)
3828 {
3829 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3830 	struct net_device *dev = fib_nh->fib_nh_dev;
3831 	struct mlxsw_sp_ipip_entry *ipip_entry;
3832 	struct mlxsw_sp_rif *rif;
3833 	int err;
3834 
3835 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3836 	if (ipip_entry) {
3837 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3838 		if (ipip_ops->can_offload(mlxsw_sp, dev,
3839 					  MLXSW_SP_L3_PROTO_IPV4)) {
3840 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3841 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3842 			return 0;
3843 		}
3844 	}
3845 
3846 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3847 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3848 	if (!rif)
3849 		return 0;
3850 
3851 	mlxsw_sp_nexthop_rif_init(nh, rif);
3852 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3853 	if (err)
3854 		goto err_neigh_init;
3855 
3856 	return 0;
3857 
3858 err_neigh_init:
3859 	mlxsw_sp_nexthop_rif_fini(nh);
3860 	return err;
3861 }
3862 
3863 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3864 					struct mlxsw_sp_nexthop *nh)
3865 {
3866 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3867 }
3868 
3869 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3870 				  struct mlxsw_sp_nexthop_group *nh_grp,
3871 				  struct mlxsw_sp_nexthop *nh,
3872 				  struct fib_nh *fib_nh)
3873 {
3874 	struct net_device *dev = fib_nh->fib_nh_dev;
3875 	struct in_device *in_dev;
3876 	int err;
3877 
3878 	nh->nh_grp = nh_grp;
3879 	nh->key.fib_nh = fib_nh;
3880 #ifdef CONFIG_IP_ROUTE_MULTIPATH
3881 	nh->nh_weight = fib_nh->fib_nh_weight;
3882 #else
3883 	nh->nh_weight = 1;
3884 #endif
3885 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
3886 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3887 	if (err)
3888 		return err;
3889 
3890 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
3891 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3892 
3893 	if (!dev)
3894 		return 0;
3895 
3896 	rcu_read_lock();
3897 	in_dev = __in_dev_get_rcu(dev);
3898 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3899 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
3900 		rcu_read_unlock();
3901 		return 0;
3902 	}
3903 	rcu_read_unlock();
3904 
3905 	err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3906 	if (err)
3907 		goto err_nexthop_neigh_init;
3908 
3909 	return 0;
3910 
3911 err_nexthop_neigh_init:
3912 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3913 	return err;
3914 }
3915 
3916 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3917 				   struct mlxsw_sp_nexthop *nh)
3918 {
3919 	mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3920 	list_del(&nh->router_list_node);
3921 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
3922 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3923 }
3924 
3925 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3926 				    unsigned long event, struct fib_nh *fib_nh)
3927 {
3928 	struct mlxsw_sp_nexthop_key key;
3929 	struct mlxsw_sp_nexthop *nh;
3930 
3931 	if (mlxsw_sp->router->aborted)
3932 		return;
3933 
3934 	key.fib_nh = fib_nh;
3935 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3936 	if (!nh)
3937 		return;
3938 
3939 	switch (event) {
3940 	case FIB_EVENT_NH_ADD:
3941 		mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3942 		break;
3943 	case FIB_EVENT_NH_DEL:
3944 		mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3945 		break;
3946 	}
3947 
3948 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3949 }
3950 
3951 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3952 					struct mlxsw_sp_rif *rif)
3953 {
3954 	struct mlxsw_sp_nexthop *nh;
3955 	bool removing;
3956 
3957 	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3958 		switch (nh->type) {
3959 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
3960 			removing = false;
3961 			break;
3962 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3963 			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3964 			break;
3965 		default:
3966 			WARN_ON(1);
3967 			continue;
3968 		}
3969 
3970 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
3971 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3972 	}
3973 }
3974 
3975 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3976 					 struct mlxsw_sp_rif *old_rif,
3977 					 struct mlxsw_sp_rif *new_rif)
3978 {
3979 	struct mlxsw_sp_nexthop *nh;
3980 
3981 	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3982 	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3983 		nh->rif = new_rif;
3984 	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3985 }
3986 
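/* The RIF is going away. Tear down the type-specific state of all
 * nexthops using it and refresh their groups, so that the device stops
 * forwarding through them.
 */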
3987 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
3988 					   struct mlxsw_sp_rif *rif)
3989 {
3990 	struct mlxsw_sp_nexthop *nh, *tmp;
3991 
3992 	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
3993 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3994 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3995 	}
3996 }
3997 
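/* A route is considered a gateway route if its first nexthop either
 * has link scope (i.e., a gateway address) or egresses through a
 * device recognized as an IP-in-IP tunnel.
 */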
3998 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3999 				   struct fib_info *fi)
4000 {
4001 	const struct fib_nh *nh = fib_info_nh(fi, 0);
4002 
4003 	return nh->fib_nh_scope == RT_SCOPE_LINK ||
4004 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
4005 }
4006 
4007 static struct mlxsw_sp_nexthop_group *
4008 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
4009 {
4010 	unsigned int nhs = fib_info_num_path(fi);
4011 	struct mlxsw_sp_nexthop_group *nh_grp;
4012 	struct mlxsw_sp_nexthop *nh;
4013 	struct fib_nh *fib_nh;
4014 	int i;
4015 	int err;
4016 
4017 	nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
4018 	if (!nh_grp)
4019 		return ERR_PTR(-ENOMEM);
4020 	nh_grp->priv = fi;
4021 	INIT_LIST_HEAD(&nh_grp->fib_list);
4022 	nh_grp->neigh_tbl = &arp_tbl;
4023 
4024 	nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
4025 	nh_grp->count = nhs;
4026 	fib_info_hold(fi);
4027 	for (i = 0; i < nh_grp->count; i++) {
4028 		nh = &nh_grp->nexthops[i];
4029 		fib_nh = fib_info_nh(fi, i);
4030 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
4031 		if (err)
4032 			goto err_nexthop4_init;
4033 	}
4034 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4035 	if (err)
4036 		goto err_nexthop_group_insert;
4037 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4038 	return nh_grp;
4039 
4040 err_nexthop_group_insert:
4041 err_nexthop4_init:
4042 	for (i--; i >= 0; i--) {
4043 		nh = &nh_grp->nexthops[i];
4044 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4045 	}
4046 	fib_info_put(fi);
4047 	kfree(nh_grp);
4048 	return ERR_PTR(err);
4049 }
4050 
4051 static void
4052 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
4053 				struct mlxsw_sp_nexthop_group *nh_grp)
4054 {
4055 	struct mlxsw_sp_nexthop *nh;
4056 	int i;
4057 
4058 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
4059 	for (i = 0; i < nh_grp->count; i++) {
4060 		nh = &nh_grp->nexthops[i];
4061 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4062 	}
4063 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4064 	WARN_ON_ONCE(nh_grp->adj_index_valid);
4065 	fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
4066 	kfree(nh_grp);
4067 }
4068 
4069 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
4070 				       struct mlxsw_sp_fib_entry *fib_entry,
4071 				       struct fib_info *fi)
4072 {
4073 	struct mlxsw_sp_nexthop_group *nh_grp;
4074 
4075 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
4076 	if (!nh_grp) {
4077 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
4078 		if (IS_ERR(nh_grp))
4079 			return PTR_ERR(nh_grp);
4080 	}
4081 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
4082 	fib_entry->nh_group = nh_grp;
4083 	return 0;
4084 }
4085 
4086 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
4087 					struct mlxsw_sp_fib_entry *fib_entry)
4088 {
4089 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4090 
4091 	list_del(&fib_entry->nexthop_group_node);
4092 	if (!list_empty(&nh_grp->fib_list))
4093 		return;
4094 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
4095 }
4096 
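/* Routes with a non-zero TOS are not offloaded, since the device
 * ignores the TOS when looking up a route.
 */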
4097 static bool
4098 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4099 {
4100 	struct mlxsw_sp_fib4_entry *fib4_entry;
4101 
4102 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4103 				  common);
4104 	return !fib4_entry->tos;
4105 }
4106 
4107 static bool
4108 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4109 {
4110 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4111 
4112 	switch (fib_entry->fib_node->fib->proto) {
4113 	case MLXSW_SP_L3_PROTO_IPV4:
4114 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
4115 			return false;
4116 		break;
4117 	case MLXSW_SP_L3_PROTO_IPV6:
4118 		break;
4119 	}
4120 
4121 	switch (fib_entry->type) {
4122 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4123 		return !!nh_group->adj_index_valid;
4124 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4125 		return !!nh_group->nh_rif;
4126 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4127 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4128 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4129 		return true;
4130 	default:
4131 		return false;
4132 	}
4133 }
4134 
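/* Return the nexthop in the group that corresponds to the given IPv6
 * route, matching on both the egress device of the nexthop's RIF and
 * the gateway address.
 */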
4135 static struct mlxsw_sp_nexthop *
4136 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4137 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4138 {
4139 	int i;
4140 
4141 	for (i = 0; i < nh_grp->count; i++) {
4142 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4143 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
4144 
4145 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
4146 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
4147 				    &rt->fib6_nh->fib_nh_gw6))
4148 			return nh;
4150 	}
4151 
4152 	return NULL;
4153 }
4154 
4155 static void
4156 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4157 				 struct mlxsw_sp_fib_entry *fib_entry)
4158 {
4159 	struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
4160 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4161 	int dst_len = fib_entry->fib_node->key.prefix_len;
4162 	struct mlxsw_sp_fib4_entry *fib4_entry;
4163 	struct fib_rt_info fri;
4164 	bool should_offload;
4165 
4166 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4167 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4168 				  common);
4169 	fri.fi = fi;
4170 	fri.tb_id = fib4_entry->tb_id;
4171 	fri.dst = cpu_to_be32(*p_dst);
4172 	fri.dst_len = dst_len;
4173 	fri.tos = fib4_entry->tos;
4174 	fri.type = fib4_entry->type;
4175 	fri.offload = should_offload;
4176 	fri.trap = !should_offload;
4177 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4178 }
4179 
4180 static void
4181 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4182 				   struct mlxsw_sp_fib_entry *fib_entry)
4183 {
4184 	struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
4185 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4186 	int dst_len = fib_entry->fib_node->key.prefix_len;
4187 	struct mlxsw_sp_fib4_entry *fib4_entry;
4188 	struct fib_rt_info fri;
4189 
4190 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4191 				  common);
4192 	fri.fi = fi;
4193 	fri.tb_id = fib4_entry->tb_id;
4194 	fri.dst = cpu_to_be32(*p_dst);
4195 	fri.dst_len = dst_len;
4196 	fri.tos = fib4_entry->tos;
4197 	fri.type = fib4_entry->type;
4198 	fri.offload = false;
4199 	fri.trap = false;
4200 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4201 }
4202 
4203 static void
4204 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4205 				 struct mlxsw_sp_fib_entry *fib_entry)
4206 {
4207 	struct mlxsw_sp_fib6_entry *fib6_entry;
4208 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4209 	bool should_offload;
4210 
4211 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4212 
4213 	/* In IPv6 a multipath route is represented using multiple routes, so
4214 	 * we need to set the flags on all of them.
4215 	 */
4216 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4217 				  common);
4218 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4219 		fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
4220 				       !should_offload);
4221 }
4222 
4223 static void
4224 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4225 				   struct mlxsw_sp_fib_entry *fib_entry)
4226 {
4227 	struct mlxsw_sp_fib6_entry *fib6_entry;
4228 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4229 
4230 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4231 				  common);
4232 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4233 		fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
4234 }
4235 
4236 static void
4237 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4238 				struct mlxsw_sp_fib_entry *fib_entry)
4239 {
4240 	switch (fib_entry->fib_node->fib->proto) {
4241 	case MLXSW_SP_L3_PROTO_IPV4:
4242 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
4243 		break;
4244 	case MLXSW_SP_L3_PROTO_IPV6:
4245 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
4246 		break;
4247 	}
4248 }
4249 
4250 static void
4251 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4252 				  struct mlxsw_sp_fib_entry *fib_entry)
4253 {
4254 	switch (fib_entry->fib_node->fib->proto) {
4255 	case MLXSW_SP_L3_PROTO_IPV4:
4256 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4257 		break;
4258 	case MLXSW_SP_L3_PROTO_IPV6:
4259 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4260 		break;
4261 	}
4262 }
4263 
4264 static void
4265 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
4266 				    struct mlxsw_sp_fib_entry *fib_entry,
4267 				    enum mlxsw_reg_ralue_op op)
4268 {
4269 	switch (op) {
4270 	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
4271 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
4272 		break;
4273 	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
4274 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4275 		break;
4276 	default:
4277 		break;
4278 	}
4279 }
4280 
4281 static void
4282 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
4283 			      const struct mlxsw_sp_fib_entry *fib_entry,
4284 			      enum mlxsw_reg_ralue_op op)
4285 {
4286 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
4287 	enum mlxsw_reg_ralxx_protocol proto;
4288 	u32 *p_dip;
4289 
4290 	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
4291 
4292 	switch (fib->proto) {
4293 	case MLXSW_SP_L3_PROTO_IPV4:
4294 		p_dip = (u32 *) fib_entry->fib_node->key.addr;
4295 		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
4296 				      fib_entry->fib_node->key.prefix_len,
4297 				      *p_dip);
4298 		break;
4299 	case MLXSW_SP_L3_PROTO_IPV6:
4300 		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
4301 				      fib_entry->fib_node->key.prefix_len,
4302 				      fib_entry->fib_node->key.addr);
4303 		break;
4304 	}
4305 }
4306 
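/* Allocate a single adjacency entry that discards packets and bind it
 * to the given RIF. It is used for remote-type routes whose nexthop
 * group does not have a valid adjacency index.
 */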
4307 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
4308 {
4309 	enum mlxsw_reg_ratr_trap_action trap_action;
4310 	char ratr_pl[MLXSW_REG_RATR_LEN];
4311 	int err;
4312 
4313 	if (mlxsw_sp->router->adj_discard_index_valid)
4314 		return 0;
4315 
4316 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4317 				  &mlxsw_sp->router->adj_discard_index);
4318 	if (err)
4319 		return err;
4320 
4321 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
4322 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4323 			    MLXSW_REG_RATR_TYPE_ETHERNET,
4324 			    mlxsw_sp->router->adj_discard_index, rif_index);
4325 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4326 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4327 	if (err)
4328 		goto err_ratr_write;
4329 
4330 	mlxsw_sp->router->adj_discard_index_valid = true;
4331 
4332 	return 0;
4333 
4334 err_ratr_write:
4335 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4336 			   mlxsw_sp->router->adj_discard_index);
4337 	return err;
4338 }
4339 
4340 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4341 					struct mlxsw_sp_fib_entry *fib_entry,
4342 					enum mlxsw_reg_ralue_op op)
4343 {
4344 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4345 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4346 	enum mlxsw_reg_ralue_trap_action trap_action;
4347 	u16 trap_id = 0;
4348 	u32 adjacency_index = 0;
4349 	u16 ecmp_size = 0;
4350 	int err;
4351 
	/* In case the nexthop group adjacency index is valid, use it
	 * with the provided ECMP size. Otherwise, set up a trap and pass
	 * traffic to the kernel.
	 */
4356 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4357 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4358 		adjacency_index = fib_entry->nh_group->adj_index;
4359 		ecmp_size = fib_entry->nh_group->ecmp_size;
4360 	} else if (!nh_group->adj_index_valid && nh_group->count &&
4361 		   nh_group->nh_rif) {
4362 		err = mlxsw_sp_adj_discard_write(mlxsw_sp,
4363 						 nh_group->nh_rif->rif_index);
4364 		if (err)
4365 			return err;
4366 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4367 		adjacency_index = mlxsw_sp->router->adj_discard_index;
4368 		ecmp_size = 1;
4369 	} else {
4370 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4371 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4372 	}
4373 
4374 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4375 	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
4376 					adjacency_index, ecmp_size);
4377 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4378 }
4379 
4380 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4381 				       struct mlxsw_sp_fib_entry *fib_entry,
4382 				       enum mlxsw_reg_ralue_op op)
4383 {
4384 	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
4385 	enum mlxsw_reg_ralue_trap_action trap_action;
4386 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4387 	u16 trap_id = 0;
4388 	u16 rif_index = 0;
4389 
4390 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4391 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4392 		rif_index = rif->rif_index;
4393 	} else {
4394 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4395 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4396 	}
4397 
4398 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4399 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
4400 				       rif_index);
4401 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4402 }
4403 
4404 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4405 				      struct mlxsw_sp_fib_entry *fib_entry,
4406 				      enum mlxsw_reg_ralue_op op)
4407 {
4408 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4409 
4410 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4411 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
4412 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4413 }
4414 
4415 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
4416 					   struct mlxsw_sp_fib_entry *fib_entry,
4417 					   enum mlxsw_reg_ralue_op op)
4418 {
4419 	enum mlxsw_reg_ralue_trap_action trap_action;
4420 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4421 
4422 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
4423 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4424 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
4425 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4426 }
4427 
4428 static int
4429 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
4430 				  struct mlxsw_sp_fib_entry *fib_entry,
4431 				  enum mlxsw_reg_ralue_op op)
4432 {
4433 	enum mlxsw_reg_ralue_trap_action trap_action;
4434 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4435 	u16 trap_id;
4436 
4437 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4438 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
4439 
4440 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4441 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
4442 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4443 }
4444 
4445 static int
4446 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4447 				 struct mlxsw_sp_fib_entry *fib_entry,
4448 				 enum mlxsw_reg_ralue_op op)
4449 {
4450 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4451 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4452 
4453 	if (WARN_ON(!ipip_entry))
4454 		return -EINVAL;
4455 
4456 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4457 	return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4458 				      fib_entry->decap.tunnel_index);
4459 }
4460 
4461 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
4462 					   struct mlxsw_sp_fib_entry *fib_entry,
4463 					   enum mlxsw_reg_ralue_op op)
4464 {
4465 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4466 
4467 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4468 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
4469 					   fib_entry->decap.tunnel_index);
4470 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4471 }
4472 
4473 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4474 				   struct mlxsw_sp_fib_entry *fib_entry,
4475 				   enum mlxsw_reg_ralue_op op)
4476 {
4477 	switch (fib_entry->type) {
4478 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4479 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
4480 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4481 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
4482 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
4483 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
4484 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4485 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
4486 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
4487 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
4488 							 op);
4489 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4490 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4491 							fib_entry, op);
4492 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4493 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
4494 	}
4495 	return -EINVAL;
4496 }
4497 
4498 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4499 				 struct mlxsw_sp_fib_entry *fib_entry,
4500 				 enum mlxsw_reg_ralue_op op)
4501 {
4502 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
4503 
4504 	if (err)
4505 		return err;
4506 
4507 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
4508 
4509 	return err;
4510 }
4511 
4512 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4513 				     struct mlxsw_sp_fib_entry *fib_entry)
4514 {
4515 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4516 				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
4517 }
4518 
4519 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4520 				  struct mlxsw_sp_fib_entry *fib_entry)
4521 {
4522 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4523 				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
4524 }
4525 
4526 static int
4527 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4528 			     const struct fib_entry_notifier_info *fen_info,
4529 			     struct mlxsw_sp_fib_entry *fib_entry)
4530 {
4531 	struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
4532 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4533 	struct mlxsw_sp_router *router = mlxsw_sp->router;
4534 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
4535 	struct mlxsw_sp_ipip_entry *ipip_entry;
4536 	struct fib_info *fi = fen_info->fi;
4537 
4538 	switch (fen_info->type) {
4539 	case RTN_LOCAL:
4540 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4541 						 MLXSW_SP_L3_PROTO_IPV4, dip);
4542 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
4543 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4544 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4545 							     fib_entry,
4546 							     ipip_entry);
4547 		}
4548 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
4549 						 MLXSW_SP_L3_PROTO_IPV4,
4550 						 &dip)) {
4551 			u32 tunnel_index;
4552 
4553 			tunnel_index = router->nve_decap_config.tunnel_index;
4554 			fib_entry->decap.tunnel_index = tunnel_index;
4555 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
4556 			return 0;
4557 		}
4558 		/* fall through */
4559 	case RTN_BROADCAST:
4560 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4561 		return 0;
4562 	case RTN_BLACKHOLE:
4563 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
4564 		return 0;
4565 	case RTN_UNREACHABLE: /* fall through */
4566 	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can be trapped with a lower priority than packets
		 * directed at the host, so use action type local instead
		 * of trap.
		 */
4571 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
4572 		return 0;
4573 	case RTN_UNICAST:
4574 		if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
4575 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4576 		else
4577 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4578 		return 0;
4579 	default:
4580 		return -EINVAL;
4581 	}
4582 }
4583 
4584 static void
4585 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
4586 			       struct mlxsw_sp_fib_entry *fib_entry)
4587 {
4588 	switch (fib_entry->type) {
4589 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4590 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
4591 		break;
4592 	default:
4593 		break;
4594 	}
4595 }
4596 
4597 static struct mlxsw_sp_fib4_entry *
4598 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4599 			   struct mlxsw_sp_fib_node *fib_node,
4600 			   const struct fib_entry_notifier_info *fen_info)
4601 {
4602 	struct mlxsw_sp_fib4_entry *fib4_entry;
4603 	struct mlxsw_sp_fib_entry *fib_entry;
4604 	int err;
4605 
4606 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4607 	if (!fib4_entry)
4608 		return ERR_PTR(-ENOMEM);
4609 	fib_entry = &fib4_entry->common;
4610 
4611 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4612 	if (err)
4613 		goto err_fib4_entry_type_set;
4614 
4615 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
4616 	if (err)
4617 		goto err_nexthop4_group_get;
4618 
4619 	fib4_entry->prio = fen_info->fi->fib_priority;
4620 	fib4_entry->tb_id = fen_info->tb_id;
4621 	fib4_entry->type = fen_info->type;
4622 	fib4_entry->tos = fen_info->tos;
4623 
4624 	fib_entry->fib_node = fib_node;
4625 
4626 	return fib4_entry;
4627 
4628 err_nexthop4_group_get:
4629 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
4630 err_fib4_entry_type_set:
4631 	kfree(fib4_entry);
4632 	return ERR_PTR(err);
4633 }
4634 
4635 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4636 					struct mlxsw_sp_fib4_entry *fib4_entry)
4637 {
4638 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
4639 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
4640 	kfree(fib4_entry);
4641 }
4642 
4643 static struct mlxsw_sp_fib4_entry *
4644 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4645 			   const struct fib_entry_notifier_info *fen_info)
4646 {
4647 	struct mlxsw_sp_fib4_entry *fib4_entry;
4648 	struct mlxsw_sp_fib_node *fib_node;
4649 	struct mlxsw_sp_fib *fib;
4650 	struct mlxsw_sp_vr *vr;
4651 
4652 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4653 	if (!vr)
4654 		return NULL;
4655 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4656 
4657 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4658 					    sizeof(fen_info->dst),
4659 					    fen_info->dst_len);
4660 	if (!fib_node)
4661 		return NULL;
4662 
4663 	fib4_entry = container_of(fib_node->fib_entry,
4664 				  struct mlxsw_sp_fib4_entry, common);
4665 	if (fib4_entry->tb_id == fen_info->tb_id &&
4666 	    fib4_entry->tos == fen_info->tos &&
4667 	    fib4_entry->type == fen_info->type &&
4668 	    mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4669 	    fen_info->fi)
4670 		return fib4_entry;
4671 
4672 	return NULL;
4673 }
4674 
4675 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4676 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4677 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4678 	.key_len = sizeof(struct mlxsw_sp_fib_key),
4679 	.automatic_shrinking = true,
4680 };
4681 
4682 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4683 				    struct mlxsw_sp_fib_node *fib_node)
4684 {
4685 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4686 				      mlxsw_sp_fib_ht_params);
4687 }
4688 
4689 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4690 				     struct mlxsw_sp_fib_node *fib_node)
4691 {
4692 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4693 			       mlxsw_sp_fib_ht_params);
4694 }
4695 
4696 static struct mlxsw_sp_fib_node *
4697 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4698 			 size_t addr_len, unsigned char prefix_len)
4699 {
4700 	struct mlxsw_sp_fib_key key;
4701 
4702 	memset(&key, 0, sizeof(key));
4703 	memcpy(key.addr, addr, addr_len);
4704 	key.prefix_len = prefix_len;
4705 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4706 }
4707 
4708 static struct mlxsw_sp_fib_node *
4709 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
4710 			 size_t addr_len, unsigned char prefix_len)
4711 {
4712 	struct mlxsw_sp_fib_node *fib_node;
4713 
4714 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4715 	if (!fib_node)
4716 		return NULL;
4717 
4718 	list_add(&fib_node->list, &fib->node_list);
4719 	memcpy(fib_node->key.addr, addr, addr_len);
4720 	fib_node->key.prefix_len = prefix_len;
4721 
4722 	return fib_node;
4723 }
4724 
4725 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4726 {
4727 	list_del(&fib_node->list);
4728 	kfree(fib_node);
4729 }
4730 
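/* Account for the node's prefix length in the LPM tree bound to the
 * FIB. If the prefix length is not yet used by the tree, get a tree
 * that also covers it and rebind all virtual routers using this FIB to
 * the new tree.
 */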
4731 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4732 				      struct mlxsw_sp_fib_node *fib_node)
4733 {
4734 	struct mlxsw_sp_prefix_usage req_prefix_usage;
4735 	struct mlxsw_sp_fib *fib = fib_node->fib;
4736 	struct mlxsw_sp_lpm_tree *lpm_tree;
4737 	int err;
4738 
4739 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4740 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4741 		goto out;
4742 
4743 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4744 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4745 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4746 					 fib->proto);
4747 	if (IS_ERR(lpm_tree))
4748 		return PTR_ERR(lpm_tree);
4749 
4750 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4751 	if (err)
4752 		goto err_lpm_tree_replace;
4753 
4754 out:
4755 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
4756 	return 0;
4757 
4758 err_lpm_tree_replace:
4759 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4760 	return err;
4761 }
4762 
4763 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4764 					 struct mlxsw_sp_fib_node *fib_node)
4765 {
4766 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4767 	struct mlxsw_sp_prefix_usage req_prefix_usage;
4768 	struct mlxsw_sp_fib *fib = fib_node->fib;
4769 	int err;
4770 
4771 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4772 		return;
	/* Try to construct a new LPM tree from the current prefix usage
	 * minus the prefix length that is no longer used. If this fails,
	 * continue using the old tree.
	 */
4776 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4777 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4778 				    fib_node->key.prefix_len);
4779 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4780 					 fib->proto);
4781 	if (IS_ERR(lpm_tree))
4782 		return;
4783 
4784 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4785 	if (err)
4786 		goto err_lpm_tree_replace;
4787 
4788 	return;
4789 
4790 err_lpm_tree_replace:
4791 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4792 }
4793 
4794 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4795 				  struct mlxsw_sp_fib_node *fib_node,
4796 				  struct mlxsw_sp_fib *fib)
4797 {
4798 	int err;
4799 
4800 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
4801 	if (err)
4802 		return err;
4803 	fib_node->fib = fib;
4804 
4805 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
4806 	if (err)
4807 		goto err_fib_lpm_tree_link;
4808 
4809 	return 0;
4810 
4811 err_fib_lpm_tree_link:
4812 	fib_node->fib = NULL;
4813 	mlxsw_sp_fib_node_remove(fib, fib_node);
4814 	return err;
4815 }
4816 
4817 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4818 				   struct mlxsw_sp_fib_node *fib_node)
4819 {
4820 	struct mlxsw_sp_fib *fib = fib_node->fib;
4821 
4822 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
4823 	fib_node->fib = NULL;
4824 	mlxsw_sp_fib_node_remove(fib, fib_node);
4825 }
4826 
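/* Look up the FIB node for the given prefix in the virtual router's
 * FIB, creating and initializing it if it does not exist yet.
 */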
4827 static struct mlxsw_sp_fib_node *
4828 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4829 		      size_t addr_len, unsigned char prefix_len,
4830 		      enum mlxsw_sp_l3proto proto)
4831 {
4832 	struct mlxsw_sp_fib_node *fib_node;
4833 	struct mlxsw_sp_fib *fib;
4834 	struct mlxsw_sp_vr *vr;
4835 	int err;
4836 
4837 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
4838 	if (IS_ERR(vr))
4839 		return ERR_CAST(vr);
4840 	fib = mlxsw_sp_vr_fib(vr, proto);
4841 
4842 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
4843 	if (fib_node)
4844 		return fib_node;
4845 
4846 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
4847 	if (!fib_node) {
4848 		err = -ENOMEM;
4849 		goto err_fib_node_create;
4850 	}
4851 
4852 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4853 	if (err)
4854 		goto err_fib_node_init;
4855 
4856 	return fib_node;
4857 
4858 err_fib_node_init:
4859 	mlxsw_sp_fib_node_destroy(fib_node);
4860 err_fib_node_create:
4861 	mlxsw_sp_vr_put(mlxsw_sp, vr);
4862 	return ERR_PTR(err);
4863 }
4864 
4865 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4866 				  struct mlxsw_sp_fib_node *fib_node)
4867 {
4868 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
4869 
4870 	if (fib_node->fib_entry)
4871 		return;
4872 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
4873 	mlxsw_sp_fib_node_destroy(fib_node);
4874 	mlxsw_sp_vr_put(mlxsw_sp, vr);
4875 }
4876 
4877 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
4878 					struct mlxsw_sp_fib_entry *fib_entry)
4879 {
4880 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4881 	int err;
4882 
4883 	fib_node->fib_entry = fib_entry;
4884 
4885 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4886 	if (err)
4887 		goto err_fib_entry_update;
4888 
4889 	return 0;
4890 
4891 err_fib_entry_update:
4892 	fib_node->fib_entry = NULL;
4893 	return err;
4894 }
4895 
4896 static void
4897 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4898 			       struct mlxsw_sp_fib_entry *fib_entry)
4899 {
4900 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4901 
4902 	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4903 	fib_node->fib_entry = NULL;
4904 }
4905 
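/* A route in the local table takes precedence over one in the main
 * table, so do not allow a main table route to replace an offloaded
 * local table route.
 */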
4906 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
4907 {
4908 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4909 	struct mlxsw_sp_fib4_entry *fib4_replaced;
4910 
4911 	if (!fib_node->fib_entry)
4912 		return true;
4913 
4914 	fib4_replaced = container_of(fib_node->fib_entry,
4915 				     struct mlxsw_sp_fib4_entry, common);
4916 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
4917 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
4918 		return false;
4919 
4920 	return true;
4921 }
4922 
4923 static int
4924 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
4925 			     const struct fib_entry_notifier_info *fen_info)
4926 {
4927 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
4928 	struct mlxsw_sp_fib_entry *replaced;
4929 	struct mlxsw_sp_fib_node *fib_node;
4930 	int err;
4931 
4932 	if (mlxsw_sp->router->aborted)
4933 		return 0;
4934 
4935 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4936 					 &fen_info->dst, sizeof(fen_info->dst),
4937 					 fen_info->dst_len,
4938 					 MLXSW_SP_L3_PROTO_IPV4);
4939 	if (IS_ERR(fib_node)) {
4940 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4941 		return PTR_ERR(fib_node);
4942 	}
4943 
4944 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4945 	if (IS_ERR(fib4_entry)) {
4946 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
4947 		err = PTR_ERR(fib4_entry);
4948 		goto err_fib4_entry_create;
4949 	}
4950 
4951 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
4952 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4953 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4954 		return 0;
4955 	}
4956 
4957 	replaced = fib_node->fib_entry;
4958 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
4959 	if (err) {
4960 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4961 		goto err_fib_node_entry_link;
4962 	}
4963 
4964 	/* Nothing to replace */
4965 	if (!replaced)
4966 		return 0;
4967 
4968 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
4969 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
4970 				     common);
4971 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
4972 
4973 	return 0;
4974 
4975 err_fib_node_entry_link:
4976 	fib_node->fib_entry = replaced;
4977 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4978 err_fib4_entry_create:
4979 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4980 	return err;
4981 }
4982 
4983 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4984 				     struct fib_entry_notifier_info *fen_info)
4985 {
4986 	struct mlxsw_sp_fib4_entry *fib4_entry;
4987 	struct mlxsw_sp_fib_node *fib_node;
4988 
4989 	if (mlxsw_sp->router->aborted)
4990 		return;
4991 
4992 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4993 	if (!fib4_entry)
4994 		return;
4995 	fib_node = fib4_entry->common.fib_node;
4996 
4997 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
4998 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4999 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5000 }
5001 
5002 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
5003 {
	/* Packets with a link-local destination IP arriving at the
	 * router are trapped to the CPU, so there is no need to program
	 * specific routes for them. Only allow prefix routes (usually
	 * one fe80::/64) so that packets are trapped for the right
	 * reason.
	 */
5009 	if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) &&
5010 	    (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)))
5011 		return true;
5012 
5013 	/* Multicast routes aren't supported, so ignore them. Neighbour
5014 	 * Discovery packets are specifically trapped.
5015 	 */
5016 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
5017 		return true;
5018 
5019 	/* Cloned routes are irrelevant in the forwarding path. */
5020 	if (rt->fib6_flags & RTF_CACHE)
5021 		return true;
5022 
5023 	return false;
5024 }
5025 
5026 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
5027 {
5028 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5029 
5030 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
5031 	if (!mlxsw_sp_rt6)
5032 		return ERR_PTR(-ENOMEM);
5033 
	/* In case of route replace, the replaced route is deleted with
	 * no notification. Take a reference to prevent accessing freed
	 * memory.
	 */
5038 	mlxsw_sp_rt6->rt = rt;
5039 	fib6_info_hold(rt);
5040 
5041 	return mlxsw_sp_rt6;
5042 }
5043 
5044 #if IS_ENABLED(CONFIG_IPV6)
5045 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5046 {
5047 	fib6_info_release(rt);
5048 }
5049 #else
5050 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5051 {
5052 }
5053 #endif
5054 
5055 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5056 {
5057 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
5058 
5059 	fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
5060 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
5061 	kfree(mlxsw_sp_rt6);
5062 }
5063 
5064 static struct fib6_info *
5065 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
5066 {
5067 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
5068 				list)->rt;
5069 }
5070 
5071 static struct mlxsw_sp_rt6 *
5072 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
5073 			    const struct fib6_info *rt)
5074 {
5075 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5076 
5077 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
5078 		if (mlxsw_sp_rt6->rt == rt)
5079 			return mlxsw_sp_rt6;
5080 	}
5081 
5082 	return NULL;
5083 }
5084 
5085 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
5086 					const struct fib6_info *rt,
5087 					enum mlxsw_sp_ipip_type *ret)
5088 {
5089 	return rt->fib6_nh->fib_nh_dev &&
5090 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
5091 }
5092 
5093 static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
5094 				       struct mlxsw_sp_nexthop_group *nh_grp,
5095 				       struct mlxsw_sp_nexthop *nh,
5096 				       const struct fib6_info *rt)
5097 {
5098 	const struct mlxsw_sp_ipip_ops *ipip_ops;
5099 	struct mlxsw_sp_ipip_entry *ipip_entry;
5100 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5101 	struct mlxsw_sp_rif *rif;
5102 	int err;
5103 
5104 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
5105 	if (ipip_entry) {
5106 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5107 		if (ipip_ops->can_offload(mlxsw_sp, dev,
5108 					  MLXSW_SP_L3_PROTO_IPV6)) {
5109 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
5110 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
5111 			return 0;
5112 		}
5113 	}
5114 
5115 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
5116 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
5117 	if (!rif)
5118 		return 0;
5119 	mlxsw_sp_nexthop_rif_init(nh, rif);
5120 
5121 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
5122 	if (err)
5123 		goto err_nexthop_neigh_init;
5124 
5125 	return 0;
5126 
5127 err_nexthop_neigh_init:
5128 	mlxsw_sp_nexthop_rif_fini(nh);
5129 	return err;
5130 }
5131 
5132 static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
5133 					struct mlxsw_sp_nexthop *nh)
5134 {
5135 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5136 }
5137 
5138 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
5139 				  struct mlxsw_sp_nexthop_group *nh_grp,
5140 				  struct mlxsw_sp_nexthop *nh,
5141 				  const struct fib6_info *rt)
5142 {
5143 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5144 
5145 	nh->nh_grp = nh_grp;
5146 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
5147 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
5148 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5149 
5150 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5151 
5152 	if (!dev)
5153 		return 0;
5154 	nh->ifindex = dev->ifindex;
5155 
5156 	return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
5157 }
5158 
5159 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
5160 				   struct mlxsw_sp_nexthop *nh)
5161 {
5162 	mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
5163 	list_del(&nh->router_list_node);
5164 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5165 }
5166 
5167 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5168 				    const struct fib6_info *rt)
5169 {
5170 	return rt->fib6_nh->fib_nh_gw_family ||
5171 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
5172 }
5173 
5174 static struct mlxsw_sp_nexthop_group *
5175 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
5176 			       struct mlxsw_sp_fib6_entry *fib6_entry)
5177 {
5178 	struct mlxsw_sp_nexthop_group *nh_grp;
5179 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5180 	struct mlxsw_sp_nexthop *nh;
5181 	int i = 0;
5182 	int err;
5183 
5184 	nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
5185 			 GFP_KERNEL);
5186 	if (!nh_grp)
5187 		return ERR_PTR(-ENOMEM);
5188 	INIT_LIST_HEAD(&nh_grp->fib_list);
5189 #if IS_ENABLED(CONFIG_IPV6)
5190 	nh_grp->neigh_tbl = &nd_tbl;
5191 #endif
5192 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
5193 					struct mlxsw_sp_rt6, list);
5194 	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
5195 	nh_grp->count = fib6_entry->nrt6;
5196 	for (i = 0; i < nh_grp->count; i++) {
5197 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5198 
5199 		nh = &nh_grp->nexthops[i];
5200 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
5201 		if (err)
5202 			goto err_nexthop6_init;
5203 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
5204 	}
5205 
5206 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5207 	if (err)
5208 		goto err_nexthop_group_insert;
5209 
5210 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5211 	return nh_grp;
5212 
5213 err_nexthop_group_insert:
5214 err_nexthop6_init:
5215 	for (i--; i >= 0; i--) {
5216 		nh = &nh_grp->nexthops[i];
5217 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5218 	}
5219 	kfree(nh_grp);
5220 	return ERR_PTR(err);
5221 }
5222 
5223 static void
5224 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
5225 				struct mlxsw_sp_nexthop_group *nh_grp)
5226 {
5227 	struct mlxsw_sp_nexthop *nh;
5228 	int i = nh_grp->count;
5229 
5230 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5231 	for (i--; i >= 0; i--) {
5232 		nh = &nh_grp->nexthops[i];
5233 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5234 	}
5235 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5236 	WARN_ON(nh_grp->adj_index_valid);
5237 	kfree(nh_grp);
5238 }
5239 
5240 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
5241 				       struct mlxsw_sp_fib6_entry *fib6_entry)
5242 {
5243 	struct mlxsw_sp_nexthop_group *nh_grp;
5244 
5245 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
5246 	if (!nh_grp) {
5247 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
5248 		if (IS_ERR(nh_grp))
5249 			return PTR_ERR(nh_grp);
5250 	}
5251 
5252 	list_add_tail(&fib6_entry->common.nexthop_group_node,
5253 		      &nh_grp->fib_list);
5254 	fib6_entry->common.nh_group = nh_grp;
5255 
	/* The route and the nexthop are described by the same struct, so
	 * we need to update the nexthop offload indication for the new
	 * route.
	 */
5259 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
5260 
5261 	return 0;
5262 }
5263 
5264 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
5265 					struct mlxsw_sp_fib_entry *fib_entry)
5266 {
5267 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5268 
5269 	list_del(&fib_entry->nexthop_group_node);
5270 	if (!list_empty(&nh_grp->fib_list))
5271 		return;
5272 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
5273 }
5274 
5275 static int
5276 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5277 			       struct mlxsw_sp_fib6_entry *fib6_entry)
5278 {
5279 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5280 	int err;
5281 
5282 	fib6_entry->common.nh_group = NULL;
5283 	list_del(&fib6_entry->common.nexthop_group_node);
5284 
5285 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5286 	if (err)
5287 		goto err_nexthop6_group_get;
5288 
	/* If this entry is offloaded, then the adjacency index currently
	 * associated with it in the device's table is that of the old
	 * group. Start using the new one instead.
	 */
5293 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
5294 	if (err)
5295 		goto err_fib_entry_update;
5296 
5297 	if (list_empty(&old_nh_grp->fib_list))
5298 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5299 
5300 	return 0;
5301 
5302 err_fib_entry_update:
5303 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5304 err_nexthop6_group_get:
5305 	list_add_tail(&fib6_entry->common.nexthop_group_node,
5306 		      &old_nh_grp->fib_list);
5307 	fib6_entry->common.nh_group = old_nh_grp;
5308 	return err;
5309 }
5310 
5311 static int
5312 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5313 				struct mlxsw_sp_fib6_entry *fib6_entry,
5314 				struct fib6_info **rt_arr, unsigned int nrt6)
5315 {
5316 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5317 	int err, i;
5318 
5319 	for (i = 0; i < nrt6; i++) {
5320 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5321 		if (IS_ERR(mlxsw_sp_rt6)) {
5322 			err = PTR_ERR(mlxsw_sp_rt6);
5323 			goto err_rt6_create;
5324 		}
5325 
5326 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5327 		fib6_entry->nrt6++;
5328 	}
5329 
5330 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5331 	if (err)
5332 		goto err_nexthop6_group_update;
5333 
5334 	return 0;
5335 
5336 err_nexthop6_group_update:
5337 	i = nrt6;
5338 err_rt6_create:
5339 	for (i--; i >= 0; i--) {
5340 		fib6_entry->nrt6--;
5341 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5342 					       struct mlxsw_sp_rt6, list);
5343 		list_del(&mlxsw_sp_rt6->list);
5344 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5345 	}
5346 	return err;
5347 }
5348 
5349 static void
5350 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5351 				struct mlxsw_sp_fib6_entry *fib6_entry,
5352 				struct fib6_info **rt_arr, unsigned int nrt6)
5353 {
5354 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5355 	int i;
5356 
5357 	for (i = 0; i < nrt6; i++) {
5358 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
5359 							   rt_arr[i]);
5360 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
5361 			continue;
5362 
5363 		fib6_entry->nrt6--;
5364 		list_del(&mlxsw_sp_rt6->list);
5365 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5366 	}
5367 
5368 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5369 }
5370 
5371 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5372 					 struct mlxsw_sp_fib_entry *fib_entry,
5373 					 const struct fib6_info *rt)
5374 {
5375 	/* Packets hitting RTF_REJECT routes need to be discarded by the
5376 	 * stack. We can rely on their destination device not having a
5377 	 * RIF (it's the loopback device) and can thus use action type
5378 	 * local, which will cause them to be trapped with a lower
5379 	 * priority than packets that need to be locally received.
5380 	 */
5381 	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
5382 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5383 	else if (rt->fib6_type == RTN_BLACKHOLE)
5384 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
5385 	else if (rt->fib6_flags & RTF_REJECT)
5386 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
5387 	else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
5388 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5389 	else
5390 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5391 }
5392 
5393 static void
5394 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5395 {
5396 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5397 
5398 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5399 				 list) {
5400 		fib6_entry->nrt6--;
5401 		list_del(&mlxsw_sp_rt6->list);
5402 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5403 	}
5404 }
5405 
5406 static struct mlxsw_sp_fib6_entry *
5407 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5408 			   struct mlxsw_sp_fib_node *fib_node,
5409 			   struct fib6_info **rt_arr, unsigned int nrt6)
5410 {
5411 	struct mlxsw_sp_fib6_entry *fib6_entry;
5412 	struct mlxsw_sp_fib_entry *fib_entry;
5413 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5414 	int err, i;
5415 
5416 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5417 	if (!fib6_entry)
5418 		return ERR_PTR(-ENOMEM);
5419 	fib_entry = &fib6_entry->common;
5420 
5421 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
5422 
5423 	for (i = 0; i < nrt6; i++) {
5424 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5425 		if (IS_ERR(mlxsw_sp_rt6)) {
5426 			err = PTR_ERR(mlxsw_sp_rt6);
5427 			goto err_rt6_create;
5428 		}
5429 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5430 		fib6_entry->nrt6++;
5431 	}
5432 
5433 	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
5434 
5435 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5436 	if (err)
5437 		goto err_nexthop6_group_get;
5438 
5439 	fib_entry->fib_node = fib_node;
5440 
5441 	return fib6_entry;
5442 
5443 err_nexthop6_group_get:
5444 	i = nrt6;
5445 err_rt6_create:
5446 	for (i--; i >= 0; i--) {
5447 		fib6_entry->nrt6--;
5448 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5449 					       struct mlxsw_sp_rt6, list);
5450 		list_del(&mlxsw_sp_rt6->list);
5451 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5452 	}
5453 	kfree(fib6_entry);
5454 	return ERR_PTR(err);
5455 }
5456 
5457 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5458 					struct mlxsw_sp_fib6_entry *fib6_entry)
5459 {
5460 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5461 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5462 	WARN_ON(fib6_entry->nrt6);
5463 	kfree(fib6_entry);
5464 }
5465 
5466 static struct mlxsw_sp_fib6_entry *
5467 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5468 			   const struct fib6_info *rt)
5469 {
5470 	struct mlxsw_sp_fib6_entry *fib6_entry;
5471 	struct mlxsw_sp_fib_node *fib_node;
5472 	struct mlxsw_sp_fib *fib;
5473 	struct fib6_info *cmp_rt;
5474 	struct mlxsw_sp_vr *vr;
5475 
5476 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
5477 	if (!vr)
5478 		return NULL;
5479 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5480 
5481 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5482 					    sizeof(rt->fib6_dst.addr),
5483 					    rt->fib6_dst.plen);
5484 	if (!fib_node)
5485 		return NULL;
5486 
5487 	fib6_entry = container_of(fib_node->fib_entry,
5488 				  struct mlxsw_sp_fib6_entry, common);
5489 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5490 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
5491 	    rt->fib6_metric == cmp_rt->fib6_metric &&
5492 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5493 		return fib6_entry;
5494 
5495 	return NULL;
5496 }
5497 
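/* The kernel consults the local table before the main table, so do not let
 * a main table route replace an offloaded local table route for the same
 * prefix; the local route should keep taking effect in the device as well.
 */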
5498 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
5499 {
5500 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5501 	struct mlxsw_sp_fib6_entry *fib6_replaced;
5502 	struct fib6_info *rt, *rt_replaced;
5503 
5504 	if (!fib_node->fib_entry)
5505 		return true;
5506 
5507 	fib6_replaced = container_of(fib_node->fib_entry,
5508 				     struct mlxsw_sp_fib6_entry,
5509 				     common);
5510 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5511 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
5512 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
5513 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
5514 		return false;
5515 
5516 	return true;
5517 }
5518 
5519 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
5520 					struct fib6_info **rt_arr,
5521 					unsigned int nrt6)
5522 {
5523 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
5524 	struct mlxsw_sp_fib_entry *replaced;
5525 	struct mlxsw_sp_fib_node *fib_node;
5526 	struct fib6_info *rt = rt_arr[0];
5527 	int err;
5528 
5529 	if (mlxsw_sp->router->aborted)
5530 		return 0;
5531 
5532 	if (rt->fib6_src.plen)
5533 		return -EINVAL;
5534 
5535 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
5536 		return 0;
5537 
5538 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5539 					 &rt->fib6_dst.addr,
5540 					 sizeof(rt->fib6_dst.addr),
5541 					 rt->fib6_dst.plen,
5542 					 MLXSW_SP_L3_PROTO_IPV6);
5543 	if (IS_ERR(fib_node))
5544 		return PTR_ERR(fib_node);
5545 
5546 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
5547 						nrt6);
5548 	if (IS_ERR(fib6_entry)) {
5549 		err = PTR_ERR(fib6_entry);
5550 		goto err_fib6_entry_create;
5551 	}
5552 
5553 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
5554 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5555 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5556 		return 0;
5557 	}
5558 
5559 	replaced = fib_node->fib_entry;
5560 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
5561 	if (err)
5562 		goto err_fib_node_entry_link;
5563 
5564 	/* Nothing to replace */
5565 	if (!replaced)
5566 		return 0;
5567 
5568 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
5569 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
5570 				     common);
5571 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
5572 
5573 	return 0;
5574 
5575 err_fib_node_entry_link:
5576 	fib_node->fib_entry = replaced;
5577 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5578 err_fib6_entry_create:
5579 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5580 	return err;
5581 }
5582 
5583 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
5584 				       struct fib6_info **rt_arr,
5585 				       unsigned int nrt6)
5586 {
5587 	struct mlxsw_sp_fib6_entry *fib6_entry;
5588 	struct mlxsw_sp_fib_node *fib_node;
5589 	struct fib6_info *rt = rt_arr[0];
5590 	int err;
5591 
5592 	if (mlxsw_sp->router->aborted)
5593 		return 0;
5594 
5595 	if (rt->fib6_src.plen)
5596 		return -EINVAL;
5597 
5598 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
5599 		return 0;
5600 
5601 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5602 					 &rt->fib6_dst.addr,
5603 					 sizeof(rt->fib6_dst.addr),
5604 					 rt->fib6_dst.plen,
5605 					 MLXSW_SP_L3_PROTO_IPV6);
5606 	if (IS_ERR(fib_node))
5607 		return PTR_ERR(fib_node);
5608 
5609 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
5610 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5611 		return -EINVAL;
5612 	}
5613 
5614 	fib6_entry = container_of(fib_node->fib_entry,
5615 				  struct mlxsw_sp_fib6_entry, common);
5616 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
5617 					      nrt6);
5618 	if (err)
5619 		goto err_fib6_entry_nexthop_add;
5620 
5621 	return 0;
5622 
5623 err_fib6_entry_nexthop_add:
5624 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5625 	return err;
5626 }
5627 
5628 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5629 				     struct fib6_info **rt_arr,
5630 				     unsigned int nrt6)
5631 {
5632 	struct mlxsw_sp_fib6_entry *fib6_entry;
5633 	struct mlxsw_sp_fib_node *fib_node;
5634 	struct fib6_info *rt = rt_arr[0];
5635 
5636 	if (mlxsw_sp->router->aborted)
5637 		return;
5638 
5639 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
5640 		return;
5641 
5642 	/* Multipath routes are first added to the FIB trie and only then
5643 	 * notified. If we vetoed the addition, we will get a delete
5644 	 * notification for a route we do not have. Therefore, do not warn if
5645 	 * route was not found.
5646 	 */
5647 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5648 	if (!fib6_entry)
5649 		return;
5650 
5651 	/* If not all the nexthops are deleted, then only reduce the nexthop
5652 	 * group.
5653 	 */
5654 	if (nrt6 != fib6_entry->nrt6) {
5655 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
5656 						nrt6);
5657 		return;
5658 	}
5659 
5660 	fib_node = fib6_entry->common.fib_node;
5661 
5662 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
5663 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5664 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5665 }
5666 
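/* Bind all virtual routers to a minimal LPM tree containing only the
 * zero-length prefix and install a default route with an IP2ME action, so
 * that all routed packets are trapped to the CPU and forwarded by the
 * kernel instead of the device.
 */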
5667 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5668 					    enum mlxsw_reg_ralxx_protocol proto,
5669 					    u8 tree_id)
5670 {
5671 	char ralta_pl[MLXSW_REG_RALTA_LEN];
5672 	char ralst_pl[MLXSW_REG_RALST_LEN];
5673 	int i, err;
5674 
5675 	mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
5676 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5677 	if (err)
5678 		return err;
5679 
5680 	mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
5681 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5682 	if (err)
5683 		return err;
5684 
5685 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5686 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5687 		char raltb_pl[MLXSW_REG_RALTB_LEN];
5688 		char ralue_pl[MLXSW_REG_RALUE_LEN];
5689 
5690 		mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
5691 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5692 				      raltb_pl);
5693 		if (err)
5694 			return err;
5695 
5696 		mlxsw_reg_ralue_pack(ralue_pl, proto,
5697 				     MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
5698 		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5699 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5700 				      ralue_pl);
5701 		if (err)
5702 			return err;
5703 	}
5704 
5705 	return 0;
5706 }
5707 
5708 static struct mlxsw_sp_mr_table *
5709 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5710 {
5711 	if (family == RTNL_FAMILY_IPMR)
5712 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
5713 	else
5714 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
5715 }
5716 
5717 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5718 				     struct mfc_entry_notifier_info *men_info,
5719 				     bool replace)
5720 {
5721 	struct mlxsw_sp_mr_table *mrt;
5722 	struct mlxsw_sp_vr *vr;
5723 
5724 	if (mlxsw_sp->router->aborted)
5725 		return 0;
5726 
5727 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
5728 	if (IS_ERR(vr))
5729 		return PTR_ERR(vr);
5730 
5731 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5732 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
5733 }
5734 
5735 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5736 				      struct mfc_entry_notifier_info *men_info)
5737 {
5738 	struct mlxsw_sp_mr_table *mrt;
5739 	struct mlxsw_sp_vr *vr;
5740 
5741 	if (mlxsw_sp->router->aborted)
5742 		return;
5743 
5744 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5745 	if (WARN_ON(!vr))
5746 		return;
5747 
5748 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5749 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
5750 	mlxsw_sp_vr_put(mlxsw_sp, vr);
5751 }
5752 
5753 static int
5754 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5755 			      struct vif_entry_notifier_info *ven_info)
5756 {
5757 	struct mlxsw_sp_mr_table *mrt;
5758 	struct mlxsw_sp_rif *rif;
5759 	struct mlxsw_sp_vr *vr;
5760 
5761 	if (mlxsw_sp->router->aborted)
5762 		return 0;
5763 
5764 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
5765 	if (IS_ERR(vr))
5766 		return PTR_ERR(vr);
5767 
5768 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5769 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5770 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
5771 				   ven_info->vif_index,
5772 				   ven_info->vif_flags, rif);
5773 }
5774 
5775 static void
5776 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5777 			      struct vif_entry_notifier_info *ven_info)
5778 {
5779 	struct mlxsw_sp_mr_table *mrt;
5780 	struct mlxsw_sp_vr *vr;
5781 
5782 	if (mlxsw_sp->router->aborted)
5783 		return;
5784 
5785 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5786 	if (WARN_ON(!vr))
5787 		return;
5788 
5789 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5790 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
5791 	mlxsw_sp_vr_put(mlxsw_sp, vr);
5792 }
5793 
5794 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5795 {
5796 	enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5797 	int err;
5798 
5799 	err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5800 					       MLXSW_SP_LPM_TREE_MIN);
5801 	if (err)
5802 		return err;
5803 
5804 	/* The multicast router code does not need an abort trap as by default,
5805 	 * packets that don't match any routes are trapped to the CPU.
5806 	 */
5807 
5808 	proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5809 	return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5810 						MLXSW_SP_LPM_TREE_MIN + 1);
5811 }
5812 
5813 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5814 				     struct mlxsw_sp_fib_node *fib_node)
5815 {
5816 	struct mlxsw_sp_fib4_entry *fib4_entry;
5817 
5818 	fib4_entry = container_of(fib_node->fib_entry,
5819 				  struct mlxsw_sp_fib4_entry, common);
5820 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
5821 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5822 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5823 }
5824 
5825 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5826 				     struct mlxsw_sp_fib_node *fib_node)
5827 {
5828 	struct mlxsw_sp_fib6_entry *fib6_entry;
5829 
5830 	fib6_entry = container_of(fib_node->fib_entry,
5831 				  struct mlxsw_sp_fib6_entry, common);
5832 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
5833 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5834 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5835 }
5836 
5837 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5838 				    struct mlxsw_sp_fib_node *fib_node)
5839 {
5840 	switch (fib_node->fib->proto) {
5841 	case MLXSW_SP_L3_PROTO_IPV4:
5842 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5843 		break;
5844 	case MLXSW_SP_L3_PROTO_IPV6:
5845 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
5846 		break;
5847 	}
5848 }
5849 
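/* Flushing the last node can release the FIB (and thus its node list)
 * itself, so determine in advance whether this is the final iteration and
 * break out instead of testing the loop condition against freed memory.
 */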
5850 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5851 				  struct mlxsw_sp_vr *vr,
5852 				  enum mlxsw_sp_l3proto proto)
5853 {
5854 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5855 	struct mlxsw_sp_fib_node *fib_node, *tmp;
5856 
5857 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5858 		bool do_break = &tmp->list == &fib->node_list;
5859 
5860 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5861 		if (do_break)
5862 			break;
5863 	}
5864 }
5865 
5866 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
5867 {
5868 	int i, j;
5869 
5870 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5871 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5872 
5873 		if (!mlxsw_sp_vr_is_used(vr))
5874 			continue;
5875 
5876 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
5877 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
5878 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
5879 
		/* If the virtual router was only used for IPv4, then it is no
		 * longer in use and the IPv6 flush can be skipped.
		 */
5883 		if (!mlxsw_sp_vr_is_used(vr))
5884 			continue;
5885 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
5886 	}
5887 
	/* After flushing all the routes, it is no longer possible that anyone
	 * is still using the adjacency index that discards packets, so free it
	 * in case it was allocated.
	 */
5892 	if (!mlxsw_sp->router->adj_discard_index_valid)
5893 		return;
5894 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
5895 			   mlxsw_sp->router->adj_discard_index);
5896 	mlxsw_sp->router->adj_discard_index_valid = false;
5897 }
5898 
5899 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
5900 {
5901 	int err;
5902 
5903 	if (mlxsw_sp->router->aborted)
5904 		return;
5905 	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
5906 	mlxsw_sp_router_fib_flush(mlxsw_sp);
5907 	mlxsw_sp->router->aborted = true;
5908 	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5909 	if (err)
5910 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5911 }
5912 
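/* FIB notifications arrive in atomic context, so the relevant information
 * is copied into a work item and processed in process context, where the
 * router lock can be taken and register writes may sleep.
 */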
5913 struct mlxsw_sp_fib6_event_work {
5914 	struct fib6_info **rt_arr;
5915 	unsigned int nrt6;
5916 };
5917 
5918 struct mlxsw_sp_fib_event_work {
5919 	struct work_struct work;
5920 	union {
5921 		struct mlxsw_sp_fib6_event_work fib6_work;
5922 		struct fib_entry_notifier_info fen_info;
5923 		struct fib_rule_notifier_info fr_info;
5924 		struct fib_nh_notifier_info fnh_info;
5925 		struct mfc_entry_notifier_info men_info;
5926 		struct vif_entry_notifier_info ven_info;
5927 	};
5928 	struct mlxsw_sp *mlxsw_sp;
5929 	unsigned long event;
5930 };
5931 
5932 static int
5933 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
5934 			       struct fib6_entry_notifier_info *fen6_info)
5935 {
5936 	struct fib6_info *rt = fen6_info->rt;
5937 	struct fib6_info **rt_arr;
5938 	struct fib6_info *iter;
5939 	unsigned int nrt6;
5940 	int i = 0;
5941 
5942 	nrt6 = fen6_info->nsiblings + 1;
5943 
5944 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
5945 	if (!rt_arr)
5946 		return -ENOMEM;
5947 
5948 	fib6_work->rt_arr = rt_arr;
5949 	fib6_work->nrt6 = nrt6;
5950 
5951 	rt_arr[0] = rt;
5952 	fib6_info_hold(rt);
5953 
5954 	if (!fen6_info->nsiblings)
5955 		return 0;
5956 
5957 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
5958 		if (i == fen6_info->nsiblings)
5959 			break;
5960 
5961 		rt_arr[i + 1] = iter;
5962 		fib6_info_hold(iter);
5963 		i++;
5964 	}
5965 	WARN_ON_ONCE(i != fen6_info->nsiblings);
5966 
5967 	return 0;
5968 }
5969 
5970 static void
5971 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
5972 {
5973 	int i;
5974 
5975 	for (i = 0; i < fib6_work->nrt6; i++)
5976 		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
5977 	kfree(fib6_work->rt_arr);
5978 }
5979 
5980 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
5981 {
5982 	struct mlxsw_sp_fib_event_work *fib_work =
5983 		container_of(work, struct mlxsw_sp_fib_event_work, work);
5984 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5985 	int err;
5986 
5987 	mutex_lock(&mlxsw_sp->router->lock);
5988 	mlxsw_sp_span_respin(mlxsw_sp);
5989 
5990 	switch (fib_work->event) {
5991 	case FIB_EVENT_ENTRY_REPLACE:
5992 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
5993 						   &fib_work->fen_info);
5994 		if (err)
5995 			mlxsw_sp_router_fib_abort(mlxsw_sp);
5996 		fib_info_put(fib_work->fen_info.fi);
5997 		break;
5998 	case FIB_EVENT_ENTRY_DEL:
5999 		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
6000 		fib_info_put(fib_work->fen_info.fi);
6001 		break;
6002 	case FIB_EVENT_NH_ADD: /* fall through */
6003 	case FIB_EVENT_NH_DEL:
6004 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
6005 					fib_work->fnh_info.fib_nh);
6006 		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
6007 		break;
6008 	}
6009 	mutex_unlock(&mlxsw_sp->router->lock);
6010 	kfree(fib_work);
6011 }
6012 
6013 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
6014 {
6015 	struct mlxsw_sp_fib_event_work *fib_work =
6016 		container_of(work, struct mlxsw_sp_fib_event_work, work);
6017 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6018 	int err;
6019 
6020 	mutex_lock(&mlxsw_sp->router->lock);
6021 	mlxsw_sp_span_respin(mlxsw_sp);
6022 
6023 	switch (fib_work->event) {
6024 	case FIB_EVENT_ENTRY_REPLACE:
6025 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
6026 						   fib_work->fib6_work.rt_arr,
6027 						   fib_work->fib6_work.nrt6);
6028 		if (err)
6029 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6030 		mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6031 		break;
6032 	case FIB_EVENT_ENTRY_APPEND:
6033 		err = mlxsw_sp_router_fib6_append(mlxsw_sp,
6034 						  fib_work->fib6_work.rt_arr,
6035 						  fib_work->fib6_work.nrt6);
6036 		if (err)
6037 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6038 		mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6039 		break;
6040 	case FIB_EVENT_ENTRY_DEL:
6041 		mlxsw_sp_router_fib6_del(mlxsw_sp,
6042 					 fib_work->fib6_work.rt_arr,
6043 					 fib_work->fib6_work.nrt6);
6044 		mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6045 		break;
6046 	}
6047 	mutex_unlock(&mlxsw_sp->router->lock);
6048 	kfree(fib_work);
6049 }
6050 
6051 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
6052 {
6053 	struct mlxsw_sp_fib_event_work *fib_work =
6054 		container_of(work, struct mlxsw_sp_fib_event_work, work);
6055 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6056 	bool replace;
6057 	int err;
6058 
6059 	rtnl_lock();
6060 	mutex_lock(&mlxsw_sp->router->lock);
6061 	switch (fib_work->event) {
6062 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6063 	case FIB_EVENT_ENTRY_ADD:
6064 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
6065 
6066 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
6067 						replace);
6068 		if (err)
6069 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6070 		mr_cache_put(fib_work->men_info.mfc);
6071 		break;
6072 	case FIB_EVENT_ENTRY_DEL:
6073 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
6074 		mr_cache_put(fib_work->men_info.mfc);
6075 		break;
6076 	case FIB_EVENT_VIF_ADD:
6077 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
6078 						    &fib_work->ven_info);
6079 		if (err)
6080 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6081 		dev_put(fib_work->ven_info.dev);
6082 		break;
6083 	case FIB_EVENT_VIF_DEL:
6084 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
6085 					      &fib_work->ven_info);
6086 		dev_put(fib_work->ven_info.dev);
6087 		break;
6088 	}
6089 	mutex_unlock(&mlxsw_sp->router->lock);
6090 	rtnl_unlock();
6091 	kfree(fib_work);
6092 }
6093 
6094 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
6095 				       struct fib_notifier_info *info)
6096 {
6097 	struct fib_entry_notifier_info *fen_info;
6098 	struct fib_nh_notifier_info *fnh_info;
6099 
6100 	switch (fib_work->event) {
6101 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6102 	case FIB_EVENT_ENTRY_DEL:
6103 		fen_info = container_of(info, struct fib_entry_notifier_info,
6104 					info);
6105 		fib_work->fen_info = *fen_info;
6106 		/* Take reference on fib_info to prevent it from being
6107 		 * freed while work is queued. Release it afterwards.
6108 		 */
6109 		fib_info_hold(fib_work->fen_info.fi);
6110 		break;
6111 	case FIB_EVENT_NH_ADD: /* fall through */
6112 	case FIB_EVENT_NH_DEL:
6113 		fnh_info = container_of(info, struct fib_nh_notifier_info,
6114 					info);
6115 		fib_work->fnh_info = *fnh_info;
6116 		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
6117 		break;
6118 	}
6119 }
6120 
6121 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
6122 				      struct fib_notifier_info *info)
6123 {
6124 	struct fib6_entry_notifier_info *fen6_info;
6125 	int err;
6126 
6127 	switch (fib_work->event) {
6128 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6129 	case FIB_EVENT_ENTRY_APPEND: /* fall through */
6130 	case FIB_EVENT_ENTRY_DEL:
6131 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
6132 					 info);
6133 		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
6134 						     fen6_info);
6135 		if (err)
6136 			return err;
6137 		break;
6138 	}
6139 
6140 	return 0;
6141 }
6142 
6143 static void
6144 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
6145 			    struct fib_notifier_info *info)
6146 {
6147 	switch (fib_work->event) {
6148 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6149 	case FIB_EVENT_ENTRY_ADD: /* fall through */
6150 	case FIB_EVENT_ENTRY_DEL:
6151 		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
6152 		mr_cache_hold(fib_work->men_info.mfc);
6153 		break;
6154 	case FIB_EVENT_VIF_ADD: /* fall through */
6155 	case FIB_EVENT_VIF_DEL:
6156 		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
6157 		dev_hold(fib_work->ven_info.dev);
6158 		break;
6159 	}
6160 }
6161 
6162 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
6163 					  struct fib_notifier_info *info,
6164 					  struct mlxsw_sp *mlxsw_sp)
6165 {
6166 	struct netlink_ext_ack *extack = info->extack;
6167 	struct fib_rule_notifier_info *fr_info;
6168 	struct fib_rule *rule;
6169 	int err = 0;
6170 
6171 	/* nothing to do at the moment */
6172 	if (event == FIB_EVENT_RULE_DEL)
6173 		return 0;
6174 
6175 	if (mlxsw_sp->router->aborted)
6176 		return 0;
6177 
6178 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
6179 	rule = fr_info->rule;
6180 
6181 	/* Rule only affects locally generated traffic */
6182 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
6183 		return 0;
6184 
6185 	switch (info->family) {
6186 	case AF_INET:
6187 		if (!fib4_rule_default(rule) && !rule->l3mdev)
6188 			err = -EOPNOTSUPP;
6189 		break;
6190 	case AF_INET6:
6191 		if (!fib6_rule_default(rule) && !rule->l3mdev)
6192 			err = -EOPNOTSUPP;
6193 		break;
6194 	case RTNL_FAMILY_IPMR:
6195 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
6196 			err = -EOPNOTSUPP;
6197 		break;
6198 	case RTNL_FAMILY_IP6MR:
6199 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
6200 			err = -EOPNOTSUPP;
6201 		break;
6202 	}
6203 
6204 	if (err < 0)
6205 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
6206 
6207 	return err;
6208 }
6209 
6210 /* Called with rcu_read_lock() */
6211 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
6212 				     unsigned long event, void *ptr)
6213 {
6214 	struct mlxsw_sp_fib_event_work *fib_work;
6215 	struct fib_notifier_info *info = ptr;
6216 	struct mlxsw_sp_router *router;
6217 	int err;
6218 
	if (info->family != AF_INET && info->family != AF_INET6 &&
	    info->family != RTNL_FAMILY_IPMR &&
	    info->family != RTNL_FAMILY_IP6MR)
6222 		return NOTIFY_DONE;
6223 
6224 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6225 
6226 	switch (event) {
6227 	case FIB_EVENT_RULE_ADD: /* fall through */
6228 	case FIB_EVENT_RULE_DEL:
6229 		err = mlxsw_sp_router_fib_rule_event(event, info,
6230 						     router->mlxsw_sp);
6231 		return notifier_from_errno(err);
6232 	case FIB_EVENT_ENTRY_ADD: /* fall through */
6233 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6234 	case FIB_EVENT_ENTRY_APPEND:
6235 		if (router->aborted) {
6236 			NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
6237 			return notifier_from_errno(-EINVAL);
6238 		}
6239 		if (info->family == AF_INET) {
6240 			struct fib_entry_notifier_info *fen_info = ptr;
6241 
6242 			if (fen_info->fi->fib_nh_is_v6) {
6243 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
6244 				return notifier_from_errno(-EINVAL);
6245 			}
6246 			if (fen_info->fi->nh) {
6247 				NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
6248 				return notifier_from_errno(-EINVAL);
6249 			}
6250 		} else if (info->family == AF_INET6) {
6251 			struct fib6_entry_notifier_info *fen6_info;
6252 
6253 			fen6_info = container_of(info,
6254 						 struct fib6_entry_notifier_info,
6255 						 info);
6256 			if (fen6_info->rt->nh) {
6257 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
6258 				return notifier_from_errno(-EINVAL);
6259 			}
6260 		}
6261 		break;
6262 	}
6263 
6264 	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
6265 	if (WARN_ON(!fib_work))
6266 		return NOTIFY_BAD;
6267 
6268 	fib_work->mlxsw_sp = router->mlxsw_sp;
6269 	fib_work->event = event;
6270 
6271 	switch (info->family) {
6272 	case AF_INET:
6273 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
6274 		mlxsw_sp_router_fib4_event(fib_work, info);
6275 		break;
6276 	case AF_INET6:
6277 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
6278 		err = mlxsw_sp_router_fib6_event(fib_work, info);
6279 		if (err)
6280 			goto err_fib_event;
6281 		break;
6282 	case RTNL_FAMILY_IP6MR:
6283 	case RTNL_FAMILY_IPMR:
6284 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
6285 		mlxsw_sp_router_fibmr_event(fib_work, info);
6286 		break;
6287 	}
6288 
6289 	mlxsw_core_schedule_work(&fib_work->work);
6290 
6291 	return NOTIFY_DONE;
6292 
6293 err_fib_event:
6294 	kfree(fib_work);
6295 	return NOTIFY_BAD;
6296 }
6297 
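/* Linearly scan the RIF array for a RIF whose netdev matches. Returns NULL
 * if the netdev does not have a RIF.
 */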
6298 static struct mlxsw_sp_rif *
6299 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
6300 			 const struct net_device *dev)
6301 {
6302 	int i;
6303 
6304 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6305 		if (mlxsw_sp->router->rifs[i] &&
6306 		    mlxsw_sp->router->rifs[i]->dev == dev)
6307 			return mlxsw_sp->router->rifs[i];
6308 
6309 	return NULL;
6310 }
6311 
6312 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
6313 			 const struct net_device *dev)
6314 {
6315 	struct mlxsw_sp_rif *rif;
6316 
6317 	mutex_lock(&mlxsw_sp->router->lock);
6318 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6319 	mutex_unlock(&mlxsw_sp->router->lock);
6320 
	return rif != NULL;
6322 }
6323 
6324 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
6325 {
6326 	struct mlxsw_sp_rif *rif;
6327 	u16 vid = 0;
6328 
6329 	mutex_lock(&mlxsw_sp->router->lock);
6330 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6331 	if (!rif)
6332 		goto out;
6333 
6334 	/* We only return the VID for VLAN RIFs. Otherwise we return an
6335 	 * invalid value (0).
6336 	 */
6337 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
6338 		goto out;
6339 
6340 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6341 
6342 out:
6343 	mutex_unlock(&mlxsw_sp->router->lock);
6344 	return vid;
6345 }
6346 
6347 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
6348 {
6349 	char ritr_pl[MLXSW_REG_RITR_LEN];
6350 	int err;
6351 
6352 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
6353 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6354 	if (err)
6355 		return err;
6356 
6357 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
6358 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6359 }
6360 
6361 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
6362 					  struct mlxsw_sp_rif *rif)
6363 {
6364 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6365 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6366 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
6367 }
6368 
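/* Decide whether an address event requires a RIF change: on NETDEV_UP a RIF
 * is needed only if one does not already exist; on NETDEV_DOWN the RIF is
 * only torn down once the netdev has neither IPv4 nor IPv6 addresses left.
 */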
6369 static bool
6370 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6371 			   unsigned long event)
6372 {
6373 	struct inet6_dev *inet6_dev;
6374 	bool addr_list_empty = true;
6375 	struct in_device *idev;
6376 
6377 	switch (event) {
6378 	case NETDEV_UP:
6379 		return rif == NULL;
6380 	case NETDEV_DOWN:
6381 		rcu_read_lock();
6382 		idev = __in_dev_get_rcu(dev);
6383 		if (idev && idev->ifa_list)
6384 			addr_list_empty = false;
6385 
6386 		inet6_dev = __in6_dev_get(dev);
6387 		if (addr_list_empty && inet6_dev &&
6388 		    !list_empty(&inet6_dev->addr_list))
6389 			addr_list_empty = false;
6390 		rcu_read_unlock();
6391 
		/* macvlans do not have a RIF, but rather piggyback on the
		 * RIF of their lower device.
		 */
6395 		if (netif_is_macvlan(dev) && addr_list_empty)
6396 			return true;
6397 
6398 		if (rif && addr_list_empty &&
6399 		    !netif_is_l3_slave(rif->dev))
6400 			return true;
6401 		/* It is possible we already removed the RIF ourselves
6402 		 * if it was assigned to a netdev that is now a bridge
6403 		 * or LAG slave.
6404 		 */
6405 		return false;
6406 	}
6407 
6408 	return false;
6409 }
6410 
6411 static enum mlxsw_sp_rif_type
6412 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6413 		      const struct net_device *dev)
6414 {
6415 	enum mlxsw_sp_fid_type type;
6416 
6417 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6418 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
6419 
6420 	/* Otherwise RIF type is derived from the type of the underlying FID. */
6421 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6422 		type = MLXSW_SP_FID_TYPE_8021Q;
6423 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6424 		type = MLXSW_SP_FID_TYPE_8021Q;
6425 	else if (netif_is_bridge_master(dev))
6426 		type = MLXSW_SP_FID_TYPE_8021D;
6427 	else
6428 		type = MLXSW_SP_FID_TYPE_RFID;
6429 
6430 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6431 }
6432 
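/* Allocate the first free RIF index by scanning the RIF array for an unused
 * slot.
 */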
6433 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
6434 {
6435 	int i;
6436 
6437 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6438 		if (!mlxsw_sp->router->rifs[i]) {
6439 			*p_rif_index = i;
6440 			return 0;
6441 		}
6442 	}
6443 
6444 	return -ENOBUFS;
6445 }
6446 
6447 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6448 					       u16 vr_id,
6449 					       struct net_device *l3_dev)
6450 {
6451 	struct mlxsw_sp_rif *rif;
6452 
6453 	rif = kzalloc(rif_size, GFP_KERNEL);
6454 	if (!rif)
6455 		return NULL;
6456 
6457 	INIT_LIST_HEAD(&rif->nexthop_list);
6458 	INIT_LIST_HEAD(&rif->neigh_list);
6459 	if (l3_dev) {
6460 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
6461 		rif->mtu = l3_dev->mtu;
6462 		rif->dev = l3_dev;
6463 	}
6464 	rif->vr_id = vr_id;
6465 	rif->rif_index = rif_index;
6466 
6467 	return rif;
6468 }
6469 
6470 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6471 					   u16 rif_index)
6472 {
6473 	return mlxsw_sp->router->rifs[rif_index];
6474 }
6475 
6476 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6477 {
6478 	return rif->rif_index;
6479 }
6480 
6481 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6482 {
6483 	return lb_rif->common.rif_index;
6484 }
6485 
6486 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6487 {
6488 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
6489 	struct mlxsw_sp_vr *ul_vr;
6490 
6491 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
6492 	if (WARN_ON(IS_ERR(ul_vr)))
6493 		return 0;
6494 
6495 	return ul_vr->id;
6496 }
6497 
6498 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6499 {
6500 	return lb_rif->ul_rif_id;
6501 }
6502 
6503 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6504 {
6505 	return rif->dev->ifindex;
6506 }
6507 
6508 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6509 {
6510 	return rif->dev;
6511 }
6512 
6513 static struct mlxsw_sp_rif *
6514 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
6515 		    const struct mlxsw_sp_rif_params *params,
6516 		    struct netlink_ext_ack *extack)
6517 {
6518 	u32 tb_id = l3mdev_fib_table(params->dev);
6519 	const struct mlxsw_sp_rif_ops *ops;
6520 	struct mlxsw_sp_fid *fid = NULL;
6521 	enum mlxsw_sp_rif_type type;
6522 	struct mlxsw_sp_rif *rif;
6523 	struct mlxsw_sp_vr *vr;
6524 	u16 rif_index;
6525 	int i, err;
6526 
6527 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6528 	ops = mlxsw_sp->rif_ops_arr[type];
6529 
6530 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
6531 	if (IS_ERR(vr))
6532 		return ERR_CAST(vr);
6533 	vr->rif_count++;
6534 
6535 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
6536 	if (err) {
6537 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
6538 		goto err_rif_index_alloc;
6539 	}
6540 
6541 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
6542 	if (!rif) {
6543 		err = -ENOMEM;
6544 		goto err_rif_alloc;
6545 	}
6546 	dev_hold(rif->dev);
6547 	mlxsw_sp->router->rifs[rif_index] = rif;
6548 	rif->mlxsw_sp = mlxsw_sp;
6549 	rif->ops = ops;
6550 
6551 	if (ops->fid_get) {
6552 		fid = ops->fid_get(rif, extack);
6553 		if (IS_ERR(fid)) {
6554 			err = PTR_ERR(fid);
6555 			goto err_fid_get;
6556 		}
6557 		rif->fid = fid;
6558 	}
6559 
6560 	if (ops->setup)
6561 		ops->setup(rif, params);
6562 
6563 	err = ops->configure(rif);
6564 	if (err)
6565 		goto err_configure;
6566 
6567 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6568 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6569 		if (err)
6570 			goto err_mr_rif_add;
6571 	}
6572 
6573 	mlxsw_sp_rif_counters_alloc(rif);
6574 
6575 	return rif;
6576 
6577 err_mr_rif_add:
6578 	for (i--; i >= 0; i--)
6579 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6580 	ops->deconfigure(rif);
6581 err_configure:
6582 	if (fid)
6583 		mlxsw_sp_fid_put(fid);
6584 err_fid_get:
6585 	mlxsw_sp->router->rifs[rif_index] = NULL;
6586 	dev_put(rif->dev);
6587 	kfree(rif);
6588 err_rif_alloc:
6589 err_rif_index_alloc:
6590 	vr->rif_count--;
6591 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6592 	return ERR_PTR(err);
6593 }
6594 
6595 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6596 {
6597 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
6598 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6599 	struct mlxsw_sp_fid *fid = rif->fid;
6600 	struct mlxsw_sp_vr *vr;
6601 	int i;
6602 
6603 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
6604 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
6605 
6606 	mlxsw_sp_rif_counters_free(rif);
6607 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6608 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6609 	ops->deconfigure(rif);
6610 	if (fid)
6611 		/* Loopback RIFs are not associated with a FID. */
6612 		mlxsw_sp_fid_put(fid);
6613 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
6614 	dev_put(rif->dev);
6615 	kfree(rif);
6616 	vr->rif_count--;
6617 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6618 }
6619 
6620 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6621 				 struct net_device *dev)
6622 {
6623 	struct mlxsw_sp_rif *rif;
6624 
6625 	mutex_lock(&mlxsw_sp->router->lock);
6626 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6627 	if (!rif)
6628 		goto out;
6629 	mlxsw_sp_rif_destroy(rif);
6630 out:
6631 	mutex_unlock(&mlxsw_sp->router->lock);
6632 }
6633 
6634 static void
6635 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6636 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6637 {
6638 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6639 
6640 	params->vid = mlxsw_sp_port_vlan->vid;
6641 	params->lag = mlxsw_sp_port->lagged;
6642 	if (params->lag)
6643 		params->lag_id = mlxsw_sp_port->lag_id;
6644 	else
6645 		params->system_port = mlxsw_sp_port->local_port;
6646 }
6647 
6648 static struct mlxsw_sp_rif_subport *
6649 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
6650 {
6651 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
6652 }
6653 
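/* Sub-port RIFs are shared by all {port / LAG, VID} pairs whose upper is the
 * same netdev, so they are reference counted: return the existing RIF if one
 * was already created for the netdev, otherwise create a new one.
 */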
6654 static struct mlxsw_sp_rif *
6655 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
6656 			 const struct mlxsw_sp_rif_params *params,
6657 			 struct netlink_ext_ack *extack)
6658 {
6659 	struct mlxsw_sp_rif_subport *rif_subport;
6660 	struct mlxsw_sp_rif *rif;
6661 
6662 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
6663 	if (!rif)
6664 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
6665 
6666 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
6667 	refcount_inc(&rif_subport->ref_count);
6668 	return rif;
6669 }
6670 
6671 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
6672 {
6673 	struct mlxsw_sp_rif_subport *rif_subport;
6674 
6675 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
6676 	if (!refcount_dec_and_test(&rif_subport->ref_count))
6677 		return;
6678 
6679 	mlxsw_sp_rif_destroy(rif);
6680 }
6681 
6682 static int
6683 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
6684 			       struct net_device *l3_dev,
6685 			       struct netlink_ext_ack *extack)
6686 {
6687 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6688 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
6689 	struct mlxsw_sp_rif_params params = {
6690 		.dev = l3_dev,
6691 	};
6692 	u16 vid = mlxsw_sp_port_vlan->vid;
6693 	struct mlxsw_sp_rif *rif;
6694 	struct mlxsw_sp_fid *fid;
6695 	int err;
6696 
6697 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
6698 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
6699 	if (IS_ERR(rif))
6700 		return PTR_ERR(rif);
6701 
6702 	/* FID was already created, just take a reference */
6703 	fid = rif->ops->fid_get(rif, extack);
6704 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6705 	if (err)
6706 		goto err_fid_port_vid_map;
6707 
6708 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
6709 	if (err)
6710 		goto err_port_vid_learning_set;
6711 
6712 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
6713 					BR_STATE_FORWARDING);
6714 	if (err)
6715 		goto err_port_vid_stp_set;
6716 
6717 	mlxsw_sp_port_vlan->fid = fid;
6718 
6719 	return 0;
6720 
6721 err_port_vid_stp_set:
6722 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6723 err_port_vid_learning_set:
6724 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6725 err_fid_port_vid_map:
6726 	mlxsw_sp_fid_put(fid);
6727 	mlxsw_sp_rif_subport_put(rif);
6728 	return err;
6729 }
6730 
6731 static void
6732 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6733 {
6734 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6735 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
6736 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
6737 	u16 vid = mlxsw_sp_port_vlan->vid;
6738 
6739 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6740 		return;
6741 
6742 	mlxsw_sp_port_vlan->fid = NULL;
6743 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6744 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6745 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6746 	mlxsw_sp_fid_put(fid);
6747 	mlxsw_sp_rif_subport_put(rif);
6748 }
6749 
6750 void
6751 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6752 {
6753 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
6754 
6755 	mutex_lock(&mlxsw_sp->router->lock);
6756 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
6757 	mutex_unlock(&mlxsw_sp->router->lock);
6758 }
6759 
6760 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6761 					     struct net_device *port_dev,
6762 					     unsigned long event, u16 vid,
6763 					     struct netlink_ext_ack *extack)
6764 {
6765 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
6766 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
6767 
6768 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
6769 	if (WARN_ON(!mlxsw_sp_port_vlan))
6770 		return -EINVAL;
6771 
6772 	switch (event) {
6773 	case NETDEV_UP:
6774 		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
6775 						      l3_dev, extack);
6776 	case NETDEV_DOWN:
6777 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
6778 		break;
6779 	}
6780 
6781 	return 0;
6782 }
6783 
6784 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
6785 					unsigned long event,
6786 					struct netlink_ext_ack *extack)
6787 {
6788 	if (netif_is_bridge_port(port_dev) ||
6789 	    netif_is_lag_port(port_dev) ||
6790 	    netif_is_ovs_port(port_dev))
6791 		return 0;
6792 
6793 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
6794 						 MLXSW_SP_DEFAULT_VID, extack);
6795 }
6796 
6797 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6798 					 struct net_device *lag_dev,
6799 					 unsigned long event, u16 vid,
6800 					 struct netlink_ext_ack *extack)
6801 {
6802 	struct net_device *port_dev;
6803 	struct list_head *iter;
6804 	int err;
6805 
6806 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6807 		if (mlxsw_sp_port_dev_check(port_dev)) {
6808 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6809 								port_dev,
6810 								event, vid,
6811 								extack);
6812 			if (err)
6813 				return err;
6814 		}
6815 	}
6816 
6817 	return 0;
6818 }
6819 
6820 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
6821 				       unsigned long event,
6822 				       struct netlink_ext_ack *extack)
6823 {
6824 	if (netif_is_bridge_port(lag_dev))
6825 		return 0;
6826 
6827 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
6828 					     MLXSW_SP_DEFAULT_VID, extack);
6829 }
6830 
6831 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
6832 					  struct net_device *l3_dev,
6833 					  unsigned long event,
6834 					  struct netlink_ext_ack *extack)
6835 {
6836 	struct mlxsw_sp_rif_params params = {
6837 		.dev = l3_dev,
6838 	};
6839 	struct mlxsw_sp_rif *rif;
6840 
6841 	switch (event) {
6842 	case NETDEV_UP:
6843 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
6844 		if (IS_ERR(rif))
6845 			return PTR_ERR(rif);
6846 		break;
6847 	case NETDEV_DOWN:
6848 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6849 		mlxsw_sp_rif_destroy(rif);
6850 		break;
6851 	}
6852 
6853 	return 0;
6854 }
6855 
6856 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
6857 					struct net_device *vlan_dev,
6858 					unsigned long event,
6859 					struct netlink_ext_ack *extack)
6860 {
6861 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
6862 	u16 vid = vlan_dev_vlan_id(vlan_dev);
6863 
6864 	if (netif_is_bridge_port(vlan_dev))
6865 		return 0;
6866 
6867 	if (mlxsw_sp_port_dev_check(real_dev))
6868 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
6869 							 event, vid, extack);
6870 	else if (netif_is_lag_master(real_dev))
6871 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
6872 						     vid, extack);
6873 	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
6874 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
6875 						      extack);
6876 
6877 	return 0;
6878 }
6879 
6880 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
6881 {
6882 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
6883 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6884 
6885 	return ether_addr_equal_masked(mac, vrrp4, mask);
6886 }
6887 
6888 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
6889 {
6890 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
6891 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6892 
6893 	return ether_addr_equal_masked(mac, vrrp6, mask);
6894 }
6895 
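/* VRRP virtual MAC addresses have the form 00:00:5e:00:01:{VRID} for IPv4
 * and 00:00:5e:00:02:{VRID} for IPv6 (RFC 5798). Program the VRID from the
 * last byte of the MAC into the RIF, so that the device can also handle
 * packets addressed to the virtual router's MAC.
 */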
6896 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
6897 				const u8 *mac, bool adding)
6898 {
6899 	char ritr_pl[MLXSW_REG_RITR_LEN];
6900 	u8 vrrp_id = adding ? mac[5] : 0;
6901 	int err;
6902 
6903 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
6904 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
6905 		return 0;
6906 
6907 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
6908 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6909 	if (err)
6910 		return err;
6911 
6912 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
6913 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
6914 	else
6915 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
6916 
6917 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6918 }
6919 
6920 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
6921 				    const struct net_device *macvlan_dev,
6922 				    struct netlink_ext_ack *extack)
6923 {
6924 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6925 	struct mlxsw_sp_rif *rif;
6926 	int err;
6927 
6928 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6929 	if (!rif) {
6930 		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
6931 		return -EOPNOTSUPP;
6932 	}
6933 
6934 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6935 				  mlxsw_sp_fid_index(rif->fid), true);
6936 	if (err)
6937 		return err;
6938 
6939 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
6940 				   macvlan_dev->dev_addr, true);
6941 	if (err)
6942 		goto err_rif_vrrp_add;
6943 
6944 	/* Make sure the bridge driver does not have this MAC pointing at
6945 	 * some other port.
6946 	 */
6947 	if (rif->ops->fdb_del)
6948 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
6949 
6950 	return 0;
6951 
6952 err_rif_vrrp_add:
6953 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6954 			    mlxsw_sp_fid_index(rif->fid), false);
6955 	return err;
6956 }
6957 
6958 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
6959 				       const struct net_device *macvlan_dev)
6960 {
6961 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6962 	struct mlxsw_sp_rif *rif;
6963 
6964 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6965 	/* If we do not have a RIF, then we already took care of
6966 	 * removing the macvlan's MAC during RIF deletion.
6967 	 */
6968 	if (!rif)
6969 		return;
6970 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
6971 			     false);
6972 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6973 			    mlxsw_sp_fid_index(rif->fid), false);
6974 }
6975 
6976 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
6977 			      const struct net_device *macvlan_dev)
6978 {
6979 	mutex_lock(&mlxsw_sp->router->lock);
6980 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
6981 	mutex_unlock(&mlxsw_sp->router->lock);
6982 }
6983 
6984 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
6985 					   struct net_device *macvlan_dev,
6986 					   unsigned long event,
6987 					   struct netlink_ext_ack *extack)
6988 {
6989 	switch (event) {
6990 	case NETDEV_UP:
6991 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
6992 	case NETDEV_DOWN:
6993 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
6994 		break;
6995 	}
6996 
6997 	return 0;
6998 }
6999 
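/* All router interface MAC addresses must agree on the bits covered by
 * mac_mask, presumably because the device maintains a single common prefix
 * for RIF MACs. macvlan and VRF netdevs are exempt since no RIF is created
 * for them; IP-in-IP loopback RIFs are skipped as well.
 */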
7000 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
7001 					       struct net_device *dev,
7002 					       const unsigned char *dev_addr,
7003 					       struct netlink_ext_ack *extack)
7004 {
7005 	struct mlxsw_sp_rif *rif;
7006 	int i;
7007 
	/* A RIF is not created for macvlan netdevs (their MAC is used to
	 * populate the FDB) or for VRF netdevs.
	 */
7011 	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
7012 		return 0;
7013 
7014 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
7015 		rif = mlxsw_sp->router->rifs[i];
7016 		if (rif && rif->ops &&
7017 		    rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
7018 			continue;
7019 		if (rif && rif->dev && rif->dev != dev &&
7020 		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
7021 					     mlxsw_sp->mac_mask)) {
7022 			NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
7023 			return -EINVAL;
7024 		}
7025 	}
7026 
7027 	return 0;
7028 }
7029 
7030 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
7031 				     struct net_device *dev,
7032 				     unsigned long event,
7033 				     struct netlink_ext_ack *extack)
7034 {
7035 	if (mlxsw_sp_port_dev_check(dev))
7036 		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
7037 	else if (netif_is_lag_master(dev))
7038 		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
7039 	else if (netif_is_bridge_master(dev))
7040 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
7041 						      extack);
7042 	else if (is_vlan_dev(dev))
7043 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
7044 						    extack);
7045 	else if (netif_is_macvlan(dev))
7046 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
7047 						       extack);
7048 	else
7049 		return 0;
7050 }
7051 
7052 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
7053 				   unsigned long event, void *ptr)
7054 {
7055 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
7056 	struct net_device *dev = ifa->ifa_dev->dev;
7057 	struct mlxsw_sp_router *router;
7058 	struct mlxsw_sp_rif *rif;
7059 	int err = 0;
7060 
7061 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
7062 	if (event == NETDEV_UP)
7063 		return NOTIFY_DONE;
7064 
7065 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
7066 	mutex_lock(&router->lock);
7067 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
7068 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7069 		goto out;
7070 
7071 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
7072 out:
7073 	mutex_unlock(&router->lock);
7074 	return notifier_from_errno(err);
7075 }
7076 
7077 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
7078 				  unsigned long event, void *ptr)
7079 {
7080 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
7081 	struct net_device *dev = ivi->ivi_dev->dev;
7082 	struct mlxsw_sp *mlxsw_sp;
7083 	struct mlxsw_sp_rif *rif;
7084 	int err = 0;
7085 
7086 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7087 	if (!mlxsw_sp)
7088 		return NOTIFY_DONE;
7089 
7090 	mutex_lock(&mlxsw_sp->router->lock);
7091 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7092 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7093 		goto out;
7094 
7095 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7096 						  ivi->extack);
7097 	if (err)
7098 		goto out;
7099 
7100 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
7101 out:
7102 	mutex_unlock(&mlxsw_sp->router->lock);
7103 	return notifier_from_errno(err);
7104 }
7105 
7106 struct mlxsw_sp_inet6addr_event_work {
7107 	struct work_struct work;
7108 	struct mlxsw_sp *mlxsw_sp;
7109 	struct net_device *dev;
7110 	unsigned long event;
7111 };
7112 
7113 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
7114 {
7115 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
7116 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
7117 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
7118 	struct net_device *dev = inet6addr_work->dev;
7119 	unsigned long event = inet6addr_work->event;
7120 	struct mlxsw_sp_rif *rif;
7121 
7122 	rtnl_lock();
7123 	mutex_lock(&mlxsw_sp->router->lock);
7124 
7125 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7126 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7127 		goto out;
7128 
7129 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
7130 out:
7131 	mutex_unlock(&mlxsw_sp->router->lock);
7132 	rtnl_unlock();
7133 	dev_put(dev);
7134 	kfree(inet6addr_work);
7135 }
7136 
7137 /* Called with rcu_read_lock() */
7138 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
7139 				    unsigned long event, void *ptr)
7140 {
7141 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
7142 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
7143 	struct net_device *dev = if6->idev->dev;
7144 	struct mlxsw_sp_router *router;
7145 
7146 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
7147 	if (event == NETDEV_UP)
7148 		return NOTIFY_DONE;
7149 
7150 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
7151 	if (!inet6addr_work)
7152 		return NOTIFY_BAD;
7153 
7154 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
7155 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
7156 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
7157 	inet6addr_work->dev = dev;
7158 	inet6addr_work->event = event;
7159 	dev_hold(dev);
7160 	mlxsw_core_schedule_work(&inet6addr_work->work);
7161 
7162 	return NOTIFY_DONE;
7163 }
7164 
7165 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
7166 				   unsigned long event, void *ptr)
7167 {
7168 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
7169 	struct net_device *dev = i6vi->i6vi_dev->dev;
7170 	struct mlxsw_sp *mlxsw_sp;
7171 	struct mlxsw_sp_rif *rif;
7172 	int err = 0;
7173 
7174 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7175 	if (!mlxsw_sp)
7176 		return NOTIFY_DONE;
7177 
7178 	mutex_lock(&mlxsw_sp->router->lock);
7179 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7180 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7181 		goto out;
7182 
7183 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7184 						  i6vi->extack);
7185 	if (err)
7186 		goto out;
7187 
7188 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
7189 out:
7190 	mutex_unlock(&mlxsw_sp->router->lock);
7191 	return notifier_from_errno(err);
7192 }
7193 
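/* Update the MAC address and MTU of an existing RIF: query the current
 * RITR entry from the device, modify the relevant fields and write it
 * back.
 */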
7194 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7195 			     const char *mac, int mtu)
7196 {
7197 	char ritr_pl[MLXSW_REG_RITR_LEN];
7198 	int err;
7199 
7200 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
7201 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7202 	if (err)
7203 		return err;
7204 
7205 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
7206 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
7207 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
7208 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7209 }
7210 
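/* React to a change of the netdev's MAC address or MTU: stop directing
 * the old MAC to the router, update the RIF in hardware and direct the
 * new MAC to the router instead, rolling back on any failure.
 */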
7211 static int
7212 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
7213 				  struct mlxsw_sp_rif *rif)
7214 {
7215 	struct net_device *dev = rif->dev;
7216 	u16 fid_index;
7217 	int err;
7218 
7219 	fid_index = mlxsw_sp_fid_index(rif->fid);
7220 
7221 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
7222 	if (err)
7223 		return err;
7224 
7225 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
7226 				dev->mtu);
7227 	if (err)
7228 		goto err_rif_edit;
7229 
7230 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
7231 	if (err)
7232 		goto err_rif_fdb_op;
7233 
7234 	if (rif->mtu != dev->mtu) {
7235 		struct mlxsw_sp_vr *vr;
7236 		int i;
7237 
7238 		/* The RIF is relevant only to its mr_table instance, as unlike
7239 		 * unicast routing, in multicast routing a RIF cannot be shared
7240 		 * between several multicast routing tables.
7241 		 */
7242 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
7243 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
7244 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
7245 						   rif, dev->mtu);
7246 	}
7247 
7248 	ether_addr_copy(rif->addr, dev->dev_addr);
7249 	rif->mtu = dev->mtu;
7250 
7251 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
7252 
7253 	return 0;
7254 
7255 err_rif_fdb_op:
7256 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
7257 err_rif_edit:
7258 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
7259 	return err;
7260 }
7261 
7262 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
7263 			    struct netdev_notifier_pre_changeaddr_info *info)
7264 {
7265 	struct netlink_ext_ack *extack;
7266 
7267 	extack = netdev_notifier_info_to_extack(&info->info);
7268 	return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
7269 						   info->dev_addr, extack);
7270 }
7271 
7272 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
7273 					 unsigned long event, void *ptr)
7274 {
7275 	struct mlxsw_sp *mlxsw_sp;
7276 	struct mlxsw_sp_rif *rif;
7277 	int err = 0;
7278 
7279 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7280 	if (!mlxsw_sp)
7281 		return 0;
7282 
7283 	mutex_lock(&mlxsw_sp->router->lock);
7284 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7285 	if (!rif)
7286 		goto out;
7287 
7288 	switch (event) {
7289 	case NETDEV_CHANGEMTU:
7290 	case NETDEV_CHANGEADDR:
7291 		err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
7292 		break;
7293 	case NETDEV_PRE_CHANGEADDR:
7294 		err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
7295 		break;
7296 	}
7297 
7298 out:
7299 	mutex_unlock(&mlxsw_sp->router->lock);
7300 	return err;
7301 }
7302 
7303 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
7304 				  struct net_device *l3_dev,
7305 				  struct netlink_ext_ack *extack)
7306 {
7307 	struct mlxsw_sp_rif *rif;
7308 
7309 	/* If netdev is already associated with a RIF, then we need to
7310 	 * destroy it and create a new one with the new virtual router ID.
7311 	 */
7312 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7313 	if (rif)
7314 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
7315 					  extack);
7316 
7317 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
7318 }
7319 
7320 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
7321 				    struct net_device *l3_dev)
7322 {
7323 	struct mlxsw_sp_rif *rif;
7324 
7325 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7326 	if (!rif)
7327 		return;
7328 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
7329 }
7330 
7331 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
7332 				 struct netdev_notifier_changeupper_info *info)
7333 {
7334 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
7335 	int err = 0;
7336 
7337 	/* We do not create a RIF for a macvlan, but only use it to
7338 	 * direct more MAC addresses to the router.
7339 	 */
7340 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
7341 		return 0;
7342 
7343 	mutex_lock(&mlxsw_sp->router->lock);
7344 	switch (event) {
7345 	case NETDEV_PRECHANGEUPPER:
7346 		break;
7347 	case NETDEV_CHANGEUPPER:
7348 		if (info->linking) {
7349 			struct netlink_ext_ack *extack;
7350 
7351 			extack = netdev_notifier_info_to_extack(&info->info);
7352 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
7353 		} else {
7354 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
7355 		}
7356 		break;
7357 	}
7358 	mutex_unlock(&mlxsw_sp->router->lock);
7359 
7360 	return err;
7361 }
7362 
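/* Stop directing the MAC addresses of upper macvlan devices to the
 * router. Called when the RIF underlying the macvlans is destroyed.
 */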
7363 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
7364 {
7365 	struct mlxsw_sp_rif *rif = data;
7366 
7367 	if (!netif_is_macvlan(dev))
7368 		return 0;
7369 
7370 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
7371 				   mlxsw_sp_fid_index(rif->fid), false);
7372 }
7373 
7374 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
7375 {
7376 	if (!netif_is_macvlan_port(rif->dev))
7377 		return 0;
7378 
7379 	netdev_warn(rif->dev, "Router interface is being deleted, upper macvlan devices will stop working\n");
7380 	return netdev_walk_all_upper_dev_rcu(rif->dev,
7381 					     __mlxsw_sp_rif_macvlan_flush, rif);
7382 }
7383 
7384 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
7385 				       const struct mlxsw_sp_rif_params *params)
7386 {
7387 	struct mlxsw_sp_rif_subport *rif_subport;
7388 
7389 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
7390 	refcount_set(&rif_subport->ref_count, 1);
7391 	rif_subport->vid = params->vid;
7392 	rif_subport->lag = params->lag;
7393 	if (params->lag)
7394 		rif_subport->lag_id = params->lag_id;
7395 	else
7396 		rif_subport->system_port = params->system_port;
7397 }
7398 
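/* Write the RITR register for a sub-port RIF, binding the RIF to either
 * a LAG or a system port and to the sub-port's VID.
 */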
7399 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
7400 {
7401 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7402 	struct mlxsw_sp_rif_subport *rif_subport;
7403 	char ritr_pl[MLXSW_REG_RITR_LEN];
7404 
7405 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
7406 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
7407 			    rif->rif_index, rif->vr_id, rif->dev->mtu);
7408 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7409 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
7410 				  rif_subport->lag ? rif_subport->lag_id :
7411 						     rif_subport->system_port,
7412 				  rif_subport->vid);
7413 
7414 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7415 }
7416 
7417 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
7418 {
7419 	int err;
7420 
7421 	err = mlxsw_sp_rif_subport_op(rif, true);
7422 	if (err)
7423 		return err;
7424 
7425 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7426 				  mlxsw_sp_fid_index(rif->fid), true);
7427 	if (err)
7428 		goto err_rif_fdb_op;
7429 
7430 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7431 	return 0;
7432 
7433 err_rif_fdb_op:
7434 	mlxsw_sp_rif_subport_op(rif, false);
7435 	return err;
7436 }
7437 
7438 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
7439 {
7440 	struct mlxsw_sp_fid *fid = rif->fid;
7441 
7442 	mlxsw_sp_fid_rif_set(fid, NULL);
7443 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7444 			    mlxsw_sp_fid_index(fid), false);
7445 	mlxsw_sp_rif_macvlan_flush(rif);
7446 	mlxsw_sp_rif_subport_op(rif, false);
7447 }
7448 
7449 static struct mlxsw_sp_fid *
7450 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
7451 			     struct netlink_ext_ack *extack)
7452 {
7453 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
7454 }
7455 
7456 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
7457 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
7458 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
7459 	.setup			= mlxsw_sp_rif_subport_setup,
7460 	.configure		= mlxsw_sp_rif_subport_configure,
7461 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
7462 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
7463 };
7464 
7465 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
7466 				    enum mlxsw_reg_ritr_if_type type,
7467 				    u16 vid_fid, bool enable)
7468 {
7469 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7470 	char ritr_pl[MLXSW_REG_RITR_LEN];
7471 
7472 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
7473 			    rif->dev->mtu);
7474 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7475 	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
7476 
7477 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7478 }
7479 
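/* The router port is a virtual port one above the switch's maximum
 * number of physical ports. It is set as a member in the flooding tables
 * of relevant FIDs so that flooded packets also reach the router.
 */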
7480 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
7481 {
7482 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
7483 }
7484 
7485 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
7486 {
7487 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7488 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7489 	int err;
7490 
7491 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
7492 				       true);
7493 	if (err)
7494 		return err;
7495 
7496 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7497 				     mlxsw_sp_router_port(mlxsw_sp), true);
7498 	if (err)
7499 		goto err_fid_mc_flood_set;
7500 
7501 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7502 				     mlxsw_sp_router_port(mlxsw_sp), true);
7503 	if (err)
7504 		goto err_fid_bc_flood_set;
7505 
7506 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7507 				  mlxsw_sp_fid_index(rif->fid), true);
7508 	if (err)
7509 		goto err_rif_fdb_op;
7510 
7511 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7512 	return 0;
7513 
7514 err_rif_fdb_op:
7515 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7516 			       mlxsw_sp_router_port(mlxsw_sp), false);
7517 err_fid_bc_flood_set:
7518 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7519 			       mlxsw_sp_router_port(mlxsw_sp), false);
7520 err_fid_mc_flood_set:
7521 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7522 	return err;
7523 }
7524 
7525 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
7526 {
7527 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7528 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7529 	struct mlxsw_sp_fid *fid = rif->fid;
7530 
7531 	mlxsw_sp_fid_rif_set(fid, NULL);
7532 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7533 			    mlxsw_sp_fid_index(fid), false);
7534 	mlxsw_sp_rif_macvlan_flush(rif);
7535 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7536 			       mlxsw_sp_router_port(mlxsw_sp), false);
7537 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7538 			       mlxsw_sp_router_port(mlxsw_sp), false);
7539 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7540 }
7541 
7542 static struct mlxsw_sp_fid *
7543 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
7544 			 struct netlink_ext_ack *extack)
7545 {
7546 	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
7547 }
7548 
7549 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7550 {
7551 	struct switchdev_notifier_fdb_info info;
7552 	struct net_device *dev;
7553 
7554 	dev = br_fdb_find_port(rif->dev, mac, 0);
7555 	if (!dev)
7556 		return;
7557 
7558 	info.addr = mac;
7559 	info.vid = 0;
7560 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7561 				 NULL);
7562 }
7563 
7564 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
7565 	.type			= MLXSW_SP_RIF_TYPE_FID,
7566 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7567 	.configure		= mlxsw_sp_rif_fid_configure,
7568 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
7569 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
7570 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
7571 };
7572 
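/* For a VLAN RIF, derive the 802.1Q FID from the VLAN device's VID, or
 * from the bridge's PVID when the address was configured directly on a
 * VLAN-aware bridge.
 */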
7573 static struct mlxsw_sp_fid *
7574 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
7575 			  struct netlink_ext_ack *extack)
7576 {
7577 	struct net_device *br_dev;
7578 	u16 vid;
7579 	int err;
7580 
7581 	if (is_vlan_dev(rif->dev)) {
7582 		vid = vlan_dev_vlan_id(rif->dev);
7583 		br_dev = vlan_dev_real_dev(rif->dev);
7584 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
7585 			return ERR_PTR(-EINVAL);
7586 	} else {
7587 		err = br_vlan_get_pvid(rif->dev, &vid);
7588 		if (err < 0 || !vid) {
7589 			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
7590 			return ERR_PTR(-EINVAL);
7591 		}
7592 	}
7593 
7594 	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
7595 }
7596 
7597 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7598 {
7599 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7600 	struct switchdev_notifier_fdb_info info;
7601 	struct net_device *br_dev;
7602 	struct net_device *dev;
7603 
7604 	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
7605 	dev = br_fdb_find_port(br_dev, mac, vid);
7606 	if (!dev)
7607 		return;
7608 
7609 	info.addr = mac;
7610 	info.vid = vid;
7611 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7612 				 NULL);
7613 }
7614 
7615 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
7616 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
7617 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7618 	.configure		= mlxsw_sp_rif_fid_configure,
7619 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
7620 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
7621 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
7622 };
7623 
7624 static struct mlxsw_sp_rif_ipip_lb *
7625 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
7626 {
7627 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
7628 }
7629 
7630 static void
7631 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
7632 			   const struct mlxsw_sp_rif_params *params)
7633 {
7634 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
7635 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
7636 
7637 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
7638 				 common);
7639 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
7640 	rif_lb->lb_config = params_lb->lb_config;
7641 }
7642 
7643 static int
7644 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7645 {
7646 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7647 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7648 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7649 	struct mlxsw_sp_vr *ul_vr;
7650 	int err;
7651 
7652 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
7653 	if (IS_ERR(ul_vr))
7654 		return PTR_ERR(ul_vr);
7655 
7656 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
7657 	if (err)
7658 		goto err_loopback_op;
7659 
7660 	lb_rif->ul_vr_id = ul_vr->id;
7661 	lb_rif->ul_rif_id = 0;
7662 	++ul_vr->rif_count;
7663 	return 0;
7664 
7665 err_loopback_op:
7666 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7667 	return err;
7668 }
7669 
7670 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7671 {
7672 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7673 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7674 	struct mlxsw_sp_vr *ul_vr;
7675 
7676 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
7677 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
7678 
7679 	--ul_vr->rif_count;
7680 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7681 }
7682 
7683 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
7684 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
7685 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
7686 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
7687 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
7688 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
7689 };
7690 
7691 const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
7692 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
7693 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
7694 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
7695 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
7696 };
7697 
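/* An underlay RIF is modeled as a generic loopback RIF with the maximum
 * MTU, created inside the underlay virtual router.
 */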
7698 static int
7699 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
7700 {
7701 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7702 	char ritr_pl[MLXSW_REG_RITR_LEN];
7703 
7704 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
7705 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
7706 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
7707 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
7708 
7709 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7710 }
7711 
7712 static struct mlxsw_sp_rif *
7713 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
7714 		       struct netlink_ext_ack *extack)
7715 {
7716 	struct mlxsw_sp_rif *ul_rif;
7717 	u16 rif_index;
7718 	int err;
7719 
7720 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
7721 	if (err) {
7722 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
7723 		return ERR_PTR(err);
7724 	}
7725 
7726 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
7727 	if (!ul_rif)
7728 		return ERR_PTR(-ENOMEM);
7729 
7730 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
7731 	ul_rif->mlxsw_sp = mlxsw_sp;
7732 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
7733 	if (err)
7734 		goto ul_rif_op_err;
7735 
7736 	return ul_rif;
7737 
7738 ul_rif_op_err:
7739 	mlxsw_sp->router->rifs[rif_index] = NULL;
7740 	kfree(ul_rif);
7741 	return ERR_PTR(err);
7742 }
7743 
7744 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
7745 {
7746 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7747 
7748 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
7749 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
7750 	kfree(ul_rif);
7751 }
7752 
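/* The underlay RIF is reference-counted per virtual router: the first
 * user of a given underlay VR creates the RIF and subsequent users share
 * it.
 */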
7753 static struct mlxsw_sp_rif *
7754 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
7755 		    struct netlink_ext_ack *extack)
7756 {
7757 	struct mlxsw_sp_vr *vr;
7758 	int err;
7759 
7760 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
7761 	if (IS_ERR(vr))
7762 		return ERR_CAST(vr);
7763 
7764 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
7765 		return vr->ul_rif;
7766 
7767 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
7768 	if (IS_ERR(vr->ul_rif)) {
7769 		err = PTR_ERR(vr->ul_rif);
7770 		goto err_ul_rif_create;
7771 	}
7772 
7773 	vr->rif_count++;
7774 	refcount_set(&vr->ul_rif_refcnt, 1);
7775 
7776 	return vr->ul_rif;
7777 
7778 err_ul_rif_create:
7779 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7780 	return ERR_PTR(err);
7781 }
7782 
7783 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
7784 {
7785 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7786 	struct mlxsw_sp_vr *vr;
7787 
7788 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
7789 
7790 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
7791 		return;
7792 
7793 	vr->rif_count--;
7794 	mlxsw_sp_ul_rif_destroy(ul_rif);
7795 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7796 }
7797 
7798 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
7799 			       u16 *ul_rif_index)
7800 {
7801 	struct mlxsw_sp_rif *ul_rif;
7802 	int err = 0;
7803 
7804 	mutex_lock(&mlxsw_sp->router->lock);
7805 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
7806 	if (IS_ERR(ul_rif)) {
7807 		err = PTR_ERR(ul_rif);
7808 		goto out;
7809 	}
7810 	*ul_rif_index = ul_rif->rif_index;
7811 out:
7812 	mutex_unlock(&mlxsw_sp->router->lock);
7813 	return err;
7814 }
7815 
7816 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
7817 {
7818 	struct mlxsw_sp_rif *ul_rif;
7819 
7820 	mutex_lock(&mlxsw_sp->router->lock);
7821 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
7822 	if (WARN_ON(!ul_rif))
7823 		goto out;
7824 
7825 	mlxsw_sp_ul_rif_put(ul_rif);
7826 out:
7827 	mutex_unlock(&mlxsw_sp->router->lock);
7828 }
7829 
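/* Unlike on Spectrum-1, where a loopback RIF is bound directly to an
 * underlay virtual router, on Spectrum-2 it is bound to an underlay RIF
 * inside that virtual router.
 */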
7830 static int
7831 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7832 {
7833 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7834 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7835 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7836 	struct mlxsw_sp_rif *ul_rif;
7837 	int err;
7838 
7839 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
7840 	if (IS_ERR(ul_rif))
7841 		return PTR_ERR(ul_rif);
7842 
7843 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
7844 	if (err)
7845 		goto err_loopback_op;
7846 
7847 	lb_rif->ul_vr_id = 0;
7848 	lb_rif->ul_rif_id = ul_rif->rif_index;
7849 
7850 	return 0;
7851 
7852 err_loopback_op:
7853 	mlxsw_sp_ul_rif_put(ul_rif);
7854 	return err;
7855 }
7856 
7857 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7858 {
7859 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7860 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7861 	struct mlxsw_sp_rif *ul_rif;
7862 
7863 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
7864 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
7865 	mlxsw_sp_ul_rif_put(ul_rif);
7866 }
7867 
7868 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
7869 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
7870 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
7871 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
7872 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
7873 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
7874 };
7875 
7876 const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
7877 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
7878 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
7879 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
7880 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
7881 };
7882 
7883 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
7884 {
7885 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7886 
7887 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
7888 					 sizeof(struct mlxsw_sp_rif *),
7889 					 GFP_KERNEL);
7890 	if (!mlxsw_sp->router->rifs)
7891 		return -ENOMEM;
7892 
7893 	return 0;
7894 }
7895 
7896 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
7897 {
7898 	int i;
7899 
7900 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7901 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
7902 
7903 	kfree(mlxsw_sp->router->rifs);
7904 }
7905 
7906 static int
7907 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
7908 {
7909 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
7910 
7911 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
7912 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
7913 }
7914 
7915 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
7916 {
7917 	int err;
7918 
7919 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
7920 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
7921 
7922 	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
7923 	if (err)
7924 		return err;
7925 	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
7926 	if (err)
7927 		return err;
7928 
7929 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
7930 }
7931 
7932 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
7933 {
7934 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
7935 }
7936 
7937 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
7938 {
7939 	struct mlxsw_sp_router *router;
7940 
7941 	/* Flush pending FIB notifications and then flush the device's
7942 	 * table before requesting another dump. The FIB notification
7943 	 * block is unregistered, so no need to take RTNL.
7944 	 */
7945 	mlxsw_core_flush_owq();
7946 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7947 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
7948 }
7949 
7950 #ifdef CONFIG_IP_ROUTE_MULTIPATH
7951 static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
7952 {
7953 	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
7954 }
7955 
7956 static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
7957 {
7958 	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
7959 }
7960 
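/* Mirror the kernel's fib_multipath_hash_policy setting: with the
 * default L3 policy only the source and destination addresses are
 * hashed; with the L4 policy the IP protocol and the TCP/UDP ports are
 * hashed as well.
 */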
7961 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
7962 {
7963 	struct net *net = mlxsw_sp_net(mlxsw_sp);
7964 	bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
7965 
7966 	mlxsw_sp_mp_hash_header_set(recr2_pl,
7967 				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
7968 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
7969 	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
7970 	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
7971 	if (only_l3)
7972 		return;
7973 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
7974 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
7975 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
7976 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
7977 }
7978 
7979 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
7980 {
7981 	bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
7982 
7983 	mlxsw_sp_mp_hash_header_set(recr2_pl,
7984 				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
7985 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
7986 	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
7987 	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
7988 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
7989 	if (only_l3) {
7990 		mlxsw_sp_mp_hash_field_set(recr2_pl,
7991 					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
7992 	} else {
7993 		mlxsw_sp_mp_hash_header_set(recr2_pl,
7994 					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
7995 		mlxsw_sp_mp_hash_field_set(recr2_pl,
7996 					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
7997 		mlxsw_sp_mp_hash_field_set(recr2_pl,
7998 					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
7999 	}
8000 }
8001 
8002 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8003 {
8004 	char recr2_pl[MLXSW_REG_RECR2_LEN];
8005 	u32 seed;
8006 
8007 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
8008 	mlxsw_reg_recr2_pack(recr2_pl, seed);
8009 	mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
8010 	mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
8011 
8012 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
8013 }
8014 #else
8015 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8016 {
8017 	return 0;
8018 }
8019 #endif
8020 
8021 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
8022 {
8023 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
8024 	unsigned int i;
8025 
8026 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
8027 
8028 	/* The hardware determines switch priority from the DSCP bits, while
8029 	 * the kernel still derives it from the full ToS byte. The two differ
8030 	 * by the two least-significant ECN bits, so shift each DSCP value
8031 	 * left by two to produce the ToS value the kernel would observe.
8032 	 */
8033 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
8034 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
8035 
8036 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
8037 }
8038 
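/* Enable the hardware router and set the maximum number of router
 * interfaces via the RGCR register. 'usp' mirrors the kernel's
 * net.ipv4.ip_forward_update_priority sysctl.
 */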
8039 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
8040 {
8041 	struct net *net = mlxsw_sp_net(mlxsw_sp);
8042 	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
8043 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
8044 	u64 max_rifs;
8046 
8047 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
8048 		return -EIO;
8049 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
8050 
8051 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
8052 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
8053 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
8054 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8058 }
8059 
8060 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8061 {
8062 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
8063 
8064 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
8065 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8066 }
8067 
8068 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
8069 			 struct netlink_ext_ack *extack)
8070 {
8071 	struct mlxsw_sp_router *router;
8072 	int err;
8073 
8074 	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
8075 	if (!router)
8076 		return -ENOMEM;
8077 	mutex_init(&router->lock);
8078 	mlxsw_sp->router = router;
8079 	router->mlxsw_sp = mlxsw_sp;
8080 
8081 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
8082 	err = register_inetaddr_notifier(&router->inetaddr_nb);
8083 	if (err)
8084 		goto err_register_inetaddr_notifier;
8085 
8086 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
8087 	err = register_inet6addr_notifier(&router->inet6addr_nb);
8088 	if (err)
8089 		goto err_register_inet6addr_notifier;
8090 
8091 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
8092 	err = __mlxsw_sp_router_init(mlxsw_sp);
8093 	if (err)
8094 		goto err_router_init;
8095 
8096 	err = mlxsw_sp_rifs_init(mlxsw_sp);
8097 	if (err)
8098 		goto err_rifs_init;
8099 
8100 	err = mlxsw_sp_ipips_init(mlxsw_sp);
8101 	if (err)
8102 		goto err_ipips_init;
8103 
8104 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
8105 			      &mlxsw_sp_nexthop_ht_params);
8106 	if (err)
8107 		goto err_nexthop_ht_init;
8108 
8109 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
8110 			      &mlxsw_sp_nexthop_group_ht_params);
8111 	if (err)
8112 		goto err_nexthop_group_ht_init;
8113 
8114 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
8115 	err = mlxsw_sp_lpm_init(mlxsw_sp);
8116 	if (err)
8117 		goto err_lpm_init;
8118 
8119 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
8120 	if (err)
8121 		goto err_mr_init;
8122 
8123 	err = mlxsw_sp_vrs_init(mlxsw_sp);
8124 	if (err)
8125 		goto err_vrs_init;
8126 
8127 	err = mlxsw_sp_neigh_init(mlxsw_sp);
8128 	if (err)
8129 		goto err_neigh_init;
8130 
8131 	mlxsw_sp->router->netevent_nb.notifier_call =
8132 		mlxsw_sp_router_netevent_event;
8133 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8134 	if (err)
8135 		goto err_register_netevent_notifier;
8136 
8137 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
8138 	if (err)
8139 		goto err_mp_hash_init;
8140 
8141 	err = mlxsw_sp_dscp_init(mlxsw_sp);
8142 	if (err)
8143 		goto err_dscp_init;
8144 
8145 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
8146 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8147 				    &mlxsw_sp->router->fib_nb,
8148 				    mlxsw_sp_router_fib_dump_flush, extack);
8149 	if (err)
8150 		goto err_register_fib_notifier;
8151 
8152 	return 0;
8153 
8154 err_register_fib_notifier:
8155 err_dscp_init:
8156 err_mp_hash_init:
8157 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8158 err_register_netevent_notifier:
8159 	mlxsw_sp_neigh_fini(mlxsw_sp);
8160 err_neigh_init:
8161 	mlxsw_sp_vrs_fini(mlxsw_sp);
8162 err_vrs_init:
8163 	mlxsw_sp_mr_fini(mlxsw_sp);
8164 err_mr_init:
8165 	mlxsw_sp_lpm_fini(mlxsw_sp);
8166 err_lpm_init:
8167 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8168 err_nexthop_group_ht_init:
8169 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8170 err_nexthop_ht_init:
8171 	mlxsw_sp_ipips_fini(mlxsw_sp);
8172 err_ipips_init:
8173 	mlxsw_sp_rifs_fini(mlxsw_sp);
8174 err_rifs_init:
8175 	__mlxsw_sp_router_fini(mlxsw_sp);
8176 err_router_init:
8177 	unregister_inet6addr_notifier(&router->inet6addr_nb);
8178 err_register_inet6addr_notifier:
8179 	unregister_inetaddr_notifier(&router->inetaddr_nb);
8180 err_register_inetaddr_notifier:
8181 	mutex_destroy(&mlxsw_sp->router->lock);
8182 	kfree(mlxsw_sp->router);
8183 	return err;
8184 }
8185 
8186 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8187 {
8188 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8189 				&mlxsw_sp->router->fib_nb);
8190 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8191 	mlxsw_sp_neigh_fini(mlxsw_sp);
8192 	mlxsw_sp_vrs_fini(mlxsw_sp);
8193 	mlxsw_sp_mr_fini(mlxsw_sp);
8194 	mlxsw_sp_lpm_fini(mlxsw_sp);
8195 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8196 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8197 	mlxsw_sp_ipips_fini(mlxsw_sp);
8198 	mlxsw_sp_rifs_fini(mlxsw_sp);
8199 	__mlxsw_sp_router_fini(mlxsw_sp);
8200 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
8201 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
8202 	mutex_destroy(&mlxsw_sp->router->lock);
8203 	kfree(mlxsw_sp->router);
8204 }
8205