// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

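/* A router interface (RIF) ties a netdevice to the ASIC router: traffic
 * entering through it is routed in hardware. Each RIF can optionally have
 * an ingress and an egress packet counter bound to it; see the
 * mlxsw_sp_rif_counter_* helpers below.
 */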
struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
	u16 ul_rif_id; /* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

struct mlxsw_sp_router_ops {
	int (*init)(struct mlxsw_sp *mlxsw_sp);
	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

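/* Binding a counter to a RIF is a read-modify-write cycle on the RITR
 * (router interface table) register: query the current interface
 * configuration, then write it back with the counter index attached.
 */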
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

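/* Read the current value of a bound RIF counter via the RICNT (router
 * interface counter) register. This is used, e.g., by the dpipe eRIF table
 * to report per-interface packet counts.
 */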
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

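/* Allocating a RIF counter is a three-step sequence: take an index from the
 * RIF counter sub-pool, clear the counter via RICNT, and bind it to the
 * interface via RITR. Any failure returns the index to the pool.
 */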
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

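/* One bin per possible prefix length. IPv6 allows lengths 0..128, hence
 * sizeof(struct in6_addr) * BITS_PER_BYTE + 1 = 129 entries; IPv4 uses the
 * first 33 of them.
 */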
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

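/* Optional per-FIB-entry private data for the low-level router ops. An
 * implementation that leaves fib_entry_priv_size at zero gets no allocation
 * here; presumably only alternative low-level implementations carry state
 * in it.
 */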
static struct mlxsw_sp_fib_entry_priv *
mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
{
	struct mlxsw_sp_fib_entry_priv *priv;

	if (!ll_ops->fib_entry_priv_size)
		/* No need to have priv */
		return NULL;

	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	refcount_set(&priv->refcnt, 1);
	return priv;
}

static void
mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
{
	kfree(priv);
}

static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
{
	refcount_inc(&priv->refcnt);
}

static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv || !refcount_dec_and_test(&priv->refcnt))
		return;
	mlxsw_sp_fib_entry_priv_destroy(priv);
}

static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv)
		return;
	mlxsw_sp_fib_entry_priv_hold(priv);
	list_add(&priv->list, &op_ctx->fib_entry_priv_list);
}

static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
{
	struct mlxsw_sp_fib_entry_priv *priv, *tmp;

	list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
		mlxsw_sp_fib_entry_priv_put(priv);
	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
}

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
	struct mlxsw_sp_fib_entry_priv *priv;
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	struct fib_info *fi;
	u32 tb_id;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

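/* An LPM tree describes the set of prefix lengths in use by the FIBs bound
 * to it. Trees are a scarce hardware resource, so FIBs with an identical
 * prefix usage share one tree; see mlxsw_sp_lpm_tree_get().
 */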
struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
	const struct mlxsw_sp_router_ll_ops *ll_ops;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
					 enum mlxsw_sp_l3proto proto)
{
	return 0;
}

static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
			       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
}

static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
			       xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
}

static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
			       xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	err = ll_ops->init(mlxsw_sp, vr->id, proto);
	if (err)
		return ERR_PTR(err);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	fib->ll_ops = ll_ops;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_router_ll_ops *ll_ops,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralta_pl[MLXSW_REG_XRALTA_LEN];

	mlxsw_reg_xralta_pack(xralta_pl, true,
			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			      lpm_tree->id);
	return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_router_ll_ops *ll_ops,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralta_pl[MLXSW_REG_XRALTA_LEN];

	mlxsw_reg_xralta_pack(xralta_pl, false,
			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			      lpm_tree->id);
	ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}

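/* The bins of the tree are chained linearly: walking the used prefix
 * lengths in ascending order, each bin's left child is the previously
 * packed (shorter) length, and the longest used length becomes the root
 * bin.
 */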
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_router_ll_ops *ll_ops,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralst_pl[MLXSW_REG_XRALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
					  MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_router_ll_ops *ll_ops,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_router_ll_ops *ll_ops,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops =
				mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];

	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 }};
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					     sizeof(struct mlxsw_sp_lpm_tree),
					     GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char xraltb_pl[MLXSW_REG_XRALTB_LEN];

	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto,
			      tree_id);
	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char xraltb_pl[MLXSW_REG_XRALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

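/* A virtual router (VR) mirrors one kernel FIB table in hardware (local,
 * main and default are squashed into one, see mlxsw_sp_fix_tb_id()). Its
 * IPv4 and IPv6 unicast FIBs and both multicast tables are created and
 * destroyed together, and the VR counts as used while they exist.
 */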
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

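/* In the IPIP code below, "ol" stands for the overlay, i.e. the tunnel
 * netdevice itself, while "ul" stands for the underlay, i.e. the device the
 * tunnel is bound to (such as a VRF). Encapsulated packets are routed in
 * the underlay's FIB table.
 */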
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = ipip_ops->parms_init(ol_dev);

	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

err_rem_ip_addr_set:
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
						 enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* Not all tunnel types require increasing the default parsing depth
	 * (96 bytes).
	 */
	if (ipip_ops->inc_parsing_depth)
		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);

	return 0;
}

static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
						  enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipipt];

	if (ipip_ops->inc_parsing_depth)
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
}

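/* Offloading a decap route consumes one KVD linear (KVDL) entry from the
 * adjacency sub-pool; it serves as the tunnel index the decap FIB entry is
 * programmed with. The parser depth is bumped as well when the tunnel type
 * requires it.
 */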
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
						    ipip_entry->ipipt);
	if (err)
		goto err_parsing_depth_inc;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;

	return 0;

err_parsing_depth_inc:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   tunnel_index);
	return err;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;

	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		saddrp = &saddr.addr6;
		saddr_len = 16;
		saddr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	struct net_device *ul_dev;

	rcu_read_lock();

	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
	if (!ul_dev)
		goto out_unlock;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			goto out_unlock;

	rcu_read_unlock();

	return NULL;

out_unlock:
	rcu_read_unlock();
	return ipip_entry;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	bool is_ipip_ul;

	mutex_lock(&mlxsw_sp->router->lock);
	is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
	mutex_unlock(&mlxsw_sp->router->lock);

	return is_ipip_ul;
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	return ops->can_offload(mlxsw_sp, ol_dev);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

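/* Program the loopback RIF backing a tunnel. The RITR register carries the
 * IPIP configuration: the underlay VR and RIF, the tunnel's local (source)
 * address and, for keyed GRE, the output key.
 */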
1576 static int
1577 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1578 			u16 ul_rif_id, bool enable)
1579 {
1580 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1581 	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
1582 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1583 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1584 	char ritr_pl[MLXSW_REG_RITR_LEN];
1585 	struct in6_addr *saddr6;
1586 	u32 saddr4;
1587 
1588 	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
1589 	switch (lb_cf.ul_protocol) {
1590 	case MLXSW_SP_L3_PROTO_IPV4:
1591 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1592 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1593 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1594 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1595 						   ipip_options, ul_vr_id,
1596 						   ul_rif_id, saddr4,
1597 						   lb_cf.okey);
1598 		break;
1599 
1600 	case MLXSW_SP_L3_PROTO_IPV6:
1601 		saddr6 = &lb_cf.saddr.addr6;
1602 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1603 				    rif->rif_index, rif->vr_id, rif->dev->mtu);
1604 		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
1605 						   ipip_options, ul_vr_id,
1606 						   ul_rif_id, saddr6,
1607 						   lb_cf.okey);
1608 		break;
1609 	}
1610 
1611 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1612 }
1613 
1614 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1615 						 struct net_device *ol_dev)
1616 {
1617 	struct mlxsw_sp_ipip_entry *ipip_entry;
1618 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1619 	int err = 0;
1620 
1621 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1622 	if (ipip_entry) {
1623 		lb_rif = ipip_entry->ol_lb;
1624 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1625 					      lb_rif->ul_rif_id, true);
1626 		if (err)
1627 			goto out;
1628 		lb_rif->common.mtu = ol_dev->mtu;
1629 	}
1630 
1631 out:
1632 	return err;
1633 }
1634 
1635 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1636 						struct net_device *ol_dev)
1637 {
1638 	struct mlxsw_sp_ipip_entry *ipip_entry;
1639 
1640 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1641 	if (ipip_entry)
1642 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1643 }
1644 
1645 static void
1646 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1647 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1648 {
1649 	if (ipip_entry->decap_fib_entry)
1650 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1651 }
1652 
1653 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1654 						  struct net_device *ol_dev)
1655 {
1656 	struct mlxsw_sp_ipip_entry *ipip_entry;
1657 
1658 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1659 	if (ipip_entry)
1660 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1661 }
1662 
1663 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1664 					 struct mlxsw_sp_rif *old_rif,
1665 					 struct mlxsw_sp_rif *new_rif);
1666 static int
1667 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1668 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1669 				 bool keep_encap,
1670 				 struct netlink_ext_ack *extack)
1671 {
1672 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1673 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1674 
1675 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1676 						     ipip_entry->ipipt,
1677 						     ipip_entry->ol_dev,
1678 						     extack);
1679 	if (IS_ERR(new_lb_rif))
1680 		return PTR_ERR(new_lb_rif);
1681 	ipip_entry->ol_lb = new_lb_rif;
1682 
1683 	if (keep_encap)
1684 		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1685 					     &new_lb_rif->common);
1686 
1687 	mlxsw_sp_rif_destroy(&old_lb_rif->common);
1688 
1689 	return 0;
1690 }
1691 
1692 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1693 					struct mlxsw_sp_rif *rif);
1694 
1695 /**
1696  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1697  * @mlxsw_sp: mlxsw_sp.
1698  * @ipip_entry: IPIP entry.
1699  * @recreate_loopback: Recreates the associated loopback RIF.
1700  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1701  *              relevant when recreate_loopback is true.
1702  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1703  *                   is only relevant when recreate_loopback is false.
1704  * @extack: extack.
1705  *
1706  * Return: Non-zero value on failure.
1707  */
1708 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1709 					struct mlxsw_sp_ipip_entry *ipip_entry,
1710 					bool recreate_loopback,
1711 					bool keep_encap,
1712 					bool update_nexthops,
1713 					struct netlink_ext_ack *extack)
1714 {
1715 	int err;
1716 
1717 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1718 	 * recreate it. That creates a window of opportunity where RALUE and
1719 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1720 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1721 	 * of RALUE, demote the decap route back.
1722 	 */
1723 	if (ipip_entry->decap_fib_entry)
1724 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1725 
1726 	if (recreate_loopback) {
1727 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1728 						       keep_encap, extack);
1729 		if (err)
1730 			return err;
1731 	} else if (update_nexthops) {
1732 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1733 					    &ipip_entry->ol_lb->common);
1734 	}
1735 
1736 	if (ipip_entry->ol_dev->flags & IFF_UP)
1737 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1738 
1739 	return 0;
1740 }
1741 
1742 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1743 						struct net_device *ol_dev,
1744 						struct netlink_ext_ack *extack)
1745 {
1746 	struct mlxsw_sp_ipip_entry *ipip_entry =
1747 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1748 
1749 	if (!ipip_entry)
1750 		return 0;
1751 
1752 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1753 						   true, false, false, extack);
1754 }
1755 
1756 static int
1757 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1758 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1759 				     struct net_device *ul_dev,
1760 				     bool *demote_this,
1761 				     struct netlink_ext_ack *extack)
1762 {
1763 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1764 	enum mlxsw_sp_l3proto ul_proto;
1765 	union mlxsw_sp_l3addr saddr;
1766 
1767 	/* Moving underlay to a different VRF might cause local address
1768 	 * conflict, and the conflicting tunnels need to be demoted.
1769 	 */
1770 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1771 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1772 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1773 						 saddr, ul_tb_id,
1774 						 ipip_entry)) {
1775 		*demote_this = true;
1776 		return 0;
1777 	}
1778 
1779 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1780 						   true, true, false, extack);
1781 }
1782 
1783 static int
1784 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1785 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1786 				    struct net_device *ul_dev)
1787 {
1788 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1789 						   false, false, true, NULL);
1790 }
1791 
1792 static int
1793 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1794 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1795 				      struct net_device *ul_dev)
1796 {
1797 	/* A down underlay device causes encapsulated packets to not be
1798 	 * forwarded, but decap still works. So refresh next hops without
1799 	 * touching anything else.
1800 	 */
1801 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1802 						   false, false, true, NULL);
1803 }
1804 
1805 static int
1806 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1807 					struct net_device *ol_dev,
1808 					struct netlink_ext_ack *extack)
1809 {
1810 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1811 	struct mlxsw_sp_ipip_entry *ipip_entry;
1813 
1814 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1815 	if (!ipip_entry)
1816 		/* A change might make a tunnel eligible for offloading, but
1817 		 * that is currently not implemented. What falls to slow path
1818 		 * stays there.
1819 		 */
1820 		return 0;
1821 
1822 	/* A change might make a tunnel not eligible for offloading. */
1823 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1824 						 ipip_entry->ipipt)) {
1825 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1826 		return 0;
1827 	}
1828 
1829 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	return ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1832 }
1833 
1834 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1835 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1836 {
1837 	struct net_device *ol_dev = ipip_entry->ol_dev;
1838 
1839 	if (ol_dev->flags & IFF_UP)
1840 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1841 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1842 }
1843 
1844 /* The configuration where several tunnels have the same local address in the
1845  * same underlay table needs special treatment in the HW. That is currently not
1846  * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the `except'
 * argument.
1849  */
1850 bool
1851 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1852 				     enum mlxsw_sp_l3proto ul_proto,
1853 				     union mlxsw_sp_l3addr saddr,
1854 				     u32 ul_tb_id,
1855 				     const struct mlxsw_sp_ipip_entry *except)
1856 {
1857 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1858 
1859 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1860 				 ipip_list_node) {
1861 		if (ipip_entry != except &&
1862 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1863 						      ul_tb_id, ipip_entry)) {
1864 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1865 			return true;
1866 		}
1867 	}
1868 
1869 	return false;
1870 }
1871 
1872 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1873 						     struct net_device *ul_dev)
1874 {
1875 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1876 
1877 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1878 				 ipip_list_node) {
1879 		struct net_device *ol_dev = ipip_entry->ol_dev;
1880 		struct net_device *ipip_ul_dev;
1881 
1882 		rcu_read_lock();
1883 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1884 		rcu_read_unlock();
1885 		if (ipip_ul_dev == ul_dev)
1886 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1887 	}
1888 }
1889 
1890 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1891 				     struct net_device *ol_dev,
1892 				     unsigned long event,
1893 				     struct netdev_notifier_info *info)
1894 {
1895 	struct netdev_notifier_changeupper_info *chup;
1896 	struct netlink_ext_ack *extack;
1897 	int err = 0;
1898 
1899 	mutex_lock(&mlxsw_sp->router->lock);
1900 	switch (event) {
1901 	case NETDEV_REGISTER:
1902 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1903 		break;
1904 	case NETDEV_UNREGISTER:
1905 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1906 		break;
1907 	case NETDEV_UP:
1908 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1909 		break;
1910 	case NETDEV_DOWN:
1911 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1912 		break;
1913 	case NETDEV_CHANGEUPPER:
1914 		chup = container_of(info, typeof(*chup), info);
1915 		extack = info->extack;
1916 		if (netif_is_l3_master(chup->upper_dev))
1917 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1918 								   ol_dev,
1919 								   extack);
1920 		break;
1921 	case NETDEV_CHANGE:
1922 		extack = info->extack;
1923 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1924 							      ol_dev, extack);
1925 		break;
1926 	case NETDEV_CHANGEMTU:
1927 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1928 		break;
1929 	}
1930 	mutex_unlock(&mlxsw_sp->router->lock);
1931 	return err;
1932 }
1933 
1934 static int
1935 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1936 				   struct mlxsw_sp_ipip_entry *ipip_entry,
1937 				   struct net_device *ul_dev,
1938 				   bool *demote_this,
1939 				   unsigned long event,
1940 				   struct netdev_notifier_info *info)
1941 {
1942 	struct netdev_notifier_changeupper_info *chup;
1943 	struct netlink_ext_ack *extack;
1944 
1945 	switch (event) {
1946 	case NETDEV_CHANGEUPPER:
1947 		chup = container_of(info, typeof(*chup), info);
1948 		extack = info->extack;
1949 		if (netif_is_l3_master(chup->upper_dev))
1950 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1951 								    ipip_entry,
1952 								    ul_dev,
1953 								    demote_this,
1954 								    extack);
1955 		break;
1956 
1957 	case NETDEV_UP:
1958 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1959 							   ul_dev);
1960 	case NETDEV_DOWN:
1961 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1962 							     ipip_entry,
1963 							     ul_dev);
1964 	}
1965 	return 0;
1966 }
1967 
1968 int
1969 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1970 				 struct net_device *ul_dev,
1971 				 unsigned long event,
1972 				 struct netdev_notifier_info *info)
1973 {
1974 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1975 	int err = 0;
1976 
1977 	mutex_lock(&mlxsw_sp->router->lock);
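	/* Several tunnels may share the same underlay device. Visit each of
	 * them, taking care that a demotion removes the current entry from
	 * the list being walked.
	 */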
1978 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1979 								ul_dev,
1980 								ipip_entry))) {
1981 		struct mlxsw_sp_ipip_entry *prev;
1982 		bool demote_this = false;
1983 
1984 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1985 							 ul_dev, &demote_this,
1986 							 event, info);
1987 		if (err) {
1988 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1989 								 ul_dev);
1990 			break;
1991 		}
1992 
1993 		if (demote_this) {
1994 			if (list_is_first(&ipip_entry->ipip_list_node,
1995 					  &mlxsw_sp->router->ipip_list))
1996 				prev = NULL;
1997 			else
1998 				/* This can't be cached from previous iteration,
1999 				 * because that entry could be gone now.
2000 				 */
2001 				prev = list_prev_entry(ipip_entry,
2002 						       ipip_list_node);
2003 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
2004 			ipip_entry = prev;
2005 		}
2006 	}
2007 	mutex_unlock(&mlxsw_sp->router->lock);
2008 
2009 	return err;
2010 }
2011 
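/* Promote the IP2ME route that traps packets matching the NVE tunnel's
 * underlay source address into a hardware decap entry. The decap
 * configuration is remembered, so that a matching route added later can be
 * promoted as well.
 */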
2012 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2013 				      enum mlxsw_sp_l3proto ul_proto,
2014 				      const union mlxsw_sp_l3addr *ul_sip,
2015 				      u32 tunnel_index)
2016 {
2017 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2018 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2019 	struct mlxsw_sp_fib_entry *fib_entry;
2020 	int err = 0;
2021 
2022 	mutex_lock(&mlxsw_sp->router->lock);
2023 
2024 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
2025 		err = -EINVAL;
2026 		goto out;
2027 	}
2028 
2029 	router->nve_decap_config.ul_tb_id = ul_tb_id;
2030 	router->nve_decap_config.tunnel_index = tunnel_index;
2031 	router->nve_decap_config.ul_proto = ul_proto;
2032 	router->nve_decap_config.ul_sip = *ul_sip;
2033 	router->nve_decap_config.valid = true;
2034 
2035 	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface.
	 */
2038 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2039 							 ul_proto, ul_sip,
2040 							 type);
2041 	if (!fib_entry)
2042 		goto out;
2043 
2044 	fib_entry->decap.tunnel_index = tunnel_index;
2045 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2046 
2047 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2048 	if (err)
2049 		goto err_fib_entry_update;
2050 
2051 	goto out;
2052 
2053 err_fib_entry_update:
2054 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2055 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2056 out:
2057 	mutex_unlock(&mlxsw_sp->router->lock);
2058 	return err;
2059 }
2060 
2061 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2062 				      enum mlxsw_sp_l3proto ul_proto,
2063 				      const union mlxsw_sp_l3addr *ul_sip)
2064 {
2065 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2066 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2067 	struct mlxsw_sp_fib_entry *fib_entry;
2068 
2069 	mutex_lock(&mlxsw_sp->router->lock);
2070 
2071 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2072 		goto out;
2073 
2074 	router->nve_decap_config.valid = false;
2075 
2076 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2077 							 ul_proto, ul_sip,
2078 							 type);
2079 	if (!fib_entry)
2080 		goto out;
2081 
2082 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2083 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2084 out:
2085 	mutex_unlock(&mlxsw_sp->router->lock);
2086 }
2087 
2088 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2089 					 u32 ul_tb_id,
2090 					 enum mlxsw_sp_l3proto ul_proto,
2091 					 const union mlxsw_sp_l3addr *ul_sip)
2092 {
2093 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2094 
2095 	return router->nve_decap_config.valid &&
2096 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
2097 	       router->nve_decap_config.ul_proto == ul_proto &&
2098 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2099 		       sizeof(*ul_sip));
2100 }
2101 
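/* Offloaded neighbours are tracked in a driver-private cache, keyed by the
 * kernel's struct neighbour pointer and linked both into a rhashtable and
 * onto a per-RIF list.
 */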
2102 struct mlxsw_sp_neigh_key {
2103 	struct neighbour *n;
2104 };
2105 
2106 struct mlxsw_sp_neigh_entry {
2107 	struct list_head rif_list_node;
2108 	struct rhash_head ht_node;
2109 	struct mlxsw_sp_neigh_key key;
2110 	u16 rif;
2111 	bool connected;
2112 	unsigned char ha[ETH_ALEN];
2113 	struct list_head nexthop_list; /* list of nexthops using
2114 					* this neigh entry
2115 					*/
2116 	struct list_head nexthop_neighs_list_node;
2117 	unsigned int counter_index;
2118 	bool counter_valid;
2119 };
2120 
2121 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2122 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2123 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2124 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
2125 };
2126 
2127 struct mlxsw_sp_neigh_entry *
2128 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2129 			struct mlxsw_sp_neigh_entry *neigh_entry)
2130 {
2131 	if (!neigh_entry) {
2132 		if (list_empty(&rif->neigh_list))
2133 			return NULL;
2134 		else
2135 			return list_first_entry(&rif->neigh_list,
2136 						typeof(*neigh_entry),
2137 						rif_list_node);
2138 	}
2139 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2140 		return NULL;
2141 	return list_next_entry(neigh_entry, rif_list_node);
2142 }
2143 
2144 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2145 {
2146 	return neigh_entry->key.n->tbl->family;
2147 }
2148 
2149 unsigned char *
2150 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2151 {
2152 	return neigh_entry->ha;
2153 }
2154 
2155 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2156 {
2157 	struct neighbour *n;
2158 
2159 	n = neigh_entry->key.n;
2160 	return ntohl(*((__be32 *) n->primary_key));
2161 }
2162 
2163 struct in6_addr *
2164 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2165 {
2166 	struct neighbour *n;
2167 
2168 	n = neigh_entry->key.n;
2169 	return (struct in6_addr *) &n->primary_key;
2170 }
2171 
2172 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2173 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2174 			       u64 *p_counter)
2175 {
2176 	if (!neigh_entry->counter_valid)
2177 		return -EINVAL;
2178 
2179 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2180 					 p_counter, NULL);
2181 }
2182 
2183 static struct mlxsw_sp_neigh_entry *
2184 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2185 			   u16 rif)
2186 {
2187 	struct mlxsw_sp_neigh_entry *neigh_entry;
2188 
2189 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2190 	if (!neigh_entry)
2191 		return NULL;
2192 
2193 	neigh_entry->key.n = n;
2194 	neigh_entry->rif = rif;
2195 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2196 
2197 	return neigh_entry;
2198 }
2199 
2200 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2201 {
2202 	kfree(neigh_entry);
2203 }
2204 
2205 static int
2206 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2207 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2208 {
2209 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2210 				      &neigh_entry->ht_node,
2211 				      mlxsw_sp_neigh_ht_params);
2212 }
2213 
2214 static void
2215 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2216 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2217 {
2218 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2219 			       &neigh_entry->ht_node,
2220 			       mlxsw_sp_neigh_ht_params);
2221 }
2222 
2223 static bool
2224 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2225 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2226 {
2227 	struct devlink *devlink;
2228 	const char *table_name;
2229 
2230 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2231 	case AF_INET:
2232 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2233 		break;
2234 	case AF_INET6:
2235 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2236 		break;
2237 	default:
2238 		WARN_ON(1);
2239 		return false;
2240 	}
2241 
2242 	devlink = priv_to_devlink(mlxsw_sp->core);
2243 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2244 }
2245 
2246 static void
2247 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2248 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2249 {
2250 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2251 		return;
2252 
2253 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2254 		return;
2255 
2256 	neigh_entry->counter_valid = true;
2257 }
2258 
2259 static void
2260 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2261 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2262 {
2263 	if (!neigh_entry->counter_valid)
2264 		return;
2265 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2266 				   neigh_entry->counter_index);
2267 	neigh_entry->counter_valid = false;
2268 }
2269 
2270 static struct mlxsw_sp_neigh_entry *
2271 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2272 {
2273 	struct mlxsw_sp_neigh_entry *neigh_entry;
2274 	struct mlxsw_sp_rif *rif;
2275 	int err;
2276 
2277 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2278 	if (!rif)
2279 		return ERR_PTR(-EINVAL);
2280 
2281 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2282 	if (!neigh_entry)
2283 		return ERR_PTR(-ENOMEM);
2284 
2285 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2286 	if (err)
2287 		goto err_neigh_entry_insert;
2288 
2289 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2290 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2291 
2292 	return neigh_entry;
2293 
2294 err_neigh_entry_insert:
2295 	mlxsw_sp_neigh_entry_free(neigh_entry);
2296 	return ERR_PTR(err);
2297 }
2298 
2299 static void
2300 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2301 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2302 {
2303 	list_del(&neigh_entry->rif_list_node);
2304 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2305 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2306 	mlxsw_sp_neigh_entry_free(neigh_entry);
2307 }
2308 
2309 static struct mlxsw_sp_neigh_entry *
2310 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2311 {
2312 	struct mlxsw_sp_neigh_key key;
2313 
2314 	key.n = n;
2315 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2316 				      &key, mlxsw_sp_neigh_ht_params);
2317 }
2318 
2319 static void
2320 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2321 {
2322 	unsigned long interval;
2323 
2324 #if IS_ENABLED(CONFIG_IPV6)
2325 	interval = min_t(unsigned long,
2326 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2327 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2328 #else
2329 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2330 #endif
2331 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2332 }
2333 
2334 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2335 						   char *rauhtd_pl,
2336 						   int ent_index)
2337 {
2338 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2339 	struct net_device *dev;
2340 	struct neighbour *n;
2341 	__be32 dipn;
2342 	u32 dip;
2343 	u16 rif;
2344 
2345 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2346 
2347 	if (WARN_ON_ONCE(rif >= max_rifs))
2348 		return;
2349 	if (!mlxsw_sp->router->rifs[rif]) {
2350 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2351 		return;
2352 	}
2353 
2354 	dipn = htonl(dip);
2355 	dev = mlxsw_sp->router->rifs[rif]->dev;
2356 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2357 	if (!n)
2358 		return;
2359 
2360 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2361 	neigh_event_send(n, NULL);
2362 	neigh_release(n);
2363 }
2364 
2365 #if IS_ENABLED(CONFIG_IPV6)
2366 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2367 						   char *rauhtd_pl,
2368 						   int rec_index)
2369 {
2370 	struct net_device *dev;
2371 	struct neighbour *n;
2372 	struct in6_addr dip;
2373 	u16 rif;
2374 
2375 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2376 					 (char *) &dip);
2377 
2378 	if (!mlxsw_sp->router->rifs[rif]) {
2379 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2380 		return;
2381 	}
2382 
2383 	dev = mlxsw_sp->router->rifs[rif]->dev;
2384 	n = neigh_lookup(&nd_tbl, &dip, dev);
2385 	if (!n)
2386 		return;
2387 
2388 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2389 	neigh_event_send(n, NULL);
2390 	neigh_release(n);
2391 }
2392 #else
2393 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2394 						   char *rauhtd_pl,
2395 						   int rec_index)
2396 {
2397 }
2398 #endif
2399 
2400 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2401 						   char *rauhtd_pl,
2402 						   int rec_index)
2403 {
2404 	u8 num_entries;
2405 	int i;
2406 
2407 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2408 								rec_index);
	/* The hardware field is zero-based (0 means one entry), so add 1. */
2410 	num_entries++;
2411 
2412 	/* Each record consists of several neighbour entries. */
2413 	for (i = 0; i < num_entries; i++) {
2414 		int ent_index;
2415 
2416 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2417 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2418 						       ent_index);
2419 	}
2420 
2421 }
2422 
2423 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2424 						   char *rauhtd_pl,
2425 						   int rec_index)
2426 {
2427 	/* One record contains one entry. */
2428 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2429 					       rec_index);
2430 }
2431 
2432 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2433 					      char *rauhtd_pl, int rec_index)
2434 {
2435 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2436 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2437 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2438 						       rec_index);
2439 		break;
2440 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2441 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2442 						       rec_index);
2443 		break;
2444 	}
2445 }
2446 
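/* The response is considered full when the maximum number of records was
 * returned and the last record is packed to capacity, in which case another
 * query may be needed to retrieve the remaining entries.
 */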
2447 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2448 {
2449 	u8 num_rec, last_rec_index, num_entries;
2450 
2451 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2452 	last_rec_index = num_rec - 1;
2453 
2454 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2455 		return false;
2456 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2457 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2458 		return true;
2459 
2460 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2461 								last_rec_index);
2462 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2463 		return true;
2464 	return false;
2465 }
2466 
2467 static int
2468 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2469 				       char *rauhtd_pl,
2470 				       enum mlxsw_reg_rauhtd_type type)
2471 {
2472 	int i, num_rec;
2473 	int err;
2474 
2475 	/* Ensure the RIF we read from the device does not change mid-dump. */
2476 	mutex_lock(&mlxsw_sp->router->lock);
2477 	do {
2478 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2479 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2480 				      rauhtd_pl);
2481 		if (err) {
2482 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2483 			break;
2484 		}
2485 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2486 		for (i = 0; i < num_rec; i++)
2487 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2488 							  i);
2489 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2490 	mutex_unlock(&mlxsw_sp->router->lock);
2491 
2492 	return err;
2493 }
2494 
2495 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2496 {
2497 	enum mlxsw_reg_rauhtd_type type;
2498 	char *rauhtd_pl;
2499 	int err;
2500 
2501 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2502 	if (!rauhtd_pl)
2503 		return -ENOMEM;
2504 
2505 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2506 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2507 	if (err)
2508 		goto out;
2509 
2510 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2511 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2512 out:
2513 	kfree(rauhtd_pl);
2514 	return err;
2515 }
2516 
2517 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2518 {
2519 	struct mlxsw_sp_neigh_entry *neigh_entry;
2520 
2521 	mutex_lock(&mlxsw_sp->router->lock);
2522 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2523 			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of traffic.
		 */
2527 		neigh_event_send(neigh_entry->key.n, NULL);
2528 	mutex_unlock(&mlxsw_sp->router->lock);
2529 }
2530 
2531 static void
2532 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2533 {
2534 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2535 
2536 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2537 			       msecs_to_jiffies(interval));
2538 }
2539 
2540 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2541 {
2542 	struct mlxsw_sp_router *router;
2543 	int err;
2544 
2545 	router = container_of(work, struct mlxsw_sp_router,
2546 			      neighs_update.dw.work);
2547 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2548 	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2550 
2551 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2552 
2553 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2554 }
2555 
2556 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2557 {
2558 	struct mlxsw_sp_neigh_entry *neigh_entry;
2559 	struct mlxsw_sp_router *router;
2560 
2561 	router = container_of(work, struct mlxsw_sp_router,
2562 			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those that are unresolved and
	 * send ARP requests to them. This solves the chicken-and-egg problem
	 * where a nexthop would not be offloaded until its neighbour is
	 * resolved, but the neighbour would never be resolved as long as
	 * traffic flows in HW via a different nexthop.
	 */
2569 	mutex_lock(&router->lock);
2570 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2571 			    nexthop_neighs_list_node)
2572 		if (!neigh_entry->connected)
2573 			neigh_event_send(neigh_entry->key.n, NULL);
2574 	mutex_unlock(&router->lock);
2575 
2576 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2577 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2578 }
2579 
2580 static void
2581 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2582 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2583 			      bool removing, bool dead);
2584 
2585 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2586 {
2587 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2588 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2589 }
2590 
2591 static int
2592 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2593 				struct mlxsw_sp_neigh_entry *neigh_entry,
2594 				enum mlxsw_reg_rauht_op op)
2595 {
2596 	struct neighbour *n = neigh_entry->key.n;
2597 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2598 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2599 
2600 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2601 			      dip);
2602 	if (neigh_entry->counter_valid)
2603 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2604 					     neigh_entry->counter_index);
2605 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2606 }
2607 
2608 static int
2609 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2610 				struct mlxsw_sp_neigh_entry *neigh_entry,
2611 				enum mlxsw_reg_rauht_op op)
2612 {
2613 	struct neighbour *n = neigh_entry->key.n;
2614 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2615 	const char *dip = n->primary_key;
2616 
2617 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2618 			      dip);
2619 	if (neigh_entry->counter_valid)
2620 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2621 					     neigh_entry->counter_index);
2622 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2623 }
2624 
2625 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2626 {
2627 	struct neighbour *n = neigh_entry->key.n;
2628 
2629 	/* Packets with a link-local destination address are trapped
2630 	 * after LPM lookup and never reach the neighbour table, so
2631 	 * there is no need to program such neighbours to the device.
2632 	 */
2633 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2634 	    IPV6_ADDR_LINKLOCAL)
2635 		return true;
2636 	return false;
2637 }
2638 
2639 static void
2640 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2641 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2642 			    bool adding)
2643 {
2644 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2645 	int err;
2646 
2647 	if (!adding && !neigh_entry->connected)
2648 		return;
2649 	neigh_entry->connected = adding;
2650 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2651 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2652 						      op);
2653 		if (err)
2654 			return;
2655 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2656 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2657 			return;
2658 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2659 						      op);
2660 		if (err)
2661 			return;
2662 	} else {
2663 		WARN_ON_ONCE(1);
2664 		return;
2665 	}
2666 
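	/* Reflect the new state to user space via the neighbour's offload
	 * flag.
	 */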
2667 	if (adding)
2668 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2669 	else
2670 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2671 }
2672 
2673 void
2674 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2675 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2676 				    bool adding)
2677 {
2678 	if (adding)
2679 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2680 	else
2681 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2682 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2683 }
2684 
2685 struct mlxsw_sp_netevent_work {
2686 	struct work_struct work;
2687 	struct mlxsw_sp *mlxsw_sp;
2688 	struct neighbour *n;
2689 };
2690 
2691 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2692 {
2693 	struct mlxsw_sp_netevent_work *net_work =
2694 		container_of(work, struct mlxsw_sp_netevent_work, work);
2695 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2696 	struct mlxsw_sp_neigh_entry *neigh_entry;
2697 	struct neighbour *n = net_work->n;
2698 	unsigned char ha[ETH_ALEN];
2699 	bool entry_connected;
2700 	u8 nud_state, dead;
2701 
2702 	/* If these parameters are changed after we release the lock,
2703 	 * then we are guaranteed to receive another event letting us
2704 	 * know about it.
2705 	 */
2706 	read_lock_bh(&n->lock);
2707 	memcpy(ha, n->ha, ETH_ALEN);
2708 	nud_state = n->nud_state;
2709 	dead = n->dead;
2710 	read_unlock_bh(&n->lock);
2711 
2712 	mutex_lock(&mlxsw_sp->router->lock);
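	/* A neighbour update can change the egress path of mirroring
	 * sessions, so have them re-resolved.
	 */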
2713 	mlxsw_sp_span_respin(mlxsw_sp);
2714 
2715 	entry_connected = nud_state & NUD_VALID && !dead;
2716 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2717 	if (!entry_connected && !neigh_entry)
2718 		goto out;
2719 	if (!neigh_entry) {
2720 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2721 		if (IS_ERR(neigh_entry))
2722 			goto out;
2723 	}
2724 
2725 	if (neigh_entry->connected && entry_connected &&
2726 	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2727 		goto out;
2728 
2729 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2730 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2731 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2732 				      dead);
2733 
2734 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2735 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2736 
2737 out:
2738 	mutex_unlock(&mlxsw_sp->router->lock);
2739 	neigh_release(n);
2740 	kfree(net_work);
2741 }
2742 
2743 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2744 
2745 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2746 {
2747 	struct mlxsw_sp_netevent_work *net_work =
2748 		container_of(work, struct mlxsw_sp_netevent_work, work);
2749 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2750 
2751 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2752 	kfree(net_work);
2753 }
2754 
2755 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2756 
2757 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2758 {
2759 	struct mlxsw_sp_netevent_work *net_work =
2760 		container_of(work, struct mlxsw_sp_netevent_work, work);
2761 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2762 
2763 	__mlxsw_sp_router_init(mlxsw_sp);
2764 	kfree(net_work);
2765 }
2766 
2767 static int mlxsw_sp_router_schedule_work(struct net *net,
2768 					 struct notifier_block *nb,
2769 					 void (*cb)(struct work_struct *))
2770 {
2771 	struct mlxsw_sp_netevent_work *net_work;
2772 	struct mlxsw_sp_router *router;
2773 
2774 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2775 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2776 		return NOTIFY_DONE;
2777 
2778 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2779 	if (!net_work)
2780 		return NOTIFY_BAD;
2781 
2782 	INIT_WORK(&net_work->work, cb);
2783 	net_work->mlxsw_sp = router->mlxsw_sp;
2784 	mlxsw_core_schedule_work(&net_work->work);
2785 	return NOTIFY_DONE;
2786 }
2787 
2788 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2789 					  unsigned long event, void *ptr)
2790 {
2791 	struct mlxsw_sp_netevent_work *net_work;
2792 	struct mlxsw_sp_port *mlxsw_sp_port;
2793 	struct mlxsw_sp *mlxsw_sp;
2794 	unsigned long interval;
2795 	struct neigh_parms *p;
2796 	struct neighbour *n;
2797 
2798 	switch (event) {
2799 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2800 		p = ptr;
2801 
2802 		/* We don't care about changes in the default table. */
2803 		if (!p->dev || (p->tbl->family != AF_INET &&
2804 				p->tbl->family != AF_INET6))
2805 			return NOTIFY_DONE;
2806 
2807 		/* We are in atomic context and can't take RTNL mutex,
2808 		 * so use RCU variant to walk the device chain.
2809 		 */
2810 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2811 		if (!mlxsw_sp_port)
2812 			return NOTIFY_DONE;
2813 
2814 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2815 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2816 		mlxsw_sp->router->neighs_update.interval = interval;
2817 
2818 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2819 		break;
2820 	case NETEVENT_NEIGH_UPDATE:
2821 		n = ptr;
2822 
2823 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2824 			return NOTIFY_DONE;
2825 
2826 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2827 		if (!mlxsw_sp_port)
2828 			return NOTIFY_DONE;
2829 
2830 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2831 		if (!net_work) {
2832 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
2833 			return NOTIFY_BAD;
2834 		}
2835 
2836 		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2837 		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2838 		net_work->n = n;
2839 
2840 		/* Take a reference to ensure the neighbour won't be
		 * destroyed until we drop the reference in the work
		 * item.
2843 		 */
2844 		neigh_clone(n);
2845 		mlxsw_core_schedule_work(&net_work->work);
2846 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2847 		break;
2848 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2849 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2850 		return mlxsw_sp_router_schedule_work(ptr, nb,
2851 				mlxsw_sp_router_mp_hash_event_work);
2852 
2853 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2854 		return mlxsw_sp_router_schedule_work(ptr, nb,
2855 				mlxsw_sp_router_update_priority_work);
2856 	}
2857 
2858 	return NOTIFY_DONE;
2859 }
2860 
2861 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2862 {
2863 	int err;
2864 
2865 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2866 			      &mlxsw_sp_neigh_ht_params);
2867 	if (err)
2868 		return err;
2869 
2870 	/* Initialize the polling interval according to the default
2871 	 * table.
2872 	 */
2873 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2874 
	/* Create the delayed works for neighbour activity update and for
	 * probing of unresolved nexthops.
	 */
2876 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2877 			  mlxsw_sp_router_neighs_update_work);
2878 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2879 			  mlxsw_sp_router_probe_unresolved_nexthops);
2880 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2881 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2882 	return 0;
2883 }
2884 
2885 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2886 {
2887 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2888 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2889 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2890 }
2891 
2892 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2893 					 struct mlxsw_sp_rif *rif)
2894 {
2895 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2896 
2897 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2898 				 rif_list_node) {
2899 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2900 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2901 	}
2902 }
2903 
2904 enum mlxsw_sp_nexthop_type {
2905 	MLXSW_SP_NEXTHOP_TYPE_ETH,
2906 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
2907 };
2908 
2909 enum mlxsw_sp_nexthop_action {
2910 	/* Nexthop forwards packets to an egress RIF */
2911 	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
2912 	/* Nexthop discards packets */
2913 	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
2914 	/* Nexthop traps packets */
2915 	MLXSW_SP_NEXTHOP_ACTION_TRAP,
2916 };
2917 
2918 struct mlxsw_sp_nexthop_key {
2919 	struct fib_nh *fib_nh;
2920 };
2921 
2922 struct mlxsw_sp_nexthop {
2923 	struct list_head neigh_list_node; /* member of neigh entry list */
2924 	struct list_head rif_list_node;
2925 	struct list_head router_list_node;
2926 	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
2927 						   * this nexthop belongs to
2928 						   */
2929 	struct rhash_head ht_node;
2930 	struct neigh_table *neigh_tbl;
2931 	struct mlxsw_sp_nexthop_key key;
2932 	unsigned char gw_addr[sizeof(struct in6_addr)];
2933 	int ifindex;
2934 	int nh_weight;
2935 	int norm_nh_weight;
2936 	int num_adj_entries;
2937 	struct mlxsw_sp_rif *rif;
2938 	u8 should_offload:1, /* set indicates this nexthop should be written
2939 			      * to the adjacency table.
2940 			      */
2941 	   offloaded:1, /* set indicates this nexthop was written to the
2942 			 * adjacency table.
2943 			 */
2944 	   update:1; /* set indicates this nexthop should be updated in the
2945 		      * adjacency table (f.e., its MAC changed).
2946 		      */
2947 	enum mlxsw_sp_nexthop_action action;
2948 	enum mlxsw_sp_nexthop_type type;
2949 	union {
2950 		struct mlxsw_sp_neigh_entry *neigh_entry;
2951 		struct mlxsw_sp_ipip_entry *ipip_entry;
2952 	};
2953 	unsigned int counter_index;
2954 	bool counter_valid;
2955 };
2956 
2957 enum mlxsw_sp_nexthop_group_type {
2958 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
2959 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
2960 	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
2961 };
2962 
2963 struct mlxsw_sp_nexthop_group_info {
2964 	struct mlxsw_sp_nexthop_group *nh_grp;
2965 	u32 adj_index;
2966 	u16 ecmp_size;
2967 	u16 count;
2968 	int sum_norm_weight;
2969 	u8 adj_index_valid:1,
2970 	   gateway:1, /* routes using the group use a gateway */
2971 	   is_resilient:1;
2972 	struct list_head list; /* member in nh_res_grp_list */
	struct mlxsw_sp_nexthop nexthops[];
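/* Convenience alias for the RIF of the group's first nexthop. */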
2974 #define nh_rif	nexthops[0].rif
2975 };
2976 
2977 struct mlxsw_sp_nexthop_group_vr_key {
2978 	u16 vr_id;
2979 	enum mlxsw_sp_l3proto proto;
2980 };
2981 
2982 struct mlxsw_sp_nexthop_group_vr_entry {
2983 	struct list_head list; /* member in vr_list */
2984 	struct rhash_head ht_node; /* member in vr_ht */
2985 	refcount_t ref_count;
2986 	struct mlxsw_sp_nexthop_group_vr_key key;
2987 };
2988 
2989 struct mlxsw_sp_nexthop_group {
2990 	struct rhash_head ht_node;
2991 	struct list_head fib_list; /* list of fib entries that use this group */
2992 	union {
2993 		struct {
2994 			struct fib_info *fi;
2995 		} ipv4;
2996 		struct {
2997 			u32 id;
2998 		} obj;
2999 	};
3000 	struct mlxsw_sp_nexthop_group_info *nhgi;
3001 	struct list_head vr_list;
3002 	struct rhashtable vr_ht;
3003 	enum mlxsw_sp_nexthop_group_type type;
3004 	bool can_destroy;
3005 };
3006 
3007 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
3008 				    struct mlxsw_sp_nexthop *nh)
3009 {
3010 	struct devlink *devlink;
3011 
3012 	devlink = priv_to_devlink(mlxsw_sp->core);
3013 	if (!devlink_dpipe_table_counter_enabled(devlink,
3014 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
3015 		return;
3016 
3017 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3018 		return;
3019 
3020 	nh->counter_valid = true;
3021 }
3022 
3023 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3024 				   struct mlxsw_sp_nexthop *nh)
3025 {
3026 	if (!nh->counter_valid)
3027 		return;
3028 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3029 	nh->counter_valid = false;
3030 }
3031 
3032 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3033 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3034 {
3035 	if (!nh->counter_valid)
3036 		return -EINVAL;
3037 
3038 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3039 					 p_counter, NULL);
3040 }
3041 
3042 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3043 					       struct mlxsw_sp_nexthop *nh)
3044 {
3045 	if (!nh) {
3046 		if (list_empty(&router->nexthop_list))
3047 			return NULL;
3048 		else
3049 			return list_first_entry(&router->nexthop_list,
3050 						typeof(*nh), router_list_node);
3051 	}
3052 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3053 		return NULL;
3054 	return list_next_entry(nh, router_list_node);
3055 }
3056 
3057 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3058 {
3059 	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3060 }
3061 
3062 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3063 {
3064 	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3065 	    !mlxsw_sp_nexthop_is_forward(nh))
3066 		return NULL;
3067 	return nh->neigh_entry->ha;
3068 }
3069 
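/* Report the location of a nexthop within its group's adjacency block: the
 * block's base index and size, and the nexthop's offset within it, which is
 * the sum of the adjacency entries of the offloaded nexthops preceding it.
 */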
3070 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3071 			     u32 *p_adj_size, u32 *p_adj_hash_index)
3072 {
3073 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3074 	u32 adj_hash_index = 0;
3075 	int i;
3076 
3077 	if (!nh->offloaded || !nhgi->adj_index_valid)
3078 		return -EINVAL;
3079 
3080 	*p_adj_index = nhgi->adj_index;
3081 	*p_adj_size = nhgi->ecmp_size;
3082 
3083 	for (i = 0; i < nhgi->count; i++) {
3084 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3085 
3086 		if (nh_iter == nh)
3087 			break;
3088 		if (nh_iter->offloaded)
3089 			adj_hash_index += nh_iter->num_adj_entries;
3090 	}
3091 
3092 	*p_adj_hash_index = adj_hash_index;
3093 	return 0;
3094 }
3095 
3096 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3097 {
3098 	return nh->rif;
3099 }
3100 
3101 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3102 {
3103 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3104 	int i;
3105 
3106 	for (i = 0; i < nhgi->count; i++) {
3107 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3108 
3109 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3110 			return true;
3111 	}
3112 	return false;
3113 }
3114 
3115 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3116 	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3117 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3118 	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3119 	.automatic_shrinking = true,
3120 };
3121 
3122 static struct mlxsw_sp_nexthop_group_vr_entry *
3123 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3124 				       const struct mlxsw_sp_fib *fib)
3125 {
3126 	struct mlxsw_sp_nexthop_group_vr_key key;
3127 
3128 	memset(&key, 0, sizeof(key));
3129 	key.vr_id = fib->vr->id;
3130 	key.proto = fib->proto;
3131 	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3132 				      mlxsw_sp_nexthop_group_vr_ht_params);
3133 }
3134 
3135 static int
3136 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3137 				       const struct mlxsw_sp_fib *fib)
3138 {
3139 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3140 	int err;
3141 
3142 	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3143 	if (!vr_entry)
3144 		return -ENOMEM;
3145 
3146 	vr_entry->key.vr_id = fib->vr->id;
3147 	vr_entry->key.proto = fib->proto;
3148 	refcount_set(&vr_entry->ref_count, 1);
3149 
3150 	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3151 				     mlxsw_sp_nexthop_group_vr_ht_params);
3152 	if (err)
3153 		goto err_hashtable_insert;
3154 
3155 	list_add(&vr_entry->list, &nh_grp->vr_list);
3156 
3157 	return 0;
3158 
3159 err_hashtable_insert:
3160 	kfree(vr_entry);
3161 	return err;
3162 }
3163 
3164 static void
3165 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3166 					struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3167 {
3168 	list_del(&vr_entry->list);
3169 	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3170 			       mlxsw_sp_nexthop_group_vr_ht_params);
3171 	kfree(vr_entry);
3172 }
3173 
3174 static int
3175 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3176 			       const struct mlxsw_sp_fib *fib)
3177 {
3178 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3179 
3180 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3181 	if (vr_entry) {
3182 		refcount_inc(&vr_entry->ref_count);
3183 		return 0;
3184 	}
3185 
3186 	return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3187 }
3188 
3189 static void
3190 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3191 				 const struct mlxsw_sp_fib *fib)
3192 {
3193 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3194 
3195 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3196 	if (WARN_ON_ONCE(!vr_entry))
3197 		return;
3198 
3199 	if (!refcount_dec_and_test(&vr_entry->ref_count))
3200 		return;
3201 
3202 	mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3203 }
3204 
3205 struct mlxsw_sp_nexthop_group_cmp_arg {
3206 	enum mlxsw_sp_nexthop_group_type type;
3207 	union {
3208 		struct fib_info *fi;
3209 		struct mlxsw_sp_fib6_entry *fib6_entry;
3210 		u32 id;
3211 	};
3212 };
3213 
3214 static bool
3215 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3216 				    const struct in6_addr *gw, int ifindex,
3217 				    int weight)
3218 {
3219 	int i;
3220 
3221 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3222 		const struct mlxsw_sp_nexthop *nh;
3223 
3224 		nh = &nh_grp->nhgi->nexthops[i];
3225 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3226 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3227 			return true;
3228 	}
3229 
3230 	return false;
3231 }
3232 
3233 static bool
3234 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3235 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
3236 {
3237 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3238 
3239 	if (nh_grp->nhgi->count != fib6_entry->nrt6)
3240 		return false;
3241 
3242 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3243 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3244 		struct in6_addr *gw;
3245 		int ifindex, weight;
3246 
3247 		ifindex = fib6_nh->fib_nh_dev->ifindex;
3248 		weight = fib6_nh->fib_nh_weight;
3249 		gw = &fib6_nh->fib_nh_gw6;
3250 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3251 							 weight))
3252 			return false;
3253 	}
3254 
3255 	return true;
3256 }
3257 
3258 static int
3259 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3260 {
3261 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3262 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3263 
3264 	if (nh_grp->type != cmp_arg->type)
3265 		return 1;
3266 
3267 	switch (cmp_arg->type) {
3268 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3269 		return cmp_arg->fi != nh_grp->ipv4.fi;
3270 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3271 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3272 						    cmp_arg->fib6_entry);
3273 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3274 		return cmp_arg->id != nh_grp->obj.id;
3275 	default:
3276 		WARN_ON(1);
3277 		return 1;
3278 	}
3279 }
3280 
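/* Hash an existing group. IPv4 groups are identified by their fib_info
 * pointer and object-based groups by their ID. For IPv6, per-nexthop hashes
 * are XORed, so the result does not depend on nexthop order.
 */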
3281 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3282 {
3283 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
3284 	const struct mlxsw_sp_nexthop *nh;
3285 	struct fib_info *fi;
3286 	unsigned int val;
3287 	int i;
3288 
3289 	switch (nh_grp->type) {
3290 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3291 		fi = nh_grp->ipv4.fi;
3292 		return jhash(&fi, sizeof(fi), seed);
3293 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3294 		val = nh_grp->nhgi->count;
3295 		for (i = 0; i < nh_grp->nhgi->count; i++) {
3296 			nh = &nh_grp->nhgi->nexthops[i];
3297 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3298 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3299 		}
3300 		return jhash(&val, sizeof(val), seed);
3301 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3302 		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3303 	default:
3304 		WARN_ON(1);
3305 		return 0;
3306 	}
3307 }
3308 
3309 static u32
3310 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3311 {
3312 	unsigned int val = fib6_entry->nrt6;
3313 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3314 
3315 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3316 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3317 		struct net_device *dev = fib6_nh->fib_nh_dev;
3318 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3319 
3320 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3321 		val ^= jhash(gw, sizeof(*gw), seed);
3322 	}
3323 
3324 	return jhash(&val, sizeof(val), seed);
3325 }
3326 
3327 static u32
3328 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3329 {
3330 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3331 
3332 	switch (cmp_arg->type) {
3333 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3334 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3335 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3336 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3337 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3338 		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3339 	default:
3340 		WARN_ON(1);
3341 		return 0;
3342 	}
3343 }
3344 
3345 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3346 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3347 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3348 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3349 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3350 };
3351 
3352 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3353 					 struct mlxsw_sp_nexthop_group *nh_grp)
3354 {
3355 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3356 	    !nh_grp->nhgi->gateway)
3357 		return 0;
3358 
3359 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3360 				      &nh_grp->ht_node,
3361 				      mlxsw_sp_nexthop_group_ht_params);
3362 }
3363 
3364 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3365 					  struct mlxsw_sp_nexthop_group *nh_grp)
3366 {
3367 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3368 	    !nh_grp->nhgi->gateway)
3369 		return;
3370 
3371 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3372 			       &nh_grp->ht_node,
3373 			       mlxsw_sp_nexthop_group_ht_params);
3374 }
3375 
3376 static struct mlxsw_sp_nexthop_group *
3377 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3378 			       struct fib_info *fi)
3379 {
3380 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3381 
3382 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3383 	cmp_arg.fi = fi;
3384 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3385 				      &cmp_arg,
3386 				      mlxsw_sp_nexthop_group_ht_params);
3387 }
3388 
3389 static struct mlxsw_sp_nexthop_group *
3390 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3391 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3392 {
3393 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3394 
3395 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3396 	cmp_arg.fib6_entry = fib6_entry;
3397 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3398 				      &cmp_arg,
3399 				      mlxsw_sp_nexthop_group_ht_params);
3400 }
3401 
3402 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3403 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3404 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3405 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3406 };
3407 
3408 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3409 				   struct mlxsw_sp_nexthop *nh)
3410 {
3411 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3412 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3413 }
3414 
3415 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3416 				    struct mlxsw_sp_nexthop *nh)
3417 {
3418 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3419 			       mlxsw_sp_nexthop_ht_params);
3420 }
3421 
3422 static struct mlxsw_sp_nexthop *
3423 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3424 			struct mlxsw_sp_nexthop_key key)
3425 {
3426 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3427 				      mlxsw_sp_nexthop_ht_params);
3428 }
3429 
3430 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3431 					     enum mlxsw_sp_l3proto proto,
3432 					     u16 vr_id,
3433 					     u32 adj_index, u16 ecmp_size,
3434 					     u32 new_adj_index,
3435 					     u16 new_ecmp_size)
3436 {
3437 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3438 
3439 	mlxsw_reg_raleu_pack(raleu_pl,
3440 			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3441 			     adj_index, ecmp_size, new_adj_index,
3442 			     new_ecmp_size);
3443 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3444 }
3445 
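/* After a group was moved to a new adjacency index (e.g., following a size
 * change), update all the routes using it in each virtual router the group
 * is bound to. On failure, roll back the virtual routers already updated.
 */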
3446 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3447 					  struct mlxsw_sp_nexthop_group *nh_grp,
3448 					  u32 old_adj_index, u16 old_ecmp_size)
3449 {
3450 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3451 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3452 	int err;
3453 
3454 	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3455 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3456 							vr_entry->key.proto,
3457 							vr_entry->key.vr_id,
3458 							old_adj_index,
3459 							old_ecmp_size,
3460 							nhgi->adj_index,
3461 							nhgi->ecmp_size);
3462 		if (err)
3463 			goto err_mass_update_vr;
3464 	}
3465 	return 0;
3466 
3467 err_mass_update_vr:
3468 	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3469 		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3470 						  vr_entry->key.vr_id,
3471 						  nhgi->adj_index,
3472 						  nhgi->ecmp_size,
3473 						  old_adj_index, old_ecmp_size);
3474 	return err;
3475 }
3476 
3477 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3478 					 u32 adj_index,
3479 					 struct mlxsw_sp_nexthop *nh,
3480 					 bool force, char *ratr_pl)
3481 {
3482 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3483 	enum mlxsw_reg_ratr_op op;
3484 	u16 rif_index;
3485 
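	/* Nexthops that are not associated with a RIF (e.g., those programmed
	 * to discard or trap) are written using the router's loopback RIF.
	 */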
3486 	rif_index = nh->rif ? nh->rif->rif_index :
3487 			      mlxsw_sp->router->lb_rif_index;
3488 	op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3489 		     MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3490 	mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3491 			    adj_index, rif_index);
3492 	switch (nh->action) {
3493 	case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3494 		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3495 		break;
3496 	case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3497 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3498 					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3499 		break;
3500 	case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3501 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3502 					       MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3503 		mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3504 		break;
3505 	default:
3506 		WARN_ON_ONCE(1);
3507 		return -EINVAL;
3508 	}
3509 	if (nh->counter_valid)
3510 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3511 	else
3512 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3513 
3514 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3515 }
3516 
3517 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3518 				struct mlxsw_sp_nexthop *nh, bool force,
3519 				char *ratr_pl)
3520 {
3521 	int i;
3522 
3523 	for (i = 0; i < nh->num_adj_entries; i++) {
3524 		int err;
3525 
3526 		err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3527 						    nh, force, ratr_pl);
3528 		if (err)
3529 			return err;
3530 	}
3531 
3532 	return 0;
3533 }
3534 
3535 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3536 					  u32 adj_index,
3537 					  struct mlxsw_sp_nexthop *nh,
3538 					  bool force, char *ratr_pl)
3539 {
3540 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3541 
3542 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3543 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3544 					force, ratr_pl);
3545 }
3546 
3547 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3548 					u32 adj_index,
3549 					struct mlxsw_sp_nexthop *nh, bool force,
3550 					char *ratr_pl)
3551 {
3552 	int i;
3553 
3554 	for (i = 0; i < nh->num_adj_entries; i++) {
3555 		int err;
3556 
3557 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3558 						     nh, force, ratr_pl);
3559 		if (err)
3560 			return err;
3561 	}
3562 
3563 	return 0;
3564 }
3565 
3566 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3567 				   struct mlxsw_sp_nexthop *nh, bool force,
3568 				   char *ratr_pl)
3569 {
3570 	/* When action is discard or trap, the nexthop must be
3571 	 * programmed as an Ethernet nexthop.
3572 	 */
3573 	if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3574 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3575 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3576 		return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3577 						   force, ratr_pl);
3578 	else
3579 		return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3580 						    force, ratr_pl);
3581 }
3582 
3583 static int
3584 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3585 			      struct mlxsw_sp_nexthop_group_info *nhgi,
3586 			      bool reallocate)
3587 {
3588 	char ratr_pl[MLXSW_REG_RATR_LEN];
3589 	u32 adj_index = nhgi->adj_index; /* base */
3590 	struct mlxsw_sp_nexthop *nh;
3591 	int i;
3592 
3593 	for (i = 0; i < nhgi->count; i++) {
3594 		nh = &nhgi->nexthops[i];
3595 
3596 		if (!nh->should_offload) {
3597 			nh->offloaded = 0;
3598 			continue;
3599 		}
3600 
3601 		if (nh->update || reallocate) {
3602 			int err;
3603 
3604 			err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3605 						      true, ratr_pl);
3606 			if (err)
3607 				return err;
3608 			nh->update = 0;
3609 			nh->offloaded = 1;
3610 		}
3611 		adj_index += nh->num_adj_entries;
3612 	}
3613 	return 0;
3614 }
3615 
3616 static int
3617 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3618 				    struct mlxsw_sp_nexthop_group *nh_grp)
3619 {
3620 	struct mlxsw_sp_fib_entry *fib_entry;
3621 	int err;
3622 
3623 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3624 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3625 		if (err)
3626 			return err;
3627 	}
3628 	return 0;
3629 }
3630 
3631 struct mlxsw_sp_adj_grp_size_range {
3632 	u16 start; /* Inclusive */
3633 	u16 end; /* Inclusive */
3634 };
3635 
3636 /* Ordered by range start value */
3637 static const struct mlxsw_sp_adj_grp_size_range
3638 mlxsw_sp1_adj_grp_size_ranges[] = {
3639 	{ .start = 1, .end = 64 },
3640 	{ .start = 512, .end = 512 },
3641 	{ .start = 1024, .end = 1024 },
3642 	{ .start = 2048, .end = 2048 },
3643 	{ .start = 4096, .end = 4096 },
3644 };
3645 
3646 /* Ordered by range start value */
3647 static const struct mlxsw_sp_adj_grp_size_range
3648 mlxsw_sp2_adj_grp_size_ranges[] = {
3649 	{ .start = 1, .end = 128 },
3650 	{ .start = 256, .end = 256 },
3651 	{ .start = 512, .end = 512 },
3652 	{ .start = 1024, .end = 1024 },
3653 	{ .start = 2048, .end = 2048 },
3654 	{ .start = 4096, .end = 4096 },
3655 };
3656 
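/* Round the requested group size up to the nearest size supported by
 * the device. E.g., with the Spectrum-1 ranges above, a request for 80
 * entries is rounded up to 512, since sizes 65-511 are not supported.
 */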
3657 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3658 					   u16 *p_adj_grp_size)
3659 {
3660 	int i;
3661 
3662 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3663 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3664 
3665 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3666 
3667 		if (*p_adj_grp_size >= size_range->start &&
3668 		    *p_adj_grp_size <= size_range->end)
3669 			return;
3670 
3671 		if (*p_adj_grp_size <= size_range->end) {
3672 			*p_adj_grp_size = size_range->end;
3673 			return;
3674 		}
3675 	}
3676 }
3677 
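/* Round the group size down to the largest supported size that does
 * not exceed the number of entries the allocator can actually provide.
 */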
3678 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3679 					     u16 *p_adj_grp_size,
3680 					     unsigned int alloc_size)
3681 {
3682 	int i;
3683 
3684 	for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3685 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3686 
3687 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3688 
3689 		if (alloc_size >= size_range->end) {
3690 			*p_adj_grp_size = size_range->end;
3691 			return;
3692 		}
3693 	}
3694 }
3695 
3696 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3697 				     u16 *p_adj_grp_size)
3698 {
3699 	unsigned int alloc_size;
3700 	int err;
3701 
3702 	/* Round up the requested group size to the next size supported
3703 	 * by the device and make sure the request can be satisfied.
3704 	 */
3705 	mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3706 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3707 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3708 					      *p_adj_grp_size, &alloc_size);
3709 	if (err)
3710 		return err;
3711 	/* It is possible the allocation results in more allocated
3712 	 * entries than requested. Try to use as many of them as
3713 	 * possible.
3714 	 */
3715 	mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3716 
3717 	return 0;
3718 }
3719 
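/* Divide the nexthop weights by their greatest common divisor, so that
 * the group is expressed using as few adjacency entries as possible.
 * E.g., weights {2, 4} are normalized to {1, 2}, for a normalized
 * weight sum of 3. Nexthops that should not be offloaded are skipped.
 */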
3720 static void
3721 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3722 {
3723 	int i, g = 0, sum_norm_weight = 0;
3724 	struct mlxsw_sp_nexthop *nh;
3725 
3726 	for (i = 0; i < nhgi->count; i++) {
3727 		nh = &nhgi->nexthops[i];
3728 
3729 		if (!nh->should_offload)
3730 			continue;
3731 		if (g > 0)
3732 			g = gcd(nh->nh_weight, g);
3733 		else
3734 			g = nh->nh_weight;
3735 	}
3736 
3737 	for (i = 0; i < nhgi->count; i++) {
3738 		nh = &nhgi->nexthops[i];
3739 
3740 		if (!nh->should_offload)
3741 			continue;
3742 		nh->norm_nh_weight = nh->nh_weight / g;
3743 		sum_norm_weight += nh->norm_nh_weight;
3744 	}
3745 
3746 	nhgi->sum_norm_weight = sum_norm_weight;
3747 }
3748 
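/* Distribute the group's adjacency entries among the nexthops in
 * proportion to their normalized weights. Rounding is applied to the
 * cumulative weight so that errors do not accumulate. E.g., with an
 * ECMP size of 4 and normalized weights {1, 2}, the nexthops are
 * assigned 1 and 3 entries, respectively.
 */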
3749 static void
3750 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3751 {
3752 	int i, weight = 0, lower_bound = 0;
3753 	int total = nhgi->sum_norm_weight;
3754 	u16 ecmp_size = nhgi->ecmp_size;
3755 
3756 	for (i = 0; i < nhgi->count; i++) {
3757 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3758 		int upper_bound;
3759 
3760 		if (!nh->should_offload)
3761 			continue;
3762 		weight += nh->norm_nh_weight;
3763 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3764 		nh->num_adj_entries = upper_bound - lower_bound;
3765 		lower_bound = upper_bound;
3766 	}
3767 }
3768 
3769 static struct mlxsw_sp_nexthop *
3770 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3771 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3772 
3773 static void
3774 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3775 					struct mlxsw_sp_nexthop_group *nh_grp)
3776 {
3777 	int i;
3778 
3779 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3780 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3781 
3782 		if (nh->offloaded)
3783 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3784 		else
3785 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3786 	}
3787 }
3788 
3789 static void
3790 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3791 					  struct mlxsw_sp_fib6_entry *fib6_entry)
3792 {
3793 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3794 
3795 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3796 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3797 		struct mlxsw_sp_nexthop *nh;
3798 
3799 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3800 		if (nh && nh->offloaded)
3801 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3802 		else
3803 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3804 	}
3805 }
3806 
3807 static void
3808 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3809 					struct mlxsw_sp_nexthop_group *nh_grp)
3810 {
3811 	struct mlxsw_sp_fib6_entry *fib6_entry;
3812 
3813 	/* Unfortunately, in IPv6 the route and the nexthop are described by
3814 	 * the same struct, so we need to iterate over all the routes using the
3815 	 * nexthop group and set / clear the offload indication for them.
3816 	 */
3817 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3818 			    common.nexthop_group_node)
3819 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3820 }
3821 
3822 static void
3823 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3824 					const struct mlxsw_sp_nexthop *nh,
3825 					u16 bucket_index)
3826 {
3827 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3828 	bool offload = false, trap = false;
3829 
3830 	if (nh->offloaded) {
3831 		if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3832 			trap = true;
3833 		else
3834 			offload = true;
3835 	}
3836 	nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3837 				    bucket_index, offload, trap);
3838 }
3839 
3840 static void
3841 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3842 					   struct mlxsw_sp_nexthop_group *nh_grp)
3843 {
3844 	int i;
3845 
3846 	/* Do not update the flags if the nexthop group is being destroyed
3847 	 * since:
3848 	 * 1. The nexthop object is being deleted, in which case the flags are
3849 	 * irrelevant.
3850 	 * 2. The nexthop group was replaced by a newer group, in which case
3851 	 * the flags of the nexthop object were already updated based on the
3852 	 * new group.
3853 	 */
3854 	if (nh_grp->can_destroy)
3855 		return;
3856 
3857 	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3858 			     nh_grp->nhgi->adj_index_valid, false);
3859 
3860 	/* Update flags of individual nexthop buckets in case of a resilient
3861 	 * nexthop group.
3862 	 */
3863 	if (!nh_grp->nhgi->is_resilient)
3864 		return;
3865 
3866 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3867 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3868 
3869 		mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3870 	}
3871 }
3872 
3873 static void
3874 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3875 				       struct mlxsw_sp_nexthop_group *nh_grp)
3876 {
3877 	switch (nh_grp->type) {
3878 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3879 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3880 		break;
3881 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3882 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3883 		break;
3884 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3885 		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3886 		break;
3887 	}
3888 }
3889 
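/* Synchronize the group with the device: normalize the nexthop
 * weights, fix the group size to a supported one, allocate a new
 * adjacency block and write the nexthops to it, and then re-point the
 * FIB entries (or mass-update the old block) to the new block. On any
 * failure, fall back to trapping the group's traffic to the CPU.
 */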
3890 static int
3891 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3892 			       struct mlxsw_sp_nexthop_group *nh_grp)
3893 {
3894 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3895 	u16 ecmp_size, old_ecmp_size;
3896 	struct mlxsw_sp_nexthop *nh;
3897 	bool offload_change = false;
3898 	u32 adj_index;
3899 	bool old_adj_index_valid;
3900 	u32 old_adj_index;
3901 	int i, err2, err;
3902 
3903 	if (!nhgi->gateway)
3904 		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3905 
3906 	for (i = 0; i < nhgi->count; i++) {
3907 		nh = &nhgi->nexthops[i];
3908 
3909 		if (nh->should_offload != nh->offloaded) {
3910 			offload_change = true;
3911 			if (nh->should_offload)
3912 				nh->update = 1;
3913 		}
3914 	}
3915 	if (!offload_change) {
3916 		/* Nothing was added or removed, so no need to reallocate. Just
3917 		 * update the MACs on the existing adjacency entries.
3918 		 */
3919 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3920 		if (err) {
3921 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3922 			goto set_trap;
3923 		}
3924 		/* Flags of individual nexthop buckets might need to be
3925 		 * updated.
3926 		 */
3927 		mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3928 		return 0;
3929 	}
3930 	mlxsw_sp_nexthop_group_normalize(nhgi);
3931 	if (!nhgi->sum_norm_weight) {
3932 		/* No neighbour of this group is connected, so we just set
3933 		 * the trap and let everything flow through the kernel.
3934 		 */
3935 		err = 0;
3936 		goto set_trap;
3937 	}
3938 
3939 	ecmp_size = nhgi->sum_norm_weight;
3940 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3941 	if (err)
3942 		/* No valid allocation size available. */
3943 		goto set_trap;
3944 
3945 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3946 				  ecmp_size, &adj_index);
3947 	if (err) {
3948 		/* We ran out of KVD linear space, just set the
3949 		 * trap and let everything flow through the kernel.
3950 		 */
3951 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3952 		goto set_trap;
3953 	}
3954 	old_adj_index_valid = nhgi->adj_index_valid;
3955 	old_adj_index = nhgi->adj_index;
3956 	old_ecmp_size = nhgi->ecmp_size;
3957 	nhgi->adj_index_valid = 1;
3958 	nhgi->adj_index = adj_index;
3959 	nhgi->ecmp_size = ecmp_size;
3960 	mlxsw_sp_nexthop_group_rebalance(nhgi);
3961 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
3962 	if (err) {
3963 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3964 		goto set_trap;
3965 	}
3966 
3967 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3968 
3969 	if (!old_adj_index_valid) {
3970 		/* The trap was set for fib entries, so we have to call
3971 		 * fib entry update to unset it and use the adjacency index.
3972 		 */
3973 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3974 		if (err) {
3975 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3976 			goto set_trap;
3977 		}
3978 		return 0;
3979 	}
3980 
3981 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3982 					     old_adj_index, old_ecmp_size);
3983 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3984 			   old_ecmp_size, old_adj_index);
3985 	if (err) {
3986 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3987 		goto set_trap;
3988 	}
3989 
3990 	return 0;
3991 
3992 set_trap:
3993 	old_adj_index_valid = nhgi->adj_index_valid;
3994 	nhgi->adj_index_valid = 0;
3995 	for (i = 0; i < nhgi->count; i++) {
3996 		nh = &nhgi->nexthops[i];
3997 		nh->offloaded = 0;
3998 	}
3999 	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4000 	if (err2)
4001 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4002 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4003 	if (old_adj_index_valid)
4004 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4005 				   nhgi->ecmp_size, nhgi->adj_index);
4006 	return err;
4007 }
4008 
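/* A nexthop whose neighbour is resolved is programmed to forward. When
 * the neighbour goes away, a nexthop in a resilient group must keep
 * occupying its buckets and is therefore programmed to trap, while in
 * other group types it is simply no longer offloaded.
 */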
4009 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4010 					    bool removing)
4011 {
4012 	if (!removing) {
4013 		nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4014 		nh->should_offload = 1;
4015 	} else if (nh->nhgi->is_resilient) {
4016 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4017 		nh->should_offload = 1;
4018 	} else {
4019 		nh->should_offload = 0;
4020 	}
4021 	nh->update = 1;
4022 }
4023 
4024 static int
4025 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4026 				    struct mlxsw_sp_neigh_entry *neigh_entry)
4027 {
4028 	struct neighbour *n, *old_n = neigh_entry->key.n;
4029 	struct mlxsw_sp_nexthop *nh;
4030 	bool entry_connected;
4031 	u8 nud_state, dead;
4032 	int err;
4033 
4034 	nh = list_first_entry(&neigh_entry->nexthop_list,
4035 			      struct mlxsw_sp_nexthop, neigh_list_node);
4036 
4037 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4038 	if (!n) {
4039 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4040 		if (IS_ERR(n))
4041 			return PTR_ERR(n);
4042 		neigh_event_send(n, NULL);
4043 	}
4044 
4045 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4046 	neigh_entry->key.n = n;
4047 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4048 	if (err)
4049 		goto err_neigh_entry_insert;
4050 
4051 	read_lock_bh(&n->lock);
4052 	nud_state = n->nud_state;
4053 	dead = n->dead;
4054 	read_unlock_bh(&n->lock);
4055 	entry_connected = nud_state & NUD_VALID && !dead;
4056 
4057 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4058 			    neigh_list_node) {
4059 		neigh_release(old_n);
4060 		neigh_clone(n);
4061 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4062 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4063 	}
4064 
4065 	neigh_release(n);
4066 
4067 	return 0;
4068 
4069 err_neigh_entry_insert:
4070 	neigh_entry->key.n = old_n;
4071 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4072 	neigh_release(n);
4073 	return err;
4074 }
4075 
4076 static void
4077 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4078 			      struct mlxsw_sp_neigh_entry *neigh_entry,
4079 			      bool removing, bool dead)
4080 {
4081 	struct mlxsw_sp_nexthop *nh;
4082 
4083 	if (list_empty(&neigh_entry->nexthop_list))
4084 		return;
4085 
4086 	if (dead) {
4087 		int err;
4088 
4089 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4090 							  neigh_entry);
4091 		if (err)
4092 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4093 		return;
4094 	}
4095 
4096 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4097 			    neigh_list_node) {
4098 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4099 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4100 	}
4101 }
4102 
4103 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
4104 				      struct mlxsw_sp_rif *rif)
4105 {
4106 	if (nh->rif)
4107 		return;
4108 
4109 	nh->rif = rif;
4110 	list_add(&nh->rif_list_node, &rif->nexthop_list);
4111 }
4112 
4113 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
4114 {
4115 	if (!nh->rif)
4116 		return;
4117 
4118 	list_del(&nh->rif_list_node);
4119 	nh->rif = NULL;
4120 }
4121 
4122 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4123 				       struct mlxsw_sp_nexthop *nh)
4124 {
4125 	struct mlxsw_sp_neigh_entry *neigh_entry;
4126 	struct neighbour *n;
4127 	u8 nud_state, dead;
4128 	int err;
4129 
4130 	if (!nh->nhgi->gateway || nh->neigh_entry)
4131 		return 0;
4132 
4133 	/* Take a reference on the neighbour, ensuring that it is not
4134 	 * destroyed before the nexthop entry is finished with it.
4135 	 * The reference is taken either by neigh_lookup() or by
4136 	 * neigh_create() in case n is not found.
4137 	 */
4138 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4139 	if (!n) {
4140 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4141 		if (IS_ERR(n))
4142 			return PTR_ERR(n);
4143 		neigh_event_send(n, NULL);
4144 	}
4145 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4146 	if (!neigh_entry) {
4147 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4148 		if (IS_ERR(neigh_entry)) {
4149 			err = -EINVAL;
4150 			goto err_neigh_entry_create;
4151 		}
4152 	}
4153 
4154 	/* If this is the first nexthop connected to this neighbour, add it
4155 	 * to nexthop_neighs_list.
4156 	 */
4157 	if (list_empty(&neigh_entry->nexthop_list))
4158 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4159 			      &mlxsw_sp->router->nexthop_neighs_list);
4160 
4161 	nh->neigh_entry = neigh_entry;
4162 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4163 	read_lock_bh(&n->lock);
4164 	nud_state = n->nud_state;
4165 	dead = n->dead;
4166 	read_unlock_bh(&n->lock);
4167 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4168 
4169 	return 0;
4170 
4171 err_neigh_entry_create:
4172 	neigh_release(n);
4173 	return err;
4174 }
4175 
4176 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4177 					struct mlxsw_sp_nexthop *nh)
4178 {
4179 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4180 	struct neighbour *n;
4181 
4182 	if (!neigh_entry)
4183 		return;
4184 	n = neigh_entry->key.n;
4185 
4186 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4187 	list_del(&nh->neigh_list_node);
4188 	nh->neigh_entry = NULL;
4189 
4190 	/* If this is the last nexthop connected to this neighbour, remove it
4191 	 * from nexthop_neighs_list.
4192 	 */
4193 	if (list_empty(&neigh_entry->nexthop_list))
4194 		list_del(&neigh_entry->nexthop_neighs_list_node);
4195 
4196 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4197 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4198 
4199 	neigh_release(n);
4200 }
4201 
4202 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4203 {
4204 	struct net_device *ul_dev;
4205 	bool is_up;
4206 
4207 	rcu_read_lock();
4208 	ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4209 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4210 	rcu_read_unlock();
4211 
4212 	return is_up;
4213 }
4214 
4215 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4216 				       struct mlxsw_sp_nexthop *nh,
4217 				       struct mlxsw_sp_ipip_entry *ipip_entry)
4218 {
4219 	bool removing;
4220 
4221 	if (!nh->nhgi->gateway || nh->ipip_entry)
4222 		return;
4223 
4224 	nh->ipip_entry = ipip_entry;
4225 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4226 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
4227 	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4228 }
4229 
4230 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4231 				       struct mlxsw_sp_nexthop *nh)
4232 {
4233 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4234 
4235 	if (!ipip_entry)
4236 		return;
4237 
4238 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4239 	nh->ipip_entry = NULL;
4240 }
4241 
4242 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4243 					const struct fib_nh *fib_nh,
4244 					enum mlxsw_sp_ipip_type *p_ipipt)
4245 {
4246 	struct net_device *dev = fib_nh->fib_nh_dev;
4247 
4248 	return dev &&
4249 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4250 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4251 }
4252 
4253 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4254 				      struct mlxsw_sp_nexthop *nh,
4255 				      const struct net_device *dev)
4256 {
4257 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4258 	struct mlxsw_sp_ipip_entry *ipip_entry;
4259 	struct mlxsw_sp_rif *rif;
4260 	int err;
4261 
4262 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4263 	if (ipip_entry) {
4264 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4265 		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4266 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4267 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4268 			return 0;
4269 		}
4270 	}
4271 
4272 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4273 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4274 	if (!rif)
4275 		return 0;
4276 
4277 	mlxsw_sp_nexthop_rif_init(nh, rif);
4278 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4279 	if (err)
4280 		goto err_neigh_init;
4281 
4282 	return 0;
4283 
4284 err_neigh_init:
4285 	mlxsw_sp_nexthop_rif_fini(nh);
4286 	return err;
4287 }
4288 
4289 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4290 				       struct mlxsw_sp_nexthop *nh)
4291 {
4292 	switch (nh->type) {
4293 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4294 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4295 		mlxsw_sp_nexthop_rif_fini(nh);
4296 		break;
4297 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4298 		mlxsw_sp_nexthop_rif_fini(nh);
4299 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4300 		break;
4301 	}
4302 }
4303 
4304 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4305 				  struct mlxsw_sp_nexthop_group *nh_grp,
4306 				  struct mlxsw_sp_nexthop *nh,
4307 				  struct fib_nh *fib_nh)
4308 {
4309 	struct net_device *dev = fib_nh->fib_nh_dev;
4310 	struct in_device *in_dev;
4311 	int err;
4312 
4313 	nh->nhgi = nh_grp->nhgi;
4314 	nh->key.fib_nh = fib_nh;
4315 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4316 	nh->nh_weight = fib_nh->fib_nh_weight;
4317 #else
4318 	nh->nh_weight = 1;
4319 #endif
4320 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4321 	nh->neigh_tbl = &arp_tbl;
4322 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4323 	if (err)
4324 		return err;
4325 
4326 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4327 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4328 
4329 	if (!dev)
4330 		return 0;
4331 	nh->ifindex = dev->ifindex;
4332 
4333 	rcu_read_lock();
4334 	in_dev = __in_dev_get_rcu(dev);
4335 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4336 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4337 		rcu_read_unlock();
4338 		return 0;
4339 	}
4340 	rcu_read_unlock();
4341 
4342 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4343 	if (err)
4344 		goto err_nexthop_neigh_init;
4345 
4346 	return 0;
4347 
4348 err_nexthop_neigh_init:
4349 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4350 	return err;
4351 }
4352 
4353 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4354 				   struct mlxsw_sp_nexthop *nh)
4355 {
4356 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4357 	list_del(&nh->router_list_node);
4358 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4359 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4360 }
4361 
4362 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4363 				    unsigned long event, struct fib_nh *fib_nh)
4364 {
4365 	struct mlxsw_sp_nexthop_key key;
4366 	struct mlxsw_sp_nexthop *nh;
4367 
4368 	key.fib_nh = fib_nh;
4369 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4370 	if (!nh)
4371 		return;
4372 
4373 	switch (event) {
4374 	case FIB_EVENT_NH_ADD:
4375 		mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4376 		break;
4377 	case FIB_EVENT_NH_DEL:
4378 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4379 		break;
4380 	}
4381 
4382 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4383 }
4384 
4385 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4386 					struct mlxsw_sp_rif *rif)
4387 {
4388 	struct mlxsw_sp_nexthop *nh;
4389 	bool removing;
4390 
4391 	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4392 		switch (nh->type) {
4393 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
4394 			removing = false;
4395 			break;
4396 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4397 			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4398 			break;
4399 		default:
4400 			WARN_ON(1);
4401 			continue;
4402 		}
4403 
4404 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4405 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4406 	}
4407 }
4408 
4409 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4410 					 struct mlxsw_sp_rif *old_rif,
4411 					 struct mlxsw_sp_rif *new_rif)
4412 {
4413 	struct mlxsw_sp_nexthop *nh;
4414 
4415 	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4416 	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4417 		nh->rif = new_rif;
4418 	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4419 }
4420 
4421 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4422 					   struct mlxsw_sp_rif *rif)
4423 {
4424 	struct mlxsw_sp_nexthop *nh, *tmp;
4425 
4426 	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4427 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4428 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4429 	}
4430 }
4431 
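/* Allocate a single adjacency entry that traps packets to the CPU and
 * bind it to the loopback RIF created during init. The entry is shared
 * by all nexthop groups: it is created when the first group is
 * instantiated and freed when the last one is destroyed.
 */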
4432 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4433 {
4434 	enum mlxsw_reg_ratr_trap_action trap_action;
4435 	char ratr_pl[MLXSW_REG_RATR_LEN];
4436 	int err;
4437 
4438 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4439 				  &mlxsw_sp->router->adj_trap_index);
4440 	if (err)
4441 		return err;
4442 
4443 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4444 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4445 			    MLXSW_REG_RATR_TYPE_ETHERNET,
4446 			    mlxsw_sp->router->adj_trap_index,
4447 			    mlxsw_sp->router->lb_rif_index);
4448 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4449 	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4450 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4451 	if (err)
4452 		goto err_ratr_write;
4453 
4454 	return 0;
4455 
4456 err_ratr_write:
4457 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4458 			   mlxsw_sp->router->adj_trap_index);
4459 	return err;
4460 }
4461 
4462 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4463 {
4464 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4465 			   mlxsw_sp->router->adj_trap_index);
4466 }
4467 
4468 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4469 {
4470 	int err;
4471 
4472 	if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4473 		return 0;
4474 
4475 	err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4476 	if (err)
4477 		return err;
4478 
4479 	refcount_set(&mlxsw_sp->router->num_groups, 1);
4480 
4481 	return 0;
4482 }
4483 
4484 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4485 {
4486 	if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4487 		return;
4488 
4489 	mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4490 }
4491 
4492 static void
4493 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4494 			     const struct mlxsw_sp_nexthop_group *nh_grp,
4495 			     unsigned long *activity)
4496 {
4497 	char *ratrad_pl;
4498 	int i, err;
4499 
4500 	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4501 	if (!ratrad_pl)
4502 		return;
4503 
4504 	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4505 			      nh_grp->nhgi->count);
4506 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4507 	if (err)
4508 		goto out;
4509 
4510 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4511 		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4512 			continue;
4513 		bitmap_set(activity, i, 1);
4514 	}
4515 
4516 out:
4517 	kfree(ratrad_pl);
4518 }
4519 
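/* The activity of the buckets of resilient nexthop groups is
 * periodically queried (and thereby cleared) and reported to the core
 * nexthop code, which uses it to drive the buckets' idle timers.
 */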
4520 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4521 
4522 static void
4523 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4524 				const struct mlxsw_sp_nexthop_group *nh_grp)
4525 {
4526 	unsigned long *activity;
4527 
4528 	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4529 	if (!activity)
4530 		return;
4531 
4532 	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4533 	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4534 					nh_grp->nhgi->count, activity);
4535 
4536 	bitmap_free(activity);
4537 }
4538 
4539 static void
4540 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4541 {
4542 	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4543 
4544 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4545 			       msecs_to_jiffies(interval));
4546 }
4547 
4548 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4549 {
4550 	struct mlxsw_sp_nexthop_group_info *nhgi;
4551 	struct mlxsw_sp_router *router;
4552 	bool reschedule = false;
4553 
4554 	router = container_of(work, struct mlxsw_sp_router,
4555 			      nh_grp_activity_dw.work);
4556 
4557 	mutex_lock(&router->lock);
4558 
4559 	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4560 		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4561 		reschedule = true;
4562 	}
4563 
4564 	mutex_unlock(&router->lock);
4565 
4566 	if (!reschedule)
4567 		return;
4568 	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4569 }
4570 
4571 static int
4572 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4573 				     const struct nh_notifier_single_info *nh,
4574 				     struct netlink_ext_ack *extack)
4575 {
4576 	int err = -EINVAL;
4577 
4578 	if (nh->is_fdb)
4579 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4580 	else if (nh->has_encap)
4581 		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4582 	else
4583 		err = 0;
4584 
4585 	return err;
4586 }
4587 
4588 static int
4589 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4590 					  const struct nh_notifier_single_info *nh,
4591 					  struct netlink_ext_ack *extack)
4592 {
4593 	int err;
4594 
4595 	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4596 	if (err)
4597 		return err;
4598 
4599 	/* Device-only nexthops with an IPIP device are programmed as
4600 	 * encapsulating adjacency entries.
4601 	 */
4602 	if (!nh->gw_family && !nh->is_reject &&
4603 	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4604 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4605 		return -EINVAL;
4606 	}
4607 
4608 	return 0;
4609 }
4610 
4611 static int
4612 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4613 				    const struct nh_notifier_grp_info *nh_grp,
4614 				    struct netlink_ext_ack *extack)
4615 {
4616 	int i;
4617 
4618 	if (nh_grp->is_fdb) {
4619 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4620 		return -EINVAL;
4621 	}
4622 
4623 	for (i = 0; i < nh_grp->num_nh; i++) {
4624 		const struct nh_notifier_single_info *nh;
4625 		int err;
4626 
4627 		nh = &nh_grp->nh_entries[i].nh;
4628 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4629 								extack);
4630 		if (err)
4631 			return err;
4632 	}
4633 
4634 	return 0;
4635 }
4636 
4637 static int
4638 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4639 					     const struct nh_notifier_res_table_info *nh_res_table,
4640 					     struct netlink_ext_ack *extack)
4641 {
4642 	unsigned int alloc_size;
4643 	bool valid_size = false;
4644 	int err, i;
4645 
4646 	if (nh_res_table->num_nh_buckets < 32) {
4647 		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4648 		return -EINVAL;
4649 	}
4650 
4651 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4652 		const struct mlxsw_sp_adj_grp_size_range *size_range;
4653 
4654 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4655 
4656 		if (nh_res_table->num_nh_buckets >= size_range->start &&
4657 		    nh_res_table->num_nh_buckets <= size_range->end) {
4658 			valid_size = true;
4659 			break;
4660 		}
4661 	}
4662 
4663 	if (!valid_size) {
4664 		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4665 		return -EINVAL;
4666 	}
4667 
4668 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4669 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4670 					      nh_res_table->num_nh_buckets,
4671 					      &alloc_size);
4672 	if (err || nh_res_table->num_nh_buckets != alloc_size) {
4673 		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4674 		return -EINVAL;
4675 	}
4676 
4677 	return 0;
4678 }
4679 
4680 static int
4681 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4682 					const struct nh_notifier_res_table_info *nh_res_table,
4683 					struct netlink_ext_ack *extack)
4684 {
4685 	int err;
4686 	u16 i;
4687 
4688 	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4689 							   nh_res_table,
4690 							   extack);
4691 	if (err)
4692 		return err;
4693 
4694 	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4695 		const struct nh_notifier_single_info *nh;
4696 		int err;
4697 
4698 		nh = &nh_res_table->nhs[i];
4699 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4700 								extack);
4701 		if (err)
4702 			return err;
4703 	}
4704 
4705 	return 0;
4706 }
4707 
4708 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4709 					 unsigned long event,
4710 					 struct nh_notifier_info *info)
4711 {
4712 	struct nh_notifier_single_info *nh;
4713 
4714 	if (event != NEXTHOP_EVENT_REPLACE &&
4715 	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4716 	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
4717 		return 0;
4718 
4719 	switch (info->type) {
4720 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4721 		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4722 							    info->extack);
4723 	case NH_NOTIFIER_INFO_TYPE_GRP:
4724 		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4725 							   info->nh_grp,
4726 							   info->extack);
4727 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4728 		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4729 							       info->nh_res_table,
4730 							       info->extack);
4731 	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4732 		nh = &info->nh_res_bucket->new_nh;
4733 		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4734 								 info->extack);
4735 	default:
4736 		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4737 		return -EOPNOTSUPP;
4738 	}
4739 }
4740 
4741 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4742 					    const struct nh_notifier_info *info)
4743 {
4744 	const struct net_device *dev;
4745 
4746 	switch (info->type) {
4747 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4748 		dev = info->nh->dev;
4749 		return info->nh->gw_family || info->nh->is_reject ||
4750 		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4751 	case NH_NOTIFIER_INFO_TYPE_GRP:
4752 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4753 		/* Already validated earlier. */
4754 		return true;
4755 	default:
4756 		return false;
4757 	}
4758 }
4759 
4760 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4761 						struct mlxsw_sp_nexthop *nh)
4762 {
4763 	u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
4764 
4765 	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4766 	nh->should_offload = 1;
4767 	/* While nexthops that discard packets do not forward packets
4768 	 * via an egress RIF, they still need to be programmed using a
4769 	 * valid RIF, so use the loopback RIF created during init.
4770 	 */
4771 	nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
4772 }
4773 
4774 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4775 						struct mlxsw_sp_nexthop *nh)
4776 {
4777 	nh->rif = NULL;
4778 	nh->should_offload = 0;
4779 }
4780 
4781 static int
4782 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4783 			  struct mlxsw_sp_nexthop_group *nh_grp,
4784 			  struct mlxsw_sp_nexthop *nh,
4785 			  struct nh_notifier_single_info *nh_obj, int weight)
4786 {
4787 	struct net_device *dev = nh_obj->dev;
4788 	int err;
4789 
4790 	nh->nhgi = nh_grp->nhgi;
4791 	nh->nh_weight = weight;
4792 
4793 	switch (nh_obj->gw_family) {
4794 	case AF_INET:
4795 		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4796 		nh->neigh_tbl = &arp_tbl;
4797 		break;
4798 	case AF_INET6:
4799 		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
4800 #if IS_ENABLED(CONFIG_IPV6)
4801 		nh->neigh_tbl = &nd_tbl;
4802 #endif
4803 		break;
4804 	}
4805 
4806 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4807 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4808 	nh->ifindex = dev->ifindex;
4809 
4810 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4811 	if (err)
4812 		goto err_type_init;
4813 
4814 	if (nh_obj->is_reject)
4815 		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
4816 
4817 	/* In a resilient nexthop group, all the nexthops must be written to
4818 	 * the adjacency table, even if they do not have a valid neighbour or
4819 	 * RIF.
4820 	 */
4821 	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
4822 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4823 		nh->should_offload = 1;
4824 	}
4825 
4826 	return 0;
4827 
4828 err_type_init:
4829 	list_del(&nh->router_list_node);
4830 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4831 	return err;
4832 }
4833 
4834 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
4835 				      struct mlxsw_sp_nexthop *nh)
4836 {
4837 	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
4838 		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
4839 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4840 	list_del(&nh->router_list_node);
4841 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4842 	nh->should_offload = 0;
4843 }
4844 
4845 static int
4846 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
4847 				     struct mlxsw_sp_nexthop_group *nh_grp,
4848 				     struct nh_notifier_info *info)
4849 {
4850 	struct mlxsw_sp_nexthop_group_info *nhgi;
4851 	struct mlxsw_sp_nexthop *nh;
4852 	bool is_resilient = false;
4853 	unsigned int nhs;
4854 	int err, i;
4855 
4856 	switch (info->type) {
4857 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
4858 		nhs = 1;
4859 		break;
4860 	case NH_NOTIFIER_INFO_TYPE_GRP:
4861 		nhs = info->nh_grp->num_nh;
4862 		break;
4863 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4864 		nhs = info->nh_res_table->num_nh_buckets;
4865 		is_resilient = true;
4866 		break;
4867 	default:
4868 		return -EINVAL;
4869 	}
4870 
4871 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4872 	if (!nhgi)
4873 		return -ENOMEM;
4874 	nh_grp->nhgi = nhgi;
4875 	nhgi->nh_grp = nh_grp;
4876 	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
4877 	nhgi->is_resilient = is_resilient;
4878 	nhgi->count = nhs;
4879 	for (i = 0; i < nhgi->count; i++) {
4880 		struct nh_notifier_single_info *nh_obj;
4881 		int weight;
4882 
4883 		nh = &nhgi->nexthops[i];
4884 		switch (info->type) {
4885 		case NH_NOTIFIER_INFO_TYPE_SINGLE:
4886 			nh_obj = info->nh;
4887 			weight = 1;
4888 			break;
4889 		case NH_NOTIFIER_INFO_TYPE_GRP:
4890 			nh_obj = &info->nh_grp->nh_entries[i].nh;
4891 			weight = info->nh_grp->nh_entries[i].weight;
4892 			break;
4893 		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4894 			nh_obj = &info->nh_res_table->nhs[i];
4895 			weight = 1;
4896 			break;
4897 		default:
4898 			err = -EINVAL;
4899 			goto err_nexthop_obj_init;
4900 		}
4901 		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
4902 						weight);
4903 		if (err)
4904 			goto err_nexthop_obj_init;
4905 	}
4906 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
4907 	if (err)
4908 		goto err_group_inc;
4909 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4910 	if (err) {
4911 		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
4912 		goto err_group_refresh;
4913 	}
4914 
4915 	/* Add resilient nexthop groups to a list so that the activity of their
4916 	 * nexthop buckets will be periodically queried and cleared.
4917 	 */
4918 	if (nhgi->is_resilient) {
4919 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4920 			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
4921 		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
4922 	}
4923 
4924 	return 0;
4925 
4926 err_group_refresh:
4927 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
4928 err_group_inc:
4929 	i = nhgi->count;
4930 err_nexthop_obj_init:
4931 	for (i--; i >= 0; i--) {
4932 		nh = &nhgi->nexthops[i];
4933 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4934 	}
4935 	kfree(nhgi);
4936 	return err;
4937 }
4938 
4939 static void
4940 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
4941 				     struct mlxsw_sp_nexthop_group *nh_grp)
4942 {
4943 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4944 	struct mlxsw_sp_router *router = mlxsw_sp->router;
4945 	int i;
4946 
4947 	if (nhgi->is_resilient) {
4948 		list_del(&nhgi->list);
4949 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4950 			cancel_delayed_work(&router->nh_grp_activity_dw);
4951 	}
4952 
4953 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
4954 	for (i = nhgi->count - 1; i >= 0; i--) {
4955 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4956 
4957 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4958 	}
4959 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4960 	WARN_ON_ONCE(nhgi->adj_index_valid);
4961 	kfree(nhgi);
4962 }
4963 
4964 static struct mlxsw_sp_nexthop_group *
4965 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
4966 				  struct nh_notifier_info *info)
4967 {
4968 	struct mlxsw_sp_nexthop_group *nh_grp;
4969 	int err;
4970 
4971 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
4972 	if (!nh_grp)
4973 		return ERR_PTR(-ENOMEM);
4974 	INIT_LIST_HEAD(&nh_grp->vr_list);
4975 	err = rhashtable_init(&nh_grp->vr_ht,
4976 			      &mlxsw_sp_nexthop_group_vr_ht_params);
4977 	if (err)
4978 		goto err_nexthop_group_vr_ht_init;
4979 	INIT_LIST_HEAD(&nh_grp->fib_list);
4980 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
4981 	nh_grp->obj.id = info->id;
4982 
4983 	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
4984 	if (err)
4985 		goto err_nexthop_group_info_init;
4986 
4987 	nh_grp->can_destroy = false;
4988 
4989 	return nh_grp;
4990 
4991 err_nexthop_group_info_init:
4992 	rhashtable_destroy(&nh_grp->vr_ht);
4993 err_nexthop_group_vr_ht_init:
4994 	kfree(nh_grp);
4995 	return ERR_PTR(err);
4996 }
4997 
4998 static void
4999 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5000 				   struct mlxsw_sp_nexthop_group *nh_grp)
5001 {
5002 	if (!nh_grp->can_destroy)
5003 		return;
5004 	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5005 	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5006 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5007 	rhashtable_destroy(&nh_grp->vr_ht);
5008 	kfree(nh_grp);
5009 }
5010 
5011 static struct mlxsw_sp_nexthop_group *
5012 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5013 {
5014 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5015 
5016 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5017 	cmp_arg.id = id;
5018 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5019 				      &cmp_arg,
5020 				      mlxsw_sp_nexthop_group_ht_params);
5021 }
5022 
5023 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5024 					  struct mlxsw_sp_nexthop_group *nh_grp)
5025 {
5026 	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5027 }
5028 
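/* Replace the contents of a nexthop group without replacing the group
 * structure that routes point to: swap the group info between the old
 * group and the newly created shell, update the device accordingly and
 * destroy the shell together with the old info.
 */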
5029 static int
5030 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5031 				   struct mlxsw_sp_nexthop_group *nh_grp,
5032 				   struct mlxsw_sp_nexthop_group *old_nh_grp,
5033 				   struct netlink_ext_ack *extack)
5034 {
5035 	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5036 	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5037 	int err;
5038 
5039 	old_nh_grp->nhgi = new_nhgi;
5040 	new_nhgi->nh_grp = old_nh_grp;
5041 	nh_grp->nhgi = old_nhgi;
5042 	old_nhgi->nh_grp = nh_grp;
5043 
5044 	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5045 		/* Both the old adjacency index and the new one are valid.
5046 		 * Routes are currently using the old one. Tell the device to
5047 		 * replace the old adjacency index with the new one.
5048 		 */
5049 		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5050 						     old_nhgi->adj_index,
5051 						     old_nhgi->ecmp_size);
5052 		if (err) {
5053 			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5054 			goto err_out;
5055 		}
5056 	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5057 		/* The old adjacency index is valid, while the new one is not.
5058 		 * Iterate over all the routes using the group and change them
5059 		 * to trap packets to the CPU.
5060 		 */
5061 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5062 		if (err) {
5063 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5064 			goto err_out;
5065 		}
5066 	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5067 		/* The old adjacency index is invalid, while the new one is.
5068 		 * Iterate over all the routes using the group and change them
5069 		 * to forward packets using the new valid index.
5070 		 */
5071 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5072 		if (err) {
5073 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5074 			goto err_out;
5075 		}
5076 	}
5077 
5078 	/* Make sure the flags are set / cleared based on the new nexthop group
5079 	 * information.
5080 	 */
5081 	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5082 
5083 	/* At this point 'nh_grp' is just a shell that is not used by anyone
5084 	 * and its nexthop group info is the old info that was just replaced
5085 	 * with the new one. Remove it.
5086 	 */
5087 	nh_grp->can_destroy = true;
5088 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5089 
5090 	return 0;
5091 
5092 err_out:
5093 	old_nhgi->nh_grp = old_nh_grp;
5094 	nh_grp->nhgi = new_nhgi;
5095 	new_nhgi->nh_grp = nh_grp;
5096 	old_nh_grp->nhgi = old_nhgi;
5097 	return err;
5098 }
5099 
5100 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5101 				    struct nh_notifier_info *info)
5102 {
5103 	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5104 	struct netlink_ext_ack *extack = info->extack;
5105 	int err;
5106 
5107 	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5108 	if (IS_ERR(nh_grp))
5109 		return PTR_ERR(nh_grp);
5110 
5111 	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5112 	if (!old_nh_grp)
5113 		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5114 	else
5115 		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5116 							 old_nh_grp, extack);
5117 
5118 	if (err) {
5119 		nh_grp->can_destroy = true;
5120 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5121 	}
5122 
5123 	return err;
5124 }
5125 
5126 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5127 				     struct nh_notifier_info *info)
5128 {
5129 	struct mlxsw_sp_nexthop_group *nh_grp;
5130 
5131 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5132 	if (!nh_grp)
5133 		return;
5134 
5135 	nh_grp->can_destroy = true;
5136 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5137 
5138 	/* If the group still has routes using it, then defer the delete
5139 	 * operation until the last route using it is deleted.
5140 	 */
5141 	if (!list_empty(&nh_grp->fib_list))
5142 		return;
5143 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5144 }
5145 
5146 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5147 					     u32 adj_index, char *ratr_pl)
5148 {
5149 	MLXSW_REG_ZERO(ratr, ratr_pl);
5150 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5151 	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5152 	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5153 
5154 	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5155 }
5156 
5157 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5158 {
5159 	/* Clear the opcode and activity on both the old and new payloads, as
5160 	 * they are irrelevant to the comparison.
5161 	 */
5162 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5163 	mlxsw_reg_ratr_a_set(ratr_pl, 0);
5164 	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5165 	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5166 
5167 	/* If the contents of the adjacency entry are consistent with the
5168 	 * replacement request, then replacement was successful.
5169 	 */
5170 	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5171 		return 0;
5172 
5173 	return -EINVAL;
5174 }
5175 
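/* Overwrite the bucket's adjacency entry, using the write-on-activity
 * operation when a non-forced replacement was requested, and then read
 * the entry back to verify that the device actually performed the
 * write.
 */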
5176 static int
5177 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5178 				       struct mlxsw_sp_nexthop *nh,
5179 				       struct nh_notifier_info *info)
5180 {
5181 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5182 	struct netlink_ext_ack *extack = info->extack;
5183 	bool force = info->nh_res_bucket->force;
5184 	char ratr_pl_new[MLXSW_REG_RATR_LEN];
5185 	char ratr_pl[MLXSW_REG_RATR_LEN];
5186 	u32 adj_index;
5187 	int err;
5188 
5189 	/* No point in trying an atomic replacement if the idle timer interval
5190 	 * is smaller than the interval in which we query and clear activity.
5191 	 */
5192 	if (!force && info->nh_res_bucket->idle_timer_ms <
5193 	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5194 		force = true;
5195 
5196 	adj_index = nh->nhgi->adj_index + bucket_index;
5197 	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5198 	if (err) {
5199 		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5200 		return err;
5201 	}
5202 
5203 	if (!force) {
5204 		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5205 							ratr_pl_new);
5206 		if (err) {
5207 			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5208 			return err;
5209 		}
5210 
5211 		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5212 		if (err) {
5213 			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5214 			return err;
5215 		}
5216 	}
5217 
5218 	nh->update = 0;
5219 	nh->offloaded = 1;
5220 	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5221 
5222 	return 0;
5223 }
5224 
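/* Replace a single bucket of a resilient nexthop group: tear down the
 * nexthop currently occupying the bucket, initialize the new one and
 * update the bucket's adjacency entry. On failure, restore the old
 * nexthop.
 */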
5225 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5226 					       struct nh_notifier_info *info)
5227 {
5228 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5229 	struct netlink_ext_ack *extack = info->extack;
5230 	struct mlxsw_sp_nexthop_group_info *nhgi;
5231 	struct nh_notifier_single_info *nh_obj;
5232 	struct mlxsw_sp_nexthop_group *nh_grp;
5233 	struct mlxsw_sp_nexthop *nh;
5234 	int err;
5235 
5236 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5237 	if (!nh_grp) {
5238 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5239 		return -EINVAL;
5240 	}
5241 
5242 	nhgi = nh_grp->nhgi;
5243 
5244 	if (bucket_index >= nhgi->count) {
5245 		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5246 		return -EINVAL;
5247 	}
5248 
5249 	nh = &nhgi->nexthops[bucket_index];
5250 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5251 
5252 	nh_obj = &info->nh_res_bucket->new_nh;
5253 	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5254 	if (err) {
5255 		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5256 		goto err_nexthop_obj_init;
5257 	}
5258 
5259 	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5260 	if (err)
5261 		goto err_nexthop_obj_bucket_adj_update;
5262 
5263 	return 0;
5264 
5265 err_nexthop_obj_bucket_adj_update:
5266 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5267 err_nexthop_obj_init:
5268 	nh_obj = &info->nh_res_bucket->old_nh;
5269 	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5270 	/* The old adjacency entry was not overwritten */
5271 	nh->update = 0;
5272 	nh->offloaded = 1;
5273 	return err;
5274 }
5275 
5276 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5277 				      unsigned long event, void *ptr)
5278 {
5279 	struct nh_notifier_info *info = ptr;
5280 	struct mlxsw_sp_router *router;
5281 	int err = 0;
5282 
5283 	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5284 	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5285 	if (err)
5286 		goto out;
5287 
5288 	mutex_lock(&router->lock);
5289 
5290 	switch (event) {
5291 	case NEXTHOP_EVENT_REPLACE:
5292 		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5293 		break;
5294 	case NEXTHOP_EVENT_DEL:
5295 		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5296 		break;
5297 	case NEXTHOP_EVENT_BUCKET_REPLACE:
5298 		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5299 							  info);
5300 		break;
5301 	default:
5302 		break;
5303 	}
5304 
5305 	mutex_unlock(&router->lock);
5306 
5307 out:
5308 	return notifier_from_errno(err);
5309 }
5310 
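/* An IPv4 route requires adjacency entries if its first nexthop is
 * resolved via a gateway (nexthop scope is link) or egresses through an
 * IPIP device.
 */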
5311 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5312 				   struct fib_info *fi)
5313 {
5314 	const struct fib_nh *nh = fib_info_nh(fi, 0);
5315 
5316 	return nh->fib_nh_scope == RT_SCOPE_LINK ||
5317 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5318 }
5319 
5320 static int
5321 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5322 				  struct mlxsw_sp_nexthop_group *nh_grp)
5323 {
5324 	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5325 	struct mlxsw_sp_nexthop_group_info *nhgi;
5326 	struct mlxsw_sp_nexthop *nh;
5327 	int err, i;
5328 
5329 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5330 	if (!nhgi)
5331 		return -ENOMEM;
5332 	nh_grp->nhgi = nhgi;
5333 	nhgi->nh_grp = nh_grp;
5334 	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5335 	nhgi->count = nhs;
5336 	for (i = 0; i < nhgi->count; i++) {
5337 		struct fib_nh *fib_nh;
5338 
5339 		nh = &nhgi->nexthops[i];
5340 		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5341 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5342 		if (err)
5343 			goto err_nexthop4_init;
5344 	}
5345 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5346 	if (err)
5347 		goto err_group_inc;
5348 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5349 	if (err)
5350 		goto err_group_refresh;
5351 
5352 	return 0;
5353 
5354 err_group_refresh:
5355 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5356 err_group_inc:
5357 	i = nhgi->count;
5358 err_nexthop4_init:
5359 	for (i--; i >= 0; i--) {
5360 		nh = &nhgi->nexthops[i];
5361 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5362 	}
5363 	kfree(nhgi);
5364 	return err;
5365 }
5366 
5367 static void
5368 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5369 				  struct mlxsw_sp_nexthop_group *nh_grp)
5370 {
5371 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5372 	int i;
5373 
5374 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5375 	for (i = nhgi->count - 1; i >= 0; i--) {
5376 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5377 
5378 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5379 	}
5380 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5381 	WARN_ON_ONCE(nhgi->adj_index_valid);
5382 	kfree(nhgi);
5383 }
5384 
5385 static struct mlxsw_sp_nexthop_group *
5386 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5387 {
5388 	struct mlxsw_sp_nexthop_group *nh_grp;
5389 	int err;
5390 
5391 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5392 	if (!nh_grp)
5393 		return ERR_PTR(-ENOMEM);
5394 	INIT_LIST_HEAD(&nh_grp->vr_list);
5395 	err = rhashtable_init(&nh_grp->vr_ht,
5396 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5397 	if (err)
5398 		goto err_nexthop_group_vr_ht_init;
5399 	INIT_LIST_HEAD(&nh_grp->fib_list);
5400 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5401 	nh_grp->ipv4.fi = fi;
5402 	fib_info_hold(fi);
5403 
5404 	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5405 	if (err)
5406 		goto err_nexthop_group_info_init;
5407 
5408 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5409 	if (err)
5410 		goto err_nexthop_group_insert;
5411 
5412 	nh_grp->can_destroy = true;
5413 
5414 	return nh_grp;
5415 
5416 err_nexthop_group_insert:
5417 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5418 err_nexthop_group_info_init:
5419 	fib_info_put(fi);
5420 	rhashtable_destroy(&nh_grp->vr_ht);
5421 err_nexthop_group_vr_ht_init:
5422 	kfree(nh_grp);
5423 	return ERR_PTR(err);
5424 }
5425 
5426 static void
5427 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5428 				struct mlxsw_sp_nexthop_group *nh_grp)
5429 {
5430 	if (!nh_grp->can_destroy)
5431 		return;
5432 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5433 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5434 	fib_info_put(nh_grp->ipv4.fi);
5435 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5436 	rhashtable_destroy(&nh_grp->vr_ht);
5437 	kfree(nh_grp);
5438 }
5439 
5440 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5441 				       struct mlxsw_sp_fib_entry *fib_entry,
5442 				       struct fib_info *fi)
5443 {
5444 	struct mlxsw_sp_nexthop_group *nh_grp;
5445 
5446 	if (fi->nh) {
5447 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5448 							   fi->nh->id);
5449 		if (WARN_ON_ONCE(!nh_grp))
5450 			return -EINVAL;
5451 		goto out;
5452 	}
5453 
5454 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5455 	if (!nh_grp) {
5456 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5457 		if (IS_ERR(nh_grp))
5458 			return PTR_ERR(nh_grp);
5459 	}
5460 out:
5461 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5462 	fib_entry->nh_group = nh_grp;
5463 	return 0;
5464 }
5465 
5466 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5467 					struct mlxsw_sp_fib_entry *fib_entry)
5468 {
5469 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5470 
5471 	list_del(&fib_entry->nexthop_group_node);
5472 	if (!list_empty(&nh_grp->fib_list))
5473 		return;
5474 
5475 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5476 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5477 		return;
5478 	}
5479 
5480 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5481 }
5482 
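/* Only IPv4 routes with a TOS of zero are offloaded, as the device does
 * not take the TOS field into account during route lookup.
 */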
5483 static bool
5484 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5485 {
5486 	struct mlxsw_sp_fib4_entry *fib4_entry;
5487 
5488 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5489 				  common);
5490 	return !fib4_entry->tos;
5491 }
5492 
5493 static bool
5494 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5495 {
5496 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5497 
5498 	switch (fib_entry->fib_node->fib->proto) {
5499 	case MLXSW_SP_L3_PROTO_IPV4:
5500 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5501 			return false;
5502 		break;
5503 	case MLXSW_SP_L3_PROTO_IPV6:
5504 		break;
5505 	}
5506 
5507 	switch (fib_entry->type) {
5508 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5509 		return !!nh_group->nhgi->adj_index_valid;
5510 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5511 		return !!nh_group->nhgi->nh_rif;
5512 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5513 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5514 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5515 		return true;
5516 	default:
5517 		return false;
5518 	}
5519 }
5520 
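/* Find the nexthop in the group that corresponds to the given IPv6
 * route, by matching both the egress device and the gateway address of
 * the route's nexthop.
 */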
5521 static struct mlxsw_sp_nexthop *
5522 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5523 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5524 {
5525 	int i;
5526 
5527 	for (i = 0; i < nh_grp->nhgi->count; i++) {
5528 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5529 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5530 
5531 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
5532 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5533 				    &rt->fib6_nh->fib_nh_gw6))
5534 			return nh;
5535 	}
5536 
5537 	return NULL;
5538 }
5539 
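/* Reflect an offload failure back to the kernel FIB, so that it is
 * visible to user space via the route's hardware flags.
 */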
5540 static void
5541 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5542 				      struct fib_entry_notifier_info *fen_info)
5543 {
5544 	u32 *p_dst = (u32 *) &fen_info->dst;
5545 	struct fib_rt_info fri;
5546 
5547 	fri.fi = fen_info->fi;
5548 	fri.tb_id = fen_info->tb_id;
5549 	fri.dst = cpu_to_be32(*p_dst);
5550 	fri.dst_len = fen_info->dst_len;
5551 	fri.tos = fen_info->tos;
5552 	fri.type = fen_info->type;
5553 	fri.offload = false;
5554 	fri.trap = false;
5555 	fri.offload_failed = true;
5556 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5557 }
5558 
5559 static void
5560 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5561 				 struct mlxsw_sp_fib_entry *fib_entry)
5562 {
5563 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5564 	int dst_len = fib_entry->fib_node->key.prefix_len;
5565 	struct mlxsw_sp_fib4_entry *fib4_entry;
5566 	struct fib_rt_info fri;
5567 	bool should_offload;
5568 
5569 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5570 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5571 				  common);
5572 	fri.fi = fib4_entry->fi;
5573 	fri.tb_id = fib4_entry->tb_id;
5574 	fri.dst = cpu_to_be32(*p_dst);
5575 	fri.dst_len = dst_len;
5576 	fri.tos = fib4_entry->tos;
5577 	fri.type = fib4_entry->type;
5578 	fri.offload = should_offload;
5579 	fri.trap = !should_offload;
5580 	fri.offload_failed = false;
5581 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5582 }
5583 
5584 static void
5585 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5586 				   struct mlxsw_sp_fib_entry *fib_entry)
5587 {
5588 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5589 	int dst_len = fib_entry->fib_node->key.prefix_len;
5590 	struct mlxsw_sp_fib4_entry *fib4_entry;
5591 	struct fib_rt_info fri;
5592 
5593 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5594 				  common);
5595 	fri.fi = fib4_entry->fi;
5596 	fri.tb_id = fib4_entry->tb_id;
5597 	fri.dst = cpu_to_be32(*p_dst);
5598 	fri.dst_len = dst_len;
5599 	fri.tos = fib4_entry->tos;
5600 	fri.type = fib4_entry->type;
5601 	fri.offload = false;
5602 	fri.trap = false;
5603 	fri.offload_failed = false;
5604 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5605 }
5606 
5607 #if IS_ENABLED(CONFIG_IPV6)
5608 static void
5609 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5610 				      struct fib6_info **rt_arr,
5611 				      unsigned int nrt6)
5612 {
5613 	int i;
5614 
5615 	/* In IPv6 a multipath route is represented using multiple routes, so
5616 	 * we need to set the flags on all of them.
5617 	 */
5618 	for (i = 0; i < nrt6; i++)
5619 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5620 				       false, false, true);
5621 }
5622 #else
5623 static void
5624 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5625 				      struct fib6_info **rt_arr,
5626 				      unsigned int nrt6)
5627 {
5628 }
5629 #endif
5630 
5631 #if IS_ENABLED(CONFIG_IPV6)
5632 static void
5633 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5634 				 struct mlxsw_sp_fib_entry *fib_entry)
5635 {
5636 	struct mlxsw_sp_fib6_entry *fib6_entry;
5637 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5638 	bool should_offload;
5639 
5640 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5641 
5642 	/* In IPv6 a multipath route is represented using multiple routes, so
5643 	 * we need to set the flags on all of them.
5644 	 */
5645 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5646 				  common);
5647 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5648 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5649 				       should_offload, !should_offload, false);
5650 }
5651 #else
5652 static void
5653 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5654 				 struct mlxsw_sp_fib_entry *fib_entry)
5655 {
5656 }
5657 #endif
5658 
5659 #if IS_ENABLED(CONFIG_IPV6)
5660 static void
5661 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5662 				   struct mlxsw_sp_fib_entry *fib_entry)
5663 {
5664 	struct mlxsw_sp_fib6_entry *fib6_entry;
5665 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5666 
5667 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5668 				  common);
5669 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5670 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5671 				       false, false, false);
5672 }
5673 #else
5674 static void
5675 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5676 				   struct mlxsw_sp_fib_entry *fib_entry)
5677 {
5678 }
5679 #endif
5680 
5681 static void
5682 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5683 				struct mlxsw_sp_fib_entry *fib_entry)
5684 {
5685 	switch (fib_entry->fib_node->fib->proto) {
5686 	case MLXSW_SP_L3_PROTO_IPV4:
5687 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5688 		break;
5689 	case MLXSW_SP_L3_PROTO_IPV6:
5690 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5691 		break;
5692 	}
5693 }
5694 
5695 static void
5696 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5697 				  struct mlxsw_sp_fib_entry *fib_entry)
5698 {
5699 	switch (fib_entry->fib_node->fib->proto) {
5700 	case MLXSW_SP_L3_PROTO_IPV4:
5701 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5702 		break;
5703 	case MLXSW_SP_L3_PROTO_IPV6:
5704 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5705 		break;
5706 	}
5707 }
5708 
5709 static void
5710 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5711 				    struct mlxsw_sp_fib_entry *fib_entry,
5712 				    enum mlxsw_sp_fib_entry_op op)
5713 {
5714 	switch (op) {
5715 	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5716 	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5717 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5718 		break;
5719 	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5720 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5721 		break;
5722 	default:
5723 		break;
5724 	}
5725 }
5726 
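/* The basic low-level ops program FIB entries directly through the
 * RALUE register. The op context merely buffers the register payload
 * between the pack and commit stages.
 */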
5727 struct mlxsw_sp_fib_entry_op_ctx_basic {
5728 	char ralue_pl[MLXSW_REG_RALUE_LEN];
5729 };
5730 
5731 static void
5732 mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5733 					enum mlxsw_sp_l3proto proto,
5734 					enum mlxsw_sp_fib_entry_op op,
5735 					u16 virtual_router, u8 prefix_len,
5736 					unsigned char *addr,
5737 					struct mlxsw_sp_fib_entry_priv *priv)
5738 {
5739 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5740 	enum mlxsw_reg_ralxx_protocol ralxx_proto;
5741 	char *ralue_pl = op_ctx_basic->ralue_pl;
5742 	enum mlxsw_reg_ralue_op ralue_op;
5743 
5744 	ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
5745 
5746 	switch (op) {
5747 	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5748 	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5749 		ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
5750 		break;
5751 	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5752 		ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
5753 		break;
5754 	default:
5755 		WARN_ON_ONCE(1);
5756 		return;
5757 	}
5758 
5759 	switch (proto) {
5760 	case MLXSW_SP_L3_PROTO_IPV4:
5761 		mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
5762 				      virtual_router, prefix_len, (u32 *) addr);
5763 		break;
5764 	case MLXSW_SP_L3_PROTO_IPV6:
5765 		mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
5766 				      virtual_router, prefix_len, addr);
5767 		break;
5768 	}
5769 }
5770 
5771 static void
5772 mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5773 						   enum mlxsw_reg_ralue_trap_action trap_action,
5774 						   u16 trap_id, u32 adjacency_index, u16 ecmp_size)
5775 {
5776 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5777 
5778 	mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
5779 					trap_id, adjacency_index, ecmp_size);
5780 }
5781 
5782 static void
5783 mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5784 						  enum mlxsw_reg_ralue_trap_action trap_action,
5785 						  u16 trap_id, u16 local_erif)
5786 {
5787 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5788 
5789 	mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
5790 				       trap_id, local_erif);
5791 }
5792 
5793 static void
5794 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
5795 {
5796 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5797 
5798 	mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
5799 }
5800 
5801 static void
5802 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5803 						      u32 tunnel_ptr)
5804 {
5805 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5806 
5807 	mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
5808 }
5809 
5810 static int
5811 mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5812 					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5813 					  bool *postponed_for_bulk)
5814 {
5815 	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5816 
5817 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5818 			       op_ctx_basic->ralue_pl);
5819 }
5820 
5821 static bool
5822 mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
5823 {
5824 	return true;
5825 }
5826 
5827 static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5828 				    struct mlxsw_sp_fib_entry *fib_entry,
5829 				    enum mlxsw_sp_fib_entry_op op)
5830 {
5831 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5832 
5833 	mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
5834 	fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
5835 				    fib_entry->fib_node->key.prefix_len,
5836 				    fib_entry->fib_node->key.addr,
5837 				    fib_entry->priv);
5838 }
5839 
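/* Commit a previously packed FIB entry operation to the device.
 * Low-level implementations may postpone the write in order to bulk it
 * with subsequent operations, in which case the context keeps holding
 * the private data of the pending entries.
 */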
5840 static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5841 				     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5842 				     const struct mlxsw_sp_router_ll_ops *ll_ops)
5843 {
5844 	bool postponed_for_bulk = false;
5845 	int err;
5846 
5847 	err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
5848 	if (!postponed_for_bulk)
5849 		mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
5850 	return err;
5851 }
5852 
5853 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5854 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5855 					struct mlxsw_sp_fib_entry *fib_entry,
5856 					enum mlxsw_sp_fib_entry_op op)
5857 {
5858 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5859 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5860 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5861 	enum mlxsw_reg_ralue_trap_action trap_action;
5862 	u16 trap_id = 0;
5863 	u32 adjacency_index = 0;
5864 	u16 ecmp_size = 0;
5865 
	/* If the nexthop group's adjacency index is valid, use it with
	 * the provided ECMP size. Otherwise, set up a trap and pass
	 * traffic to the kernel.
	 */
5870 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5871 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5872 		adjacency_index = nhgi->adj_index;
5873 		ecmp_size = nhgi->ecmp_size;
5874 	} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
5875 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5876 		adjacency_index = mlxsw_sp->router->adj_trap_index;
5877 		ecmp_size = 1;
5878 	} else {
5879 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5880 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5881 	}
5882 
5883 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5884 	ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
5885 					  adjacency_index, ecmp_size);
5886 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5887 }
5888 
5889 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5890 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5891 				       struct mlxsw_sp_fib_entry *fib_entry,
5892 				       enum mlxsw_sp_fib_entry_op op)
5893 {
5894 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5895 	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
5896 	enum mlxsw_reg_ralue_trap_action trap_action;
5897 	u16 trap_id = 0;
5898 	u16 rif_index = 0;
5899 
5900 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5901 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5902 		rif_index = rif->rif_index;
5903 	} else {
5904 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5905 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5906 	}
5907 
5908 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5909 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
5910 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5911 }
5912 
5913 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
5914 				      struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5915 				      struct mlxsw_sp_fib_entry *fib_entry,
5916 				      enum mlxsw_sp_fib_entry_op op)
5917 {
5918 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5919 
5920 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5921 	ll_ops->fib_entry_act_ip2me_pack(op_ctx);
5922 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5923 }
5924 
5925 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
5926 					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5927 					   struct mlxsw_sp_fib_entry *fib_entry,
5928 					   enum mlxsw_sp_fib_entry_op op)
5929 {
5930 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5931 	enum mlxsw_reg_ralue_trap_action trap_action;
5932 
5933 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
5934 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5935 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
5936 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5937 }
5938 
5939 static int
5940 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
5941 				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5942 				  struct mlxsw_sp_fib_entry *fib_entry,
5943 				  enum mlxsw_sp_fib_entry_op op)
5944 {
5945 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5946 	enum mlxsw_reg_ralue_trap_action trap_action;
5947 	u16 trap_id;
5948 
5949 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5950 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
5951 
5952 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5953 	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
5954 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5955 }
5956 
5957 static int
5958 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
5959 				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5960 				 struct mlxsw_sp_fib_entry *fib_entry,
5961 				 enum mlxsw_sp_fib_entry_op op)
5962 {
5963 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5964 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
5965 	const struct mlxsw_sp_ipip_ops *ipip_ops;
5966 	int err;
5967 
5968 	if (WARN_ON(!ipip_entry))
5969 		return -EINVAL;
5970 
5971 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5972 	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
5973 				     fib_entry->decap.tunnel_index);
5974 	if (err)
5975 		return err;
5976 
5977 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5978 	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
5979 					     fib_entry->decap.tunnel_index);
5980 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5981 }
5982 
5983 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
5984 					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5985 					   struct mlxsw_sp_fib_entry *fib_entry,
5986 					   enum mlxsw_sp_fib_entry_op op)
5987 {
5988 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5989 
5990 	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5991 	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
5992 					     fib_entry->decap.tunnel_index);
5993 	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5994 }
5995 
5996 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
5997 				   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5998 				   struct mlxsw_sp_fib_entry *fib_entry,
5999 				   enum mlxsw_sp_fib_entry_op op)
6000 {
6001 	switch (fib_entry->type) {
6002 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6003 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
6004 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6005 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
6006 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6007 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
6008 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6009 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
6010 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6011 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
6012 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6013 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
6014 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6015 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
6016 	}
6017 	return -EINVAL;
6018 }
6019 
6020 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6021 				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6022 				 struct mlxsw_sp_fib_entry *fib_entry,
6023 				 enum mlxsw_sp_fib_entry_op op)
6024 {
6025 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
6026 
6027 	if (err)
6028 		return err;
6029 
6030 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6031 
	return 0;
6033 }
6034 
6035 static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6036 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6037 				       struct mlxsw_sp_fib_entry *fib_entry,
6038 				       bool is_new)
6039 {
6040 	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6041 				     is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
6042 					      MLXSW_SP_FIB_ENTRY_OP_UPDATE);
6043 }
6044 
6045 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6046 				     struct mlxsw_sp_fib_entry *fib_entry)
6047 {
6048 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6049 
6050 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6051 	return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
6052 }
6053 
6054 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6055 				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6056 				  struct mlxsw_sp_fib_entry *fib_entry)
6057 {
6058 	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6059 
6060 	if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
6061 		return 0;
6062 	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6063 				     MLXSW_SP_FIB_ENTRY_OP_DELETE);
6064 }
6065 
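/* Derive the device action for an IPv4 route from the kernel route
 * type. Local routes may actually terminate IP-in-IP or NVE tunnels, in
 * which case they are programmed as decapsulation entries instead of
 * traps.
 */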
6066 static int
6067 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6068 			     const struct fib_entry_notifier_info *fen_info,
6069 			     struct mlxsw_sp_fib_entry *fib_entry)
6070 {
6071 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6072 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6073 	struct mlxsw_sp_router *router = mlxsw_sp->router;
6074 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6075 	int ifindex = nhgi->nexthops[0].ifindex;
6076 	struct mlxsw_sp_ipip_entry *ipip_entry;
6077 
6078 	switch (fen_info->type) {
6079 	case RTN_LOCAL:
6080 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6081 							       MLXSW_SP_L3_PROTO_IPV4, dip);
6082 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6083 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6084 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6085 							     fib_entry,
6086 							     ipip_entry);
6087 		}
6088 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6089 						 MLXSW_SP_L3_PROTO_IPV4,
6090 						 &dip)) {
6091 			u32 tunnel_index;
6092 
6093 			tunnel_index = router->nve_decap_config.tunnel_index;
6094 			fib_entry->decap.tunnel_index = tunnel_index;
6095 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6096 			return 0;
6097 		}
6098 		fallthrough;
6099 	case RTN_BROADCAST:
6100 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6101 		return 0;
6102 	case RTN_BLACKHOLE:
6103 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6104 		return 0;
6105 	case RTN_UNREACHABLE:
6106 	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * at a lower priority than packets directed at the host,
		 * so use action type local instead of trap.
		 */
6111 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6112 		return 0;
6113 	case RTN_UNICAST:
6114 		if (nhgi->gateway)
6115 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6116 		else
6117 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6118 		return 0;
6119 	default:
6120 		return -EINVAL;
6121 	}
6122 }
6123 
6124 static void
6125 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6126 			      struct mlxsw_sp_fib_entry *fib_entry)
6127 {
6128 	switch (fib_entry->type) {
6129 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6130 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6131 		break;
6132 	default:
6133 		break;
6134 	}
6135 }
6136 
6137 static void
6138 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6139 			       struct mlxsw_sp_fib4_entry *fib4_entry)
6140 {
6141 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6142 }
6143 
6144 static struct mlxsw_sp_fib4_entry *
6145 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6146 			   struct mlxsw_sp_fib_node *fib_node,
6147 			   const struct fib_entry_notifier_info *fen_info)
6148 {
6149 	struct mlxsw_sp_fib4_entry *fib4_entry;
6150 	struct mlxsw_sp_fib_entry *fib_entry;
6151 	int err;
6152 
6153 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6154 	if (!fib4_entry)
6155 		return ERR_PTR(-ENOMEM);
6156 	fib_entry = &fib4_entry->common;
6157 
6158 	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
6159 	if (IS_ERR(fib_entry->priv)) {
6160 		err = PTR_ERR(fib_entry->priv);
6161 		goto err_fib_entry_priv_create;
6162 	}
6163 
6164 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6165 	if (err)
6166 		goto err_nexthop4_group_get;
6167 
6168 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6169 					     fib_node->fib);
6170 	if (err)
6171 		goto err_nexthop_group_vr_link;
6172 
6173 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6174 	if (err)
6175 		goto err_fib4_entry_type_set;
6176 
6177 	fib4_entry->fi = fen_info->fi;
6178 	fib_info_hold(fib4_entry->fi);
6179 	fib4_entry->tb_id = fen_info->tb_id;
6180 	fib4_entry->type = fen_info->type;
6181 	fib4_entry->tos = fen_info->tos;
6182 
6183 	fib_entry->fib_node = fib_node;
6184 
6185 	return fib4_entry;
6186 
6187 err_fib4_entry_type_set:
6188 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6189 err_nexthop_group_vr_link:
6190 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6191 err_nexthop4_group_get:
6192 	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
6193 err_fib_entry_priv_create:
6194 	kfree(fib4_entry);
6195 	return ERR_PTR(err);
6196 }
6197 
6198 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6199 					struct mlxsw_sp_fib4_entry *fib4_entry)
6200 {
6201 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6202 
6203 	fib_info_put(fib4_entry->fi);
6204 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6205 	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6206 					 fib_node->fib);
6207 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6208 	mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
6209 	kfree(fib4_entry);
6210 }
6211 
6212 static struct mlxsw_sp_fib4_entry *
6213 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6214 			   const struct fib_entry_notifier_info *fen_info)
6215 {
6216 	struct mlxsw_sp_fib4_entry *fib4_entry;
6217 	struct mlxsw_sp_fib_node *fib_node;
6218 	struct mlxsw_sp_fib *fib;
6219 	struct mlxsw_sp_vr *vr;
6220 
6221 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6222 	if (!vr)
6223 		return NULL;
6224 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6225 
6226 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6227 					    sizeof(fen_info->dst),
6228 					    fen_info->dst_len);
6229 	if (!fib_node)
6230 		return NULL;
6231 
6232 	fib4_entry = container_of(fib_node->fib_entry,
6233 				  struct mlxsw_sp_fib4_entry, common);
6234 	if (fib4_entry->tb_id == fen_info->tb_id &&
6235 	    fib4_entry->tos == fen_info->tos &&
6236 	    fib4_entry->type == fen_info->type &&
6237 	    fib4_entry->fi == fen_info->fi)
6238 		return fib4_entry;
6239 
6240 	return NULL;
6241 }
6242 
6243 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6244 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6245 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6246 	.key_len = sizeof(struct mlxsw_sp_fib_key),
6247 	.automatic_shrinking = true,
6248 };
6249 
6250 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6251 				    struct mlxsw_sp_fib_node *fib_node)
6252 {
6253 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6254 				      mlxsw_sp_fib_ht_params);
6255 }
6256 
6257 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6258 				     struct mlxsw_sp_fib_node *fib_node)
6259 {
6260 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6261 			       mlxsw_sp_fib_ht_params);
6262 }
6263 
6264 static struct mlxsw_sp_fib_node *
6265 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6266 			 size_t addr_len, unsigned char prefix_len)
6267 {
6268 	struct mlxsw_sp_fib_key key;
6269 
6270 	memset(&key, 0, sizeof(key));
6271 	memcpy(key.addr, addr, addr_len);
6272 	key.prefix_len = prefix_len;
6273 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6274 }
6275 
6276 static struct mlxsw_sp_fib_node *
6277 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6278 			 size_t addr_len, unsigned char prefix_len)
6279 {
6280 	struct mlxsw_sp_fib_node *fib_node;
6281 
6282 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6283 	if (!fib_node)
6284 		return NULL;
6285 
6286 	list_add(&fib_node->list, &fib->node_list);
6287 	memcpy(fib_node->key.addr, addr, addr_len);
6288 	fib_node->key.prefix_len = prefix_len;
6289 
6290 	return fib_node;
6291 }
6292 
6293 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6294 {
6295 	list_del(&fib_node->list);
6296 	kfree(fib_node);
6297 }
6298 
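/* Account the FIB node's prefix length in the LPM tree bound to its
 * protocol. If the prefix length is not yet used by the tree, a tree
 * that covers it is fetched and the virtual routers are migrated to it.
 */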
6299 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6300 				      struct mlxsw_sp_fib_node *fib_node)
6301 {
6302 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6303 	struct mlxsw_sp_fib *fib = fib_node->fib;
6304 	struct mlxsw_sp_lpm_tree *lpm_tree;
6305 	int err;
6306 
6307 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6308 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6309 		goto out;
6310 
6311 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6312 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6313 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6314 					 fib->proto);
6315 	if (IS_ERR(lpm_tree))
6316 		return PTR_ERR(lpm_tree);
6317 
6318 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6319 	if (err)
6320 		goto err_lpm_tree_replace;
6321 
6322 out:
6323 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6324 	return 0;
6325 
6326 err_lpm_tree_replace:
6327 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6328 	return err;
6329 }
6330 
6331 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6332 					 struct mlxsw_sp_fib_node *fib_node)
6333 {
6334 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6335 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6336 	struct mlxsw_sp_fib *fib = fib_node->fib;
6337 	int err;
6338 
6339 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6340 		return;
	/* Try to construct a new LPM tree from the current prefix usage
	 * minus the no-longer-used prefix length. If we fail, continue
	 * using the old tree.
	 */
6344 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6345 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6346 				    fib_node->key.prefix_len);
6347 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6348 					 fib->proto);
6349 	if (IS_ERR(lpm_tree))
6350 		return;
6351 
6352 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6353 	if (err)
6354 		goto err_lpm_tree_replace;
6355 
6356 	return;
6357 
6358 err_lpm_tree_replace:
6359 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6360 }
6361 
6362 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6363 				  struct mlxsw_sp_fib_node *fib_node,
6364 				  struct mlxsw_sp_fib *fib)
6365 {
6366 	int err;
6367 
6368 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
6369 	if (err)
6370 		return err;
6371 	fib_node->fib = fib;
6372 
6373 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6374 	if (err)
6375 		goto err_fib_lpm_tree_link;
6376 
6377 	return 0;
6378 
6379 err_fib_lpm_tree_link:
6380 	fib_node->fib = NULL;
6381 	mlxsw_sp_fib_node_remove(fib, fib_node);
6382 	return err;
6383 }
6384 
6385 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6386 				   struct mlxsw_sp_fib_node *fib_node)
6387 {
6388 	struct mlxsw_sp_fib *fib = fib_node->fib;
6389 
6390 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6391 	fib_node->fib = NULL;
6392 	mlxsw_sp_fib_node_remove(fib, fib_node);
6393 }
6394 
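/* Get the FIB node for the given prefix, creating it - and taking a
 * reference on the corresponding virtual router - if it does not exist
 * yet.
 */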
6395 static struct mlxsw_sp_fib_node *
6396 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6397 		      size_t addr_len, unsigned char prefix_len,
6398 		      enum mlxsw_sp_l3proto proto)
6399 {
6400 	struct mlxsw_sp_fib_node *fib_node;
6401 	struct mlxsw_sp_fib *fib;
6402 	struct mlxsw_sp_vr *vr;
6403 	int err;
6404 
6405 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6406 	if (IS_ERR(vr))
6407 		return ERR_CAST(vr);
6408 	fib = mlxsw_sp_vr_fib(vr, proto);
6409 
6410 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6411 	if (fib_node)
6412 		return fib_node;
6413 
6414 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6415 	if (!fib_node) {
6416 		err = -ENOMEM;
6417 		goto err_fib_node_create;
6418 	}
6419 
6420 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6421 	if (err)
6422 		goto err_fib_node_init;
6423 
6424 	return fib_node;
6425 
6426 err_fib_node_init:
6427 	mlxsw_sp_fib_node_destroy(fib_node);
6428 err_fib_node_create:
6429 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6430 	return ERR_PTR(err);
6431 }
6432 
6433 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6434 				  struct mlxsw_sp_fib_node *fib_node)
6435 {
6436 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6437 
6438 	if (fib_node->fib_entry)
6439 		return;
6440 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6441 	mlxsw_sp_fib_node_destroy(fib_node);
6442 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6443 }
6444 
6445 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6446 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6447 					struct mlxsw_sp_fib_entry *fib_entry)
6448 {
6449 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6450 	bool is_new = !fib_node->fib_entry;
6451 	int err;
6452 
6453 	fib_node->fib_entry = fib_entry;
6454 
6455 	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
6456 	if (err)
6457 		goto err_fib_entry_update;
6458 
6459 	return 0;
6460 
6461 err_fib_entry_update:
6462 	fib_node->fib_entry = NULL;
6463 	return err;
6464 }
6465 
6466 static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6467 					    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6468 					    struct mlxsw_sp_fib_entry *fib_entry)
6469 {
6470 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6471 	int err;
6472 
6473 	err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
6474 	fib_node->fib_entry = NULL;
6475 	return err;
6476 }
6477 
6478 static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6479 					   struct mlxsw_sp_fib_entry *fib_entry)
6480 {
6481 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6482 
6483 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6484 	__mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
6485 }
6486 
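/* Since the kernel merged the local and main tables, a route from the
 * local table may mask an identical route from the main table. Do not
 * allow a main-table route to replace an offloaded local-table route
 * for the same prefix.
 */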
6487 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6488 {
6489 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6490 	struct mlxsw_sp_fib4_entry *fib4_replaced;
6491 
6492 	if (!fib_node->fib_entry)
6493 		return true;
6494 
6495 	fib4_replaced = container_of(fib_node->fib_entry,
6496 				     struct mlxsw_sp_fib4_entry, common);
6497 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6498 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
6499 		return false;
6500 
6501 	return true;
6502 }
6503 
6504 static int
6505 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6506 			     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6507 			     const struct fib_entry_notifier_info *fen_info)
6508 {
6509 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6510 	struct mlxsw_sp_fib_entry *replaced;
6511 	struct mlxsw_sp_fib_node *fib_node;
6512 	int err;
6513 
6514 	if (fen_info->fi->nh &&
6515 	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6516 		return 0;
6517 
6518 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6519 					 &fen_info->dst, sizeof(fen_info->dst),
6520 					 fen_info->dst_len,
6521 					 MLXSW_SP_L3_PROTO_IPV4);
6522 	if (IS_ERR(fib_node)) {
6523 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6524 		return PTR_ERR(fib_node);
6525 	}
6526 
6527 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6528 	if (IS_ERR(fib4_entry)) {
6529 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6530 		err = PTR_ERR(fib4_entry);
6531 		goto err_fib4_entry_create;
6532 	}
6533 
6534 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6535 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6536 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6537 		return 0;
6538 	}
6539 
6540 	replaced = fib_node->fib_entry;
6541 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
6542 	if (err) {
6543 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6544 		goto err_fib_node_entry_link;
6545 	}
6546 
6547 	/* Nothing to replace */
6548 	if (!replaced)
6549 		return 0;
6550 
6551 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6552 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6553 				     common);
6554 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6555 
6556 	return 0;
6557 
6558 err_fib_node_entry_link:
6559 	fib_node->fib_entry = replaced;
6560 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6561 err_fib4_entry_create:
6562 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6563 	return err;
6564 }
6565 
6566 static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6567 				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6568 				    struct fib_entry_notifier_info *fen_info)
6569 {
6570 	struct mlxsw_sp_fib4_entry *fib4_entry;
6571 	struct mlxsw_sp_fib_node *fib_node;
6572 	int err;
6573 
6574 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6575 	if (!fib4_entry)
6576 		return 0;
6577 	fib_node = fib4_entry->common.fib_node;
6578 
6579 	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
6580 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6581 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6582 	return err;
6583 }
6584 
6585 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6586 {
6587 	/* Multicast routes aren't supported, so ignore them. Neighbour
6588 	 * Discovery packets are specifically trapped.
6589 	 */
6590 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6591 		return true;
6592 
6593 	/* Cloned routes are irrelevant in the forwarding path. */
6594 	if (rt->fib6_flags & RTF_CACHE)
6595 		return true;
6596 
6597 	return false;
6598 }
6599 
6600 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6601 {
6602 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6603 
6604 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6605 	if (!mlxsw_sp_rt6)
6606 		return ERR_PTR(-ENOMEM);
6607 
	/* In case of route replacement, the replaced route is deleted
	 * without notification. Take a reference to prevent accessing
	 * freed memory.
	 */
6612 	mlxsw_sp_rt6->rt = rt;
6613 	fib6_info_hold(rt);
6614 
6615 	return mlxsw_sp_rt6;
6616 }
6617 
6618 #if IS_ENABLED(CONFIG_IPV6)
6619 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6620 {
6621 	fib6_info_release(rt);
6622 }
6623 #else
6624 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6625 {
6626 }
6627 #endif
6628 
6629 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6630 {
6631 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6632 
6633 	if (!mlxsw_sp_rt6->rt->nh)
6634 		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6635 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6636 	kfree(mlxsw_sp_rt6);
6637 }
6638 
6639 static struct fib6_info *
6640 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6641 {
6642 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6643 				list)->rt;
6644 }
6645 
6646 static struct mlxsw_sp_rt6 *
6647 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6648 			    const struct fib6_info *rt)
6649 {
6650 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6651 
6652 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6653 		if (mlxsw_sp_rt6->rt == rt)
6654 			return mlxsw_sp_rt6;
6655 	}
6656 
6657 	return NULL;
6658 }
6659 
6660 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6661 					const struct fib6_info *rt,
6662 					enum mlxsw_sp_ipip_type *ret)
6663 {
6664 	return rt->fib6_nh->fib_nh_dev &&
6665 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6666 }
6667 
6668 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6669 				  struct mlxsw_sp_nexthop_group *nh_grp,
6670 				  struct mlxsw_sp_nexthop *nh,
6671 				  const struct fib6_info *rt)
6672 {
6673 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6674 
6675 	nh->nhgi = nh_grp->nhgi;
6676 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6677 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6678 #if IS_ENABLED(CONFIG_IPV6)
6679 	nh->neigh_tbl = &nd_tbl;
6680 #endif
6681 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6682 
6683 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6684 
6685 	if (!dev)
6686 		return 0;
6687 	nh->ifindex = dev->ifindex;
6688 
6689 	return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6690 }
6691 
6692 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6693 				   struct mlxsw_sp_nexthop *nh)
6694 {
6695 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6696 	list_del(&nh->router_list_node);
6697 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6698 }
6699 
6700 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6701 				    const struct fib6_info *rt)
6702 {
6703 	return rt->fib6_nh->fib_nh_gw_family ||
6704 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6705 }
6706 
6707 static int
6708 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6709 				  struct mlxsw_sp_nexthop_group *nh_grp,
6710 				  struct mlxsw_sp_fib6_entry *fib6_entry)
6711 {
6712 	struct mlxsw_sp_nexthop_group_info *nhgi;
6713 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6714 	struct mlxsw_sp_nexthop *nh;
6715 	int err, i;
6716 
6717 	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6718 		       GFP_KERNEL);
6719 	if (!nhgi)
6720 		return -ENOMEM;
6721 	nh_grp->nhgi = nhgi;
6722 	nhgi->nh_grp = nh_grp;
6723 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6724 					struct mlxsw_sp_rt6, list);
6725 	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6726 	nhgi->count = fib6_entry->nrt6;
6727 	for (i = 0; i < nhgi->count; i++) {
6728 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
6729 
6730 		nh = &nhgi->nexthops[i];
6731 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6732 		if (err)
6733 			goto err_nexthop6_init;
6734 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6735 	}
6737 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
6738 	if (err)
6739 		goto err_group_inc;
6740 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6741 	if (err)
6742 		goto err_group_refresh;
6743 
6744 	return 0;
6745 
6746 err_group_refresh:
6747 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6748 err_group_inc:
6749 	i = nhgi->count;
6750 err_nexthop6_init:
6751 	for (i--; i >= 0; i--) {
6752 		nh = &nhgi->nexthops[i];
6753 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6754 	}
6755 	kfree(nhgi);
6756 	return err;
6757 }
6758 
6759 static void
6760 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6761 				  struct mlxsw_sp_nexthop_group *nh_grp)
6762 {
6763 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6764 	int i;
6765 
6766 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6767 	for (i = nhgi->count - 1; i >= 0; i--) {
6768 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6769 
6770 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6771 	}
6772 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6773 	WARN_ON_ONCE(nhgi->adj_index_valid);
6774 	kfree(nhgi);
6775 }
6776 
6777 static struct mlxsw_sp_nexthop_group *
6778 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6779 			       struct mlxsw_sp_fib6_entry *fib6_entry)
6780 {
6781 	struct mlxsw_sp_nexthop_group *nh_grp;
6782 	int err;
6783 
6784 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6785 	if (!nh_grp)
6786 		return ERR_PTR(-ENOMEM);
6787 	INIT_LIST_HEAD(&nh_grp->vr_list);
6788 	err = rhashtable_init(&nh_grp->vr_ht,
6789 			      &mlxsw_sp_nexthop_group_vr_ht_params);
6790 	if (err)
6791 		goto err_nexthop_group_vr_ht_init;
6792 	INIT_LIST_HEAD(&nh_grp->fib_list);
6793 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6794 
6795 	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6796 	if (err)
6797 		goto err_nexthop_group_info_init;
6798 
6799 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6800 	if (err)
6801 		goto err_nexthop_group_insert;
6802 
6803 	nh_grp->can_destroy = true;
6804 
6805 	return nh_grp;
6806 
6807 err_nexthop_group_insert:
6808 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6809 err_nexthop_group_info_init:
6810 	rhashtable_destroy(&nh_grp->vr_ht);
6811 err_nexthop_group_vr_ht_init:
6812 	kfree(nh_grp);
6813 	return ERR_PTR(err);
6814 }
6815 
6816 static void
6817 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6818 				struct mlxsw_sp_nexthop_group *nh_grp)
6819 {
6820 	if (!nh_grp->can_destroy)
6821 		return;
6822 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6823 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6824 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6825 	rhashtable_destroy(&nh_grp->vr_ht);
6826 	kfree(nh_grp);
6827 }
6828 
6829 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6830 				       struct mlxsw_sp_fib6_entry *fib6_entry)
6831 {
6832 	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6833 	struct mlxsw_sp_nexthop_group *nh_grp;
6834 
6835 	if (rt->nh) {
6836 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6837 							   rt->nh->id);
6838 		if (WARN_ON_ONCE(!nh_grp))
6839 			return -EINVAL;
6840 		goto out;
6841 	}
6842 
6843 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6844 	if (!nh_grp) {
6845 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6846 		if (IS_ERR(nh_grp))
6847 			return PTR_ERR(nh_grp);
6848 	}
6849 
	/* The route and the nexthop are described by the same struct, so we
	 * need to update the nexthop offload indication for the new route.
	 */
6853 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6854 
6855 out:
6856 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6857 		      &nh_grp->fib_list);
6858 	fib6_entry->common.nh_group = nh_grp;
6859 
6860 	return 0;
6861 }
6862 
6863 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6864 					struct mlxsw_sp_fib_entry *fib_entry)
6865 {
6866 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6867 
6868 	list_del(&fib_entry->nexthop_group_node);
6869 	if (!list_empty(&nh_grp->fib_list))
6870 		return;
6871 
6872 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6873 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6874 		return;
6875 	}
6876 
6877 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
6878 }
6879 
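/* Rebind the FIB entry to a nexthop group matching its current set of
 * routes, using a make-before-break approach: the entry is moved to the
 * new group and updated in the device before the old group is
 * destroyed, and on failure it is relinked to the old group.
 */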
6880 static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6881 					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6882 					  struct mlxsw_sp_fib6_entry *fib6_entry)
6883 {
6884 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6885 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6886 	int err;
6887 
6888 	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6889 	fib6_entry->common.nh_group = NULL;
6890 	list_del(&fib6_entry->common.nexthop_group_node);
6891 
6892 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6893 	if (err)
6894 		goto err_nexthop6_group_get;
6895 
6896 	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6897 					     fib_node->fib);
6898 	if (err)
6899 		goto err_nexthop_group_vr_link;
6900 
	/* If this entry is offloaded, then the adjacency index currently
	 * associated with it in the device's table is that of the old
	 * group. Start using the new one instead.
	 */
6905 	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
6906 					  &fib6_entry->common, false);
6907 	if (err)
6908 		goto err_fib_entry_update;
6909 
6910 	if (list_empty(&old_nh_grp->fib_list))
6911 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6912 
6913 	return 0;
6914 
6915 err_fib_entry_update:
6916 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6917 					 fib_node->fib);
6918 err_nexthop_group_vr_link:
6919 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6920 err_nexthop6_group_get:
6921 	list_add_tail(&fib6_entry->common.nexthop_group_node,
6922 		      &old_nh_grp->fib_list);
6923 	fib6_entry->common.nh_group = old_nh_grp;
6924 	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6925 	return err;
6926 }
6927 
6928 static int
6929 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6930 				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6931 				struct mlxsw_sp_fib6_entry *fib6_entry,
6932 				struct fib6_info **rt_arr, unsigned int nrt6)
6933 {
6934 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6935 	int err, i;
6936 
6937 	for (i = 0; i < nrt6; i++) {
6938 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6939 		if (IS_ERR(mlxsw_sp_rt6)) {
6940 			err = PTR_ERR(mlxsw_sp_rt6);
6941 			goto err_rt6_create;
6942 		}
6943 
6944 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6945 		fib6_entry->nrt6++;
6946 	}
6947 
6948 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
6949 	if (err)
6950 		goto err_nexthop6_group_update;
6951 
6952 	return 0;
6953 
6954 err_nexthop6_group_update:
6955 	i = nrt6;
6956 err_rt6_create:
6957 	for (i--; i >= 0; i--) {
6958 		fib6_entry->nrt6--;
6959 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6960 					       struct mlxsw_sp_rt6, list);
6961 		list_del(&mlxsw_sp_rt6->list);
6962 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6963 	}
6964 	return err;
6965 }
6966 
6967 static void
6968 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
6969 				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6970 				struct mlxsw_sp_fib6_entry *fib6_entry,
6971 				struct fib6_info **rt_arr, unsigned int nrt6)
6972 {
6973 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6974 	int i;
6975 
6976 	for (i = 0; i < nrt6; i++) {
6977 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
6978 							   rt_arr[i]);
6979 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
6980 			continue;
6981 
6982 		fib6_entry->nrt6--;
6983 		list_del(&mlxsw_sp_rt6->list);
6984 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6985 	}
6986 
6987 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
6988 }
6989 
6990 static int
6991 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
6992 				   struct mlxsw_sp_fib_entry *fib_entry,
6993 				   const struct fib6_info *rt)
6994 {
6995 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6996 	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
6997 	int ifindex = nhgi->nexthops[0].ifindex;
6998 	struct mlxsw_sp_ipip_entry *ipip_entry;
6999 
7000 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7001 	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
7002 						       MLXSW_SP_L3_PROTO_IPV6,
7003 						       dip);
7004 
7005 	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
7006 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
7007 		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
7008 						     ipip_entry);
7009 	}
7010 
7011 	return 0;
7012 }
7013 
7014 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
7015 					struct mlxsw_sp_fib_entry *fib_entry,
7016 					const struct fib6_info *rt)
7017 {
7018 	if (rt->fib6_flags & RTF_LOCAL)
7019 		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
7020 							  rt);
7021 	if (rt->fib6_flags & RTF_ANYCAST)
7022 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7023 	else if (rt->fib6_type == RTN_BLACKHOLE)
7024 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
7025 	else if (rt->fib6_flags & RTF_REJECT)
7026 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
7027 	else if (fib_entry->nh_group->nhgi->gateway)
7028 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
7029 	else
7030 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
7031 
7032 	return 0;
7033 }
7034 
7035 static void
7036 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
7037 {
7038 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
7039 
7040 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
7041 				 list) {
7042 		fib6_entry->nrt6--;
7043 		list_del(&mlxsw_sp_rt6->list);
7044 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7045 	}
7046 }
7047 
7048 static struct mlxsw_sp_fib6_entry *
7049 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
7050 			   struct mlxsw_sp_fib_node *fib_node,
7051 			   struct fib6_info **rt_arr, unsigned int nrt6)
7052 {
7053 	struct mlxsw_sp_fib6_entry *fib6_entry;
7054 	struct mlxsw_sp_fib_entry *fib_entry;
7055 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7056 	int err, i;
7057 
7058 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
7059 	if (!fib6_entry)
7060 		return ERR_PTR(-ENOMEM);
7061 	fib_entry = &fib6_entry->common;
7062 
7063 	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
7064 	if (IS_ERR(fib_entry->priv)) {
7065 		err = PTR_ERR(fib_entry->priv);
7066 		goto err_fib_entry_priv_create;
7067 	}
7068 
7069 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
7070 
7071 	for (i = 0; i < nrt6; i++) {
7072 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7073 		if (IS_ERR(mlxsw_sp_rt6)) {
7074 			err = PTR_ERR(mlxsw_sp_rt6);
7075 			goto err_rt6_create;
7076 		}
7077 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7078 		fib6_entry->nrt6++;
7079 	}
7080 
7081 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7082 	if (err)
7083 		goto err_nexthop6_group_get;
7084 
7085 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
7086 					     fib_node->fib);
7087 	if (err)
7088 		goto err_nexthop_group_vr_link;
7089 
7090 	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
7091 	if (err)
7092 		goto err_fib6_entry_type_set;
7093 
7094 	fib_entry->fib_node = fib_node;
7095 
7096 	return fib6_entry;
7097 
7098 err_fib6_entry_type_set:
7099 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
7100 err_nexthop_group_vr_link:
7101 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
7102 err_nexthop6_group_get:
7103 	i = nrt6;
7104 err_rt6_create:
7105 	for (i--; i >= 0; i--) {
7106 		fib6_entry->nrt6--;
7107 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7108 					       struct mlxsw_sp_rt6, list);
7109 		list_del(&mlxsw_sp_rt6->list);
7110 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7111 	}
7112 	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
7113 err_fib_entry_priv_create:
7114 	kfree(fib6_entry);
7115 	return ERR_PTR(err);
7116 }
7117 
7118 static void
7119 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
7120 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7121 {
7122 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
7123 }
7124 
7125 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
7126 					struct mlxsw_sp_fib6_entry *fib6_entry)
7127 {
7128 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7129 
7130 	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
7131 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7132 					 fib_node->fib);
7133 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7134 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7135 	WARN_ON(fib6_entry->nrt6);
7136 	mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
7137 	kfree(fib6_entry);
7138 }
7139 
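/* A FIB node holds at most one offloaded entry. Make sure the notified route
 * actually matches it (same table and metric, and present in the entry's rt6
 * list) before returning the entry.
 */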
7140 static struct mlxsw_sp_fib6_entry *
7141 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7142 			   const struct fib6_info *rt)
7143 {
7144 	struct mlxsw_sp_fib6_entry *fib6_entry;
7145 	struct mlxsw_sp_fib_node *fib_node;
7146 	struct mlxsw_sp_fib *fib;
7147 	struct fib6_info *cmp_rt;
7148 	struct mlxsw_sp_vr *vr;
7149 
7150 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7151 	if (!vr)
7152 		return NULL;
7153 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7154 
7155 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7156 					    sizeof(rt->fib6_dst.addr),
7157 					    rt->fib6_dst.plen);
7158 	if (!fib_node)
7159 		return NULL;
7160 
7161 	fib6_entry = container_of(fib_node->fib_entry,
7162 				  struct mlxsw_sp_fib6_entry, common);
7163 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7164 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7165 	    rt->fib6_metric == cmp_rt->fib6_metric &&
7166 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7167 		return fib6_entry;
7168 
7169 	return NULL;
7170 }
7171 
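/* In the device, the local and main tables are squashed into a single
 * virtual router, whereas the kernel gives a local-table route precedence
 * over a main-table route for the same prefix. Do not let a main-table
 * route replace an offloaded local-table route, as that would make the
 * device diverge from the kernel.
 */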
7172 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7173 {
7174 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7175 	struct mlxsw_sp_fib6_entry *fib6_replaced;
7176 	struct fib6_info *rt, *rt_replaced;
7177 
7178 	if (!fib_node->fib_entry)
7179 		return true;
7180 
7181 	fib6_replaced = container_of(fib_node->fib_entry,
7182 				     struct mlxsw_sp_fib6_entry,
7183 				     common);
7184 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7185 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7186 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7187 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7188 		return false;
7189 
7190 	return true;
7191 }
7192 
7193 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7194 					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7195 					struct fib6_info **rt_arr, unsigned int nrt6)
7196 {
7197 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7198 	struct mlxsw_sp_fib_entry *replaced;
7199 	struct mlxsw_sp_fib_node *fib_node;
7200 	struct fib6_info *rt = rt_arr[0];
7201 	int err;
7202 
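	/* Source-specific routes are not supported. */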
7203 	if (rt->fib6_src.plen)
7204 		return -EINVAL;
7205 
7206 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7207 		return 0;
7208 
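	/* Ignore routes using a nexthop object that is not offloaded by the
	 * driver, e.g., because it was vetoed.
	 */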
7209 	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7210 		return 0;
7211 
7212 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7213 					 &rt->fib6_dst.addr,
7214 					 sizeof(rt->fib6_dst.addr),
7215 					 rt->fib6_dst.plen,
7216 					 MLXSW_SP_L3_PROTO_IPV6);
7217 	if (IS_ERR(fib_node))
7218 		return PTR_ERR(fib_node);
7219 
7220 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7221 						nrt6);
7222 	if (IS_ERR(fib6_entry)) {
7223 		err = PTR_ERR(fib6_entry);
7224 		goto err_fib6_entry_create;
7225 	}
7226 
7227 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7228 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7229 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7230 		return 0;
7231 	}
7232 
7233 	replaced = fib_node->fib_entry;
7234 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
7235 	if (err)
7236 		goto err_fib_node_entry_link;
7237 
7238 	/* Nothing to replace */
7239 	if (!replaced)
7240 		return 0;
7241 
7242 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7243 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7244 				     common);
7245 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7246 
7247 	return 0;
7248 
7249 err_fib_node_entry_link:
7250 	fib_node->fib_entry = replaced;
7251 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7252 err_fib6_entry_create:
7253 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7254 	return err;
7255 }
7256 
7257 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7258 				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7259 				       struct fib6_info **rt_arr, unsigned int nrt6)
7260 {
7261 	struct mlxsw_sp_fib6_entry *fib6_entry;
7262 	struct mlxsw_sp_fib_node *fib_node;
7263 	struct fib6_info *rt = rt_arr[0];
7264 	int err;
7265 
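	/* Source-specific routes are not supported. */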
7266 	if (rt->fib6_src.plen)
7267 		return -EINVAL;
7268 
7269 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7270 		return 0;
7271 
7272 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7273 					 &rt->fib6_dst.addr,
7274 					 sizeof(rt->fib6_dst.addr),
7275 					 rt->fib6_dst.plen,
7276 					 MLXSW_SP_L3_PROTO_IPV6);
7277 	if (IS_ERR(fib_node))
7278 		return PTR_ERR(fib_node);
7279 
7280 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7281 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7282 		return -EINVAL;
7283 	}
7284 
7285 	fib6_entry = container_of(fib_node->fib_entry,
7286 				  struct mlxsw_sp_fib6_entry, common);
7287 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
7288 	if (err)
7289 		goto err_fib6_entry_nexthop_add;
7290 
7291 	return 0;
7292 
7293 err_fib6_entry_nexthop_add:
7294 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7295 	return err;
7296 }
7297 
7298 static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7299 				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7300 				    struct fib6_info **rt_arr, unsigned int nrt6)
7301 {
7302 	struct mlxsw_sp_fib6_entry *fib6_entry;
7303 	struct mlxsw_sp_fib_node *fib_node;
7304 	struct fib6_info *rt = rt_arr[0];
7305 	int err;
7306 
7307 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7308 		return 0;
7309 
	/* Multipath routes are first added to the FIB trie and only then
	 * notified. If we vetoed the addition, we will get a delete
	 * notification for a route we do not have. Therefore, do not warn
	 * if the route was not found.
	 */
7315 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7316 	if (!fib6_entry)
7317 		return 0;
7318 
7319 	/* If not all the nexthops are deleted, then only reduce the nexthop
7320 	 * group.
7321 	 */
7322 	if (nrt6 != fib6_entry->nrt6) {
7323 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
7324 		return 0;
7325 	}
7326 
7327 	fib_node = fib6_entry->common.fib_node;
7328 
7329 	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
7330 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7331 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7332 	return err;
7333 }
7334 
7335 static struct mlxsw_sp_mr_table *
7336 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7337 {
7338 	if (family == RTNL_FAMILY_IPMR)
7339 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7340 	else
7341 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7342 }
7343 
7344 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7345 				     struct mfc_entry_notifier_info *men_info,
7346 				     bool replace)
7347 {
7348 	struct mlxsw_sp_mr_table *mrt;
7349 	struct mlxsw_sp_vr *vr;
7350 
7351 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7352 	if (IS_ERR(vr))
7353 		return PTR_ERR(vr);
7354 
7355 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7356 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7357 }
7358 
7359 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7360 				      struct mfc_entry_notifier_info *men_info)
7361 {
7362 	struct mlxsw_sp_mr_table *mrt;
7363 	struct mlxsw_sp_vr *vr;
7364 
7365 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7366 	if (WARN_ON(!vr))
7367 		return;
7368 
7369 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7370 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7371 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7372 }
7373 
7374 static int
7375 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7376 			      struct vif_entry_notifier_info *ven_info)
7377 {
7378 	struct mlxsw_sp_mr_table *mrt;
7379 	struct mlxsw_sp_rif *rif;
7380 	struct mlxsw_sp_vr *vr;
7381 
7382 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7383 	if (IS_ERR(vr))
7384 		return PTR_ERR(vr);
7385 
7386 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7387 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7388 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7389 				   ven_info->vif_index,
7390 				   ven_info->vif_flags, rif);
7391 }
7392 
7393 static void
7394 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7395 			      struct vif_entry_notifier_info *ven_info)
7396 {
7397 	struct mlxsw_sp_mr_table *mrt;
7398 	struct mlxsw_sp_vr *vr;
7399 
7400 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7401 	if (WARN_ON(!vr))
7402 		return;
7403 
7404 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7405 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7406 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7407 }
7408 
7409 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7410 				     struct mlxsw_sp_fib_node *fib_node)
7411 {
7412 	struct mlxsw_sp_fib4_entry *fib4_entry;
7413 
7414 	fib4_entry = container_of(fib_node->fib_entry,
7415 				  struct mlxsw_sp_fib4_entry, common);
7416 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7417 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7418 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7419 }
7420 
7421 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7422 				     struct mlxsw_sp_fib_node *fib_node)
7423 {
7424 	struct mlxsw_sp_fib6_entry *fib6_entry;
7425 
7426 	fib6_entry = container_of(fib_node->fib_entry,
7427 				  struct mlxsw_sp_fib6_entry, common);
7428 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7429 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7430 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7431 }
7432 
7433 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7434 				    struct mlxsw_sp_fib_node *fib_node)
7435 {
7436 	switch (fib_node->fib->proto) {
7437 	case MLXSW_SP_L3_PROTO_IPV4:
7438 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7439 		break;
7440 	case MLXSW_SP_L3_PROTO_IPV6:
7441 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7442 		break;
7443 	}
7444 }
7445 
7446 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7447 				  struct mlxsw_sp_vr *vr,
7448 				  enum mlxsw_sp_l3proto proto)
7449 {
7450 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7451 	struct mlxsw_sp_fib_node *fib_node, *tmp;
7452 
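	/* Flushing the last node can free the FIB itself, so determine
	 * whether to stop before the node is flushed instead of
	 * dereferencing the list head afterwards.
	 */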
7453 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7454 		bool do_break = &tmp->list == &fib->node_list;
7455 
7456 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7457 		if (do_break)
7458 			break;
7459 	}
7460 }
7461 
7462 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7463 {
7464 	int i, j;
7465 
7466 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
7467 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7468 
7469 		if (!mlxsw_sp_vr_is_used(vr))
7470 			continue;
7471 
7472 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7473 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7474 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7475 
		/* If the virtual router was only used for IPv4, then it's
		 * no longer used.
		 */
7479 		if (!mlxsw_sp_vr_is_used(vr))
7480 			continue;
7481 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7482 	}
7483 }
7484 
7485 struct mlxsw_sp_fib6_event {
7486 	struct fib6_info **rt_arr;
7487 	unsigned int nrt6;
7488 };
7489 
7490 struct mlxsw_sp_fib_event {
7491 	struct list_head list; /* node in fib queue */
7492 	union {
7493 		struct mlxsw_sp_fib6_event fib6_event;
7494 		struct fib_entry_notifier_info fen_info;
7495 		struct fib_rule_notifier_info fr_info;
7496 		struct fib_nh_notifier_info fnh_info;
7497 		struct mfc_entry_notifier_info men_info;
7498 		struct vif_entry_notifier_info ven_info;
7499 	};
7500 	struct mlxsw_sp *mlxsw_sp;
7501 	unsigned long event;
7502 	int family;
7503 };
7504 
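/* Snapshot the route and all of its siblings, taking a reference on each,
 * since the event is processed asynchronously in a work item. GFP_ATOMIC is
 * used because this is called from the FIB notifier chain under RCU.
 */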
7505 static int
7506 mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
7507 				struct fib6_entry_notifier_info *fen6_info)
7508 {
7509 	struct fib6_info *rt = fen6_info->rt;
7510 	struct fib6_info **rt_arr;
7511 	struct fib6_info *iter;
7512 	unsigned int nrt6;
7513 	int i = 0;
7514 
7515 	nrt6 = fen6_info->nsiblings + 1;
7516 
7517 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7518 	if (!rt_arr)
7519 		return -ENOMEM;
7520 
7521 	fib6_event->rt_arr = rt_arr;
7522 	fib6_event->nrt6 = nrt6;
7523 
7524 	rt_arr[0] = rt;
7525 	fib6_info_hold(rt);
7526 
7527 	if (!fen6_info->nsiblings)
7528 		return 0;
7529 
7530 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7531 		if (i == fen6_info->nsiblings)
7532 			break;
7533 
7534 		rt_arr[i + 1] = iter;
7535 		fib6_info_hold(iter);
7536 		i++;
7537 	}
7538 	WARN_ON_ONCE(i != fen6_info->nsiblings);
7539 
7540 	return 0;
7541 }
7542 
7543 static void
7544 mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
7545 {
7546 	int i;
7547 
7548 	for (i = 0; i < fib6_event->nrt6; i++)
7549 		mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
7550 	kfree(fib6_event->rt_arr);
7551 }
7552 
7553 static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
7554 					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7555 					       struct mlxsw_sp_fib_event *fib_event)
7556 {
7557 	int err;
7558 
7559 	mlxsw_sp_span_respin(mlxsw_sp);
7560 
7561 	switch (fib_event->event) {
7562 	case FIB_EVENT_ENTRY_REPLACE:
7563 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
7564 		if (err) {
7565 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7566 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7567 			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7568 							      &fib_event->fen_info);
7569 		}
7570 		fib_info_put(fib_event->fen_info.fi);
7571 		break;
7572 	case FIB_EVENT_ENTRY_DEL:
7573 		err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
7574 		if (err)
7575 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7576 		fib_info_put(fib_event->fen_info.fi);
7577 		break;
7578 	case FIB_EVENT_NH_ADD:
7579 	case FIB_EVENT_NH_DEL:
7580 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
7581 		fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
7582 		break;
7583 	}
7584 }
7585 
7586 static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
7587 					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
7588 					       struct mlxsw_sp_fib_event *fib_event)
7589 {
7590 	struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
7591 	int err;
7592 
7593 	mlxsw_sp_span_respin(mlxsw_sp);
7594 
7595 	switch (fib_event->event) {
7596 	case FIB_EVENT_ENTRY_REPLACE:
7597 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7598 						   fib_event->fib6_event.nrt6);
7599 		if (err) {
7600 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7601 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7602 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7603 							      fib6_event->rt_arr,
7604 							      fib6_event->nrt6);
7605 		}
7606 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7607 		break;
7608 	case FIB_EVENT_ENTRY_APPEND:
7609 		err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7610 						  fib_event->fib6_event.nrt6);
7611 		if (err) {
7612 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7613 			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7614 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7615 							      fib6_event->rt_arr,
7616 							      fib6_event->nrt6);
7617 		}
7618 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7619 		break;
7620 	case FIB_EVENT_ENTRY_DEL:
7621 		err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
7622 					       fib_event->fib6_event.nrt6);
7623 		if (err)
7624 			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
7625 		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
7626 		break;
7627 	}
7628 }
7629 
7630 static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
7631 						struct mlxsw_sp_fib_event *fib_event)
7632 {
7633 	bool replace;
7634 	int err;
7635 
7636 	rtnl_lock();
7637 	mutex_lock(&mlxsw_sp->router->lock);
7638 	switch (fib_event->event) {
7639 	case FIB_EVENT_ENTRY_REPLACE:
7640 	case FIB_EVENT_ENTRY_ADD:
7641 		replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
7642 
7643 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
7644 		if (err)
7645 			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7646 		mr_cache_put(fib_event->men_info.mfc);
7647 		break;
7648 	case FIB_EVENT_ENTRY_DEL:
7649 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
7650 		mr_cache_put(fib_event->men_info.mfc);
7651 		break;
7652 	case FIB_EVENT_VIF_ADD:
7653 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7654 						    &fib_event->ven_info);
7655 		if (err)
7656 			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7657 		dev_put(fib_event->ven_info.dev);
7658 		break;
7659 	case FIB_EVENT_VIF_DEL:
7660 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
7661 		dev_put(fib_event->ven_info.dev);
7662 		break;
7663 	}
7664 	mutex_unlock(&mlxsw_sp->router->lock);
7665 	rtnl_unlock();
7666 }
7667 
7668 static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
7669 {
7670 	struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
7671 	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
7672 	struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
7673 	struct mlxsw_sp_fib_event *next_fib_event;
7674 	struct mlxsw_sp_fib_event *fib_event;
7675 	int last_family = AF_UNSPEC;
7676 	LIST_HEAD(fib_event_queue);
7677 
7678 	spin_lock_bh(&router->fib_event_queue_lock);
7679 	list_splice_init(&router->fib_event_queue, &fib_event_queue);
7680 	spin_unlock_bh(&router->fib_event_queue_lock);
7681 
	/* The router lock is held here to make sure the per-instance
	 * operation context is not used in between the processing of
	 * FIB4/6 events.
	 */
7686 	mutex_lock(&router->lock);
7687 	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
7688 	list_for_each_entry_safe(fib_event, next_fib_event,
7689 				 &fib_event_queue, list) {
		/* Check if the next entry in the queue exists and is of
		 * the same type (family and event) as the current one.
		 * In that case it is permitted to bulk multiple FIB
		 * entries into a single register write.
		 */
7695 		op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
7696 				  fib_event->family == next_fib_event->family &&
7697 				  fib_event->event == next_fib_event->event;
7698 		op_ctx->event = fib_event->event;
7699 
		/* If the family of this entry differs from that of the
		 * previous one, the operation context needs to be
		 * reinitialized now; indicate that. Note that since
		 * last_family is initialized to AF_UNSPEC, this always
		 * happens for the first entry processed in the work.
		 */
7705 		if (fib_event->family != last_family)
7706 			op_ctx->initialized = false;
7707 
7708 		switch (fib_event->family) {
7709 		case AF_INET:
7710 			mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
7711 							   fib_event);
7712 			break;
7713 		case AF_INET6:
7714 			mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
7715 							   fib_event);
7716 			break;
7717 		case RTNL_FAMILY_IP6MR:
7718 		case RTNL_FAMILY_IPMR:
7719 			/* Unlock here as inside FIBMR the lock is taken again
7720 			 * under RTNL. The per-instance operation context
7721 			 * is not used by FIBMR.
7722 			 */
7723 			mutex_unlock(&router->lock);
7724 			mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
7725 							    fib_event);
7726 			mutex_lock(&router->lock);
7727 			break;
7728 		default:
7729 			WARN_ON_ONCE(1);
7730 		}
7731 		last_family = fib_event->family;
7732 		kfree(fib_event);
7733 		cond_resched();
7734 	}
7735 	WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
7736 	mutex_unlock(&router->lock);
7737 }
7738 
7739 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
7740 				       struct fib_notifier_info *info)
7741 {
7742 	struct fib_entry_notifier_info *fen_info;
7743 	struct fib_nh_notifier_info *fnh_info;
7744 
7745 	switch (fib_event->event) {
7746 	case FIB_EVENT_ENTRY_REPLACE:
7747 	case FIB_EVENT_ENTRY_DEL:
7748 		fen_info = container_of(info, struct fib_entry_notifier_info,
7749 					info);
7750 		fib_event->fen_info = *fen_info;
		/* Take a reference on the fib_info to prevent it from being
		 * freed while the event is queued. Release it afterwards.
		 */
7754 		fib_info_hold(fib_event->fen_info.fi);
7755 		break;
7756 	case FIB_EVENT_NH_ADD:
7757 	case FIB_EVENT_NH_DEL:
7758 		fnh_info = container_of(info, struct fib_nh_notifier_info,
7759 					info);
7760 		fib_event->fnh_info = *fnh_info;
7761 		fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
7762 		break;
7763 	}
7764 }
7765 
7766 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
7767 				      struct fib_notifier_info *info)
7768 {
7769 	struct fib6_entry_notifier_info *fen6_info;
7770 	int err;
7771 
7772 	switch (fib_event->event) {
7773 	case FIB_EVENT_ENTRY_REPLACE:
7774 	case FIB_EVENT_ENTRY_APPEND:
7775 	case FIB_EVENT_ENTRY_DEL:
7776 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
7777 					 info);
7778 		err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
7779 						      fen6_info);
7780 		if (err)
7781 			return err;
7782 		break;
7783 	}
7784 
7785 	return 0;
7786 }
7787 
7788 static void
7789 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
7790 			    struct fib_notifier_info *info)
7791 {
7792 	switch (fib_event->event) {
7793 	case FIB_EVENT_ENTRY_REPLACE:
7794 	case FIB_EVENT_ENTRY_ADD:
7795 	case FIB_EVENT_ENTRY_DEL:
7796 		memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
7797 		mr_cache_hold(fib_event->men_info.mfc);
7798 		break;
7799 	case FIB_EVENT_VIF_ADD:
7800 	case FIB_EVENT_VIF_DEL:
7801 		memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
7802 		dev_hold(fib_event->ven_info.dev);
7803 		break;
7804 	}
7805 }
7806 
7807 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7808 					  struct fib_notifier_info *info,
7809 					  struct mlxsw_sp *mlxsw_sp)
7810 {
7811 	struct netlink_ext_ack *extack = info->extack;
7812 	struct fib_rule_notifier_info *fr_info;
7813 	struct fib_rule *rule;
7814 	int err = 0;
7815 
7816 	/* nothing to do at the moment */
7817 	if (event == FIB_EVENT_RULE_DEL)
7818 		return 0;
7819 
7820 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
7821 	rule = fr_info->rule;
7822 
7823 	/* Rule only affects locally generated traffic */
7824 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7825 		return 0;
7826 
7827 	switch (info->family) {
7828 	case AF_INET:
7829 		if (!fib4_rule_default(rule) && !rule->l3mdev)
7830 			err = -EOPNOTSUPP;
7831 		break;
7832 	case AF_INET6:
7833 		if (!fib6_rule_default(rule) && !rule->l3mdev)
7834 			err = -EOPNOTSUPP;
7835 		break;
7836 	case RTNL_FAMILY_IPMR:
7837 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
7838 			err = -EOPNOTSUPP;
7839 		break;
7840 	case RTNL_FAMILY_IP6MR:
7841 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7842 			err = -EOPNOTSUPP;
7843 		break;
7844 	}
7845 
7846 	if (err < 0)
7847 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7848 
7849 	return err;
7850 }
7851 
7852 /* Called with rcu_read_lock() */
7853 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7854 				     unsigned long event, void *ptr)
7855 {
7856 	struct mlxsw_sp_fib_event *fib_event;
7857 	struct fib_notifier_info *info = ptr;
7858 	struct mlxsw_sp_router *router;
7859 	int err;
7860 
	if (info->family != AF_INET && info->family != AF_INET6 &&
	    info->family != RTNL_FAMILY_IPMR &&
	    info->family != RTNL_FAMILY_IP6MR)
7864 		return NOTIFY_DONE;
7865 
7866 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7867 
7868 	switch (event) {
7869 	case FIB_EVENT_RULE_ADD:
7870 	case FIB_EVENT_RULE_DEL:
7871 		err = mlxsw_sp_router_fib_rule_event(event, info,
7872 						     router->mlxsw_sp);
7873 		return notifier_from_errno(err);
7874 	case FIB_EVENT_ENTRY_ADD:
7875 	case FIB_EVENT_ENTRY_REPLACE:
7876 	case FIB_EVENT_ENTRY_APPEND:
7877 		if (info->family == AF_INET) {
7878 			struct fib_entry_notifier_info *fen_info = ptr;
7879 
7880 			if (fen_info->fi->fib_nh_is_v6) {
7881 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7882 				return notifier_from_errno(-EINVAL);
7883 			}
7884 		}
7885 		break;
7886 	}
7887 
7888 	fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
7889 	if (!fib_event)
7890 		return NOTIFY_BAD;
7891 
7892 	fib_event->mlxsw_sp = router->mlxsw_sp;
7893 	fib_event->event = event;
7894 	fib_event->family = info->family;
7895 
7896 	switch (info->family) {
7897 	case AF_INET:
7898 		mlxsw_sp_router_fib4_event(fib_event, info);
7899 		break;
7900 	case AF_INET6:
7901 		err = mlxsw_sp_router_fib6_event(fib_event, info);
7902 		if (err)
7903 			goto err_fib_event;
7904 		break;
7905 	case RTNL_FAMILY_IP6MR:
7906 	case RTNL_FAMILY_IPMR:
7907 		mlxsw_sp_router_fibmr_event(fib_event, info);
7908 		break;
7909 	}
7910 
7911 	/* Enqueue the event and trigger the work */
7912 	spin_lock_bh(&router->fib_event_queue_lock);
7913 	list_add_tail(&fib_event->list, &router->fib_event_queue);
7914 	spin_unlock_bh(&router->fib_event_queue_lock);
7915 	mlxsw_core_schedule_work(&router->fib_event_work);
7916 
7917 	return NOTIFY_DONE;
7918 
7919 err_fib_event:
7920 	kfree(fib_event);
7921 	return NOTIFY_BAD;
7922 }
7923 
7924 static struct mlxsw_sp_rif *
7925 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7926 			 const struct net_device *dev)
7927 {
7928 	int i;
7929 
7930 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7931 		if (mlxsw_sp->router->rifs[i] &&
7932 		    mlxsw_sp->router->rifs[i]->dev == dev)
7933 			return mlxsw_sp->router->rifs[i];
7934 
7935 	return NULL;
7936 }
7937 
7938 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7939 			 const struct net_device *dev)
7940 {
7941 	struct mlxsw_sp_rif *rif;
7942 
7943 	mutex_lock(&mlxsw_sp->router->lock);
7944 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7945 	mutex_unlock(&mlxsw_sp->router->lock);
7946 
	return !!rif;
7948 }
7949 
7950 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7951 {
7952 	struct mlxsw_sp_rif *rif;
7953 	u16 vid = 0;
7954 
7955 	mutex_lock(&mlxsw_sp->router->lock);
7956 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7957 	if (!rif)
7958 		goto out;
7959 
7960 	/* We only return the VID for VLAN RIFs. Otherwise we return an
7961 	 * invalid value (0).
7962 	 */
7963 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7964 		goto out;
7965 
7966 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7967 
7968 out:
7969 	mutex_unlock(&mlxsw_sp->router->lock);
7970 	return vid;
7971 }
7972 
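/* Disable a RIF in the device without freeing its index: read back the
 * current RITR configuration and write it again with the enable bit cleared.
 */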
7973 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7974 {
7975 	char ritr_pl[MLXSW_REG_RITR_LEN];
7976 	int err;
7977 
7978 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
7979 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7980 	if (err)
7981 		return err;
7982 
7983 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
7984 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7985 }
7986 
7987 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
7988 					  struct mlxsw_sp_rif *rif)
7989 {
7990 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
7991 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
7992 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
7993 }
7994 
7995 static bool
7996 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
7997 			   unsigned long event)
7998 {
7999 	struct inet6_dev *inet6_dev;
8000 	bool addr_list_empty = true;
8001 	struct in_device *idev;
8002 
8003 	switch (event) {
8004 	case NETDEV_UP:
8005 		return rif == NULL;
8006 	case NETDEV_DOWN:
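		/* Only consider the RIF for removal once the last IPv4 and
		 * IPv6 address has been removed from the netdev.
		 */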
8007 		rcu_read_lock();
8008 		idev = __in_dev_get_rcu(dev);
8009 		if (idev && idev->ifa_list)
8010 			addr_list_empty = false;
8011 
8012 		inet6_dev = __in6_dev_get(dev);
8013 		if (addr_list_empty && inet6_dev &&
8014 		    !list_empty(&inet6_dev->addr_list))
8015 			addr_list_empty = false;
8016 		rcu_read_unlock();
8017 
		/* macvlans do not have a RIF, but rather piggyback on the
		 * RIF of their lower device.
		 */
8021 		if (netif_is_macvlan(dev) && addr_list_empty)
8022 			return true;
8023 
8024 		if (rif && addr_list_empty &&
8025 		    !netif_is_l3_slave(rif->dev))
8026 			return true;
8027 		/* It is possible we already removed the RIF ourselves
8028 		 * if it was assigned to a netdev that is now a bridge
8029 		 * or LAG slave.
8030 		 */
8031 		return false;
8032 	}
8033 
8034 	return false;
8035 }
8036 
8037 static enum mlxsw_sp_rif_type
8038 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8039 		      const struct net_device *dev)
8040 {
8041 	enum mlxsw_sp_fid_type type;
8042 
8043 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8044 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
8045 
8046 	/* Otherwise RIF type is derived from the type of the underlying FID. */
8047 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8048 		type = MLXSW_SP_FID_TYPE_8021Q;
8049 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8050 		type = MLXSW_SP_FID_TYPE_8021Q;
8051 	else if (netif_is_bridge_master(dev))
8052 		type = MLXSW_SP_FID_TYPE_8021D;
8053 	else
8054 		type = MLXSW_SP_FID_TYPE_RFID;
8055 
8056 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8057 }
8058 
8059 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
8060 {
8061 	int i;
8062 
8063 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
8064 		if (!mlxsw_sp->router->rifs[i]) {
8065 			*p_rif_index = i;
8066 			return 0;
8067 		}
8068 	}
8069 
8070 	return -ENOBUFS;
8071 }
8072 
8073 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8074 					       u16 vr_id,
8075 					       struct net_device *l3_dev)
8076 {
8077 	struct mlxsw_sp_rif *rif;
8078 
8079 	rif = kzalloc(rif_size, GFP_KERNEL);
8080 	if (!rif)
8081 		return NULL;
8082 
8083 	INIT_LIST_HEAD(&rif->nexthop_list);
8084 	INIT_LIST_HEAD(&rif->neigh_list);
8085 	if (l3_dev) {
8086 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
8087 		rif->mtu = l3_dev->mtu;
8088 		rif->dev = l3_dev;
8089 	}
8090 	rif->vr_id = vr_id;
8091 	rif->rif_index = rif_index;
8092 
8093 	return rif;
8094 }
8095 
8096 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8097 					   u16 rif_index)
8098 {
8099 	return mlxsw_sp->router->rifs[rif_index];
8100 }
8101 
8102 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8103 {
8104 	return rif->rif_index;
8105 }
8106 
8107 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8108 {
8109 	return lb_rif->common.rif_index;
8110 }
8111 
8112 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8113 {
8114 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
8115 	struct mlxsw_sp_vr *ul_vr;
8116 
8117 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8118 	if (WARN_ON(IS_ERR(ul_vr)))
8119 		return 0;
8120 
8121 	return ul_vr->id;
8122 }
8123 
8124 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8125 {
8126 	return lb_rif->ul_rif_id;
8127 }
8128 
8129 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8130 {
8131 	return rif->dev->ifindex;
8132 }
8133 
8134 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
8135 {
8136 	return rif->dev;
8137 }
8138 
8139 static struct mlxsw_sp_rif *
8140 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8141 		    const struct mlxsw_sp_rif_params *params,
8142 		    struct netlink_ext_ack *extack)
8143 {
8144 	u32 tb_id = l3mdev_fib_table(params->dev);
8145 	const struct mlxsw_sp_rif_ops *ops;
8146 	struct mlxsw_sp_fid *fid = NULL;
8147 	enum mlxsw_sp_rif_type type;
8148 	struct mlxsw_sp_rif *rif;
8149 	struct mlxsw_sp_vr *vr;
8150 	u16 rif_index;
8151 	int i, err;
8152 
8153 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8154 	ops = mlxsw_sp->router->rif_ops_arr[type];
8155 
8156 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8157 	if (IS_ERR(vr))
8158 		return ERR_CAST(vr);
8159 	vr->rif_count++;
8160 
8161 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
8162 	if (err) {
8163 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8164 		goto err_rif_index_alloc;
8165 	}
8166 
8167 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
8168 	if (!rif) {
8169 		err = -ENOMEM;
8170 		goto err_rif_alloc;
8171 	}
8172 	dev_hold(rif->dev);
8173 	mlxsw_sp->router->rifs[rif_index] = rif;
8174 	rif->mlxsw_sp = mlxsw_sp;
8175 	rif->ops = ops;
8176 
8177 	if (ops->fid_get) {
8178 		fid = ops->fid_get(rif, extack);
8179 		if (IS_ERR(fid)) {
8180 			err = PTR_ERR(fid);
8181 			goto err_fid_get;
8182 		}
8183 		rif->fid = fid;
8184 	}
8185 
8186 	if (ops->setup)
8187 		ops->setup(rif, params);
8188 
8189 	err = ops->configure(rif);
8190 	if (err)
8191 		goto err_configure;
8192 
8193 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8194 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8195 		if (err)
8196 			goto err_mr_rif_add;
8197 	}
8198 
8199 	mlxsw_sp_rif_counters_alloc(rif);
8200 
8201 	return rif;
8202 
8203 err_mr_rif_add:
8204 	for (i--; i >= 0; i--)
8205 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8206 	ops->deconfigure(rif);
8207 err_configure:
8208 	if (fid)
8209 		mlxsw_sp_fid_put(fid);
8210 err_fid_get:
8211 	mlxsw_sp->router->rifs[rif_index] = NULL;
8212 	dev_put(rif->dev);
8213 	kfree(rif);
8214 err_rif_alloc:
8215 err_rif_index_alloc:
8216 	vr->rif_count--;
8217 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8218 	return ERR_PTR(err);
8219 }
8220 
8221 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8222 {
8223 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
8224 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8225 	struct mlxsw_sp_fid *fid = rif->fid;
8226 	struct mlxsw_sp_vr *vr;
8227 	int i;
8228 
8229 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8230 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
8231 
8232 	mlxsw_sp_rif_counters_free(rif);
8233 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8234 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8235 	ops->deconfigure(rif);
8236 	if (fid)
8237 		/* Loopback RIFs are not associated with a FID. */
8238 		mlxsw_sp_fid_put(fid);
8239 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8240 	dev_put(rif->dev);
8241 	kfree(rif);
8242 	vr->rif_count--;
8243 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8244 }
8245 
8246 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8247 				 struct net_device *dev)
8248 {
8249 	struct mlxsw_sp_rif *rif;
8250 
8251 	mutex_lock(&mlxsw_sp->router->lock);
8252 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8253 	if (!rif)
8254 		goto out;
8255 	mlxsw_sp_rif_destroy(rif);
8256 out:
8257 	mutex_unlock(&mlxsw_sp->router->lock);
8258 }
8259 
8260 static void
8261 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8262 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8263 {
8264 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8265 
8266 	params->vid = mlxsw_sp_port_vlan->vid;
8267 	params->lag = mlxsw_sp_port->lagged;
8268 	if (params->lag)
8269 		params->lag_id = mlxsw_sp_port->lag_id;
8270 	else
8271 		params->system_port = mlxsw_sp_port->local_port;
8272 }
8273 
8274 static struct mlxsw_sp_rif_subport *
8275 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8276 {
8277 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
8278 }
8279 
8280 static struct mlxsw_sp_rif *
8281 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8282 			 const struct mlxsw_sp_rif_params *params,
8283 			 struct netlink_ext_ack *extack)
8284 {
8285 	struct mlxsw_sp_rif_subport *rif_subport;
8286 	struct mlxsw_sp_rif *rif;
8287 
8288 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8289 	if (!rif)
8290 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8291 
8292 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8293 	refcount_inc(&rif_subport->ref_count);
8294 	return rif;
8295 }
8296 
8297 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8298 {
8299 	struct mlxsw_sp_rif_subport *rif_subport;
8300 
8301 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8302 	if (!refcount_dec_and_test(&rif_subport->ref_count))
8303 		return;
8304 
8305 	mlxsw_sp_rif_destroy(rif);
8306 }
8307 
8308 static int
8309 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8310 				 struct net_device *l3_dev,
8311 				 struct netlink_ext_ack *extack)
8312 {
8313 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8314 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8315 	struct mlxsw_sp_rif_params params = {
8316 		.dev = l3_dev,
8317 	};
8318 	u16 vid = mlxsw_sp_port_vlan->vid;
8319 	struct mlxsw_sp_rif *rif;
8320 	struct mlxsw_sp_fid *fid;
8321 	int err;
8322 
8323 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8324 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8325 	if (IS_ERR(rif))
8326 		return PTR_ERR(rif);
8327 
8328 	/* FID was already created, just take a reference */
8329 	fid = rif->ops->fid_get(rif, extack);
8330 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8331 	if (err)
8332 		goto err_fid_port_vid_map;
8333 
8334 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8335 	if (err)
8336 		goto err_port_vid_learning_set;
8337 
8338 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8339 					BR_STATE_FORWARDING);
8340 	if (err)
8341 		goto err_port_vid_stp_set;
8342 
8343 	mlxsw_sp_port_vlan->fid = fid;
8344 
8345 	return 0;
8346 
8347 err_port_vid_stp_set:
8348 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8349 err_port_vid_learning_set:
8350 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8351 err_fid_port_vid_map:
8352 	mlxsw_sp_fid_put(fid);
8353 	mlxsw_sp_rif_subport_put(rif);
8354 	return err;
8355 }
8356 
8357 static void
8358 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8359 {
8360 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8361 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8362 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8363 	u16 vid = mlxsw_sp_port_vlan->vid;
8364 
8365 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8366 		return;
8367 
8368 	mlxsw_sp_port_vlan->fid = NULL;
8369 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8370 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8371 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8372 	mlxsw_sp_fid_put(fid);
8373 	mlxsw_sp_rif_subport_put(rif);
8374 }
8375 
8376 int
8377 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8378 			       struct net_device *l3_dev,
8379 			       struct netlink_ext_ack *extack)
8380 {
8381 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8382 	struct mlxsw_sp_rif *rif;
8383 	int err = 0;
8384 
8385 	mutex_lock(&mlxsw_sp->router->lock);
8386 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8387 	if (!rif)
8388 		goto out;
8389 
8390 	err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8391 					       extack);
8392 out:
8393 	mutex_unlock(&mlxsw_sp->router->lock);
8394 	return err;
8395 }
8396 
8397 void
8398 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8399 {
8400 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8401 
8402 	mutex_lock(&mlxsw_sp->router->lock);
8403 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8404 	mutex_unlock(&mlxsw_sp->router->lock);
8405 }
8406 
8407 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8408 					     struct net_device *port_dev,
8409 					     unsigned long event, u16 vid,
8410 					     struct netlink_ext_ack *extack)
8411 {
8412 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8413 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8414 
8415 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8416 	if (WARN_ON(!mlxsw_sp_port_vlan))
8417 		return -EINVAL;
8418 
8419 	switch (event) {
8420 	case NETDEV_UP:
8421 		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
8422 							l3_dev, extack);
8423 	case NETDEV_DOWN:
8424 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8425 		break;
8426 	}
8427 
8428 	return 0;
8429 }
8430 
8431 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
8432 					unsigned long event,
8433 					struct netlink_ext_ack *extack)
8434 {
8435 	if (netif_is_bridge_port(port_dev) ||
8436 	    netif_is_lag_port(port_dev) ||
8437 	    netif_is_ovs_port(port_dev))
8438 		return 0;
8439 
8440 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
8441 						 MLXSW_SP_DEFAULT_VID, extack);
8442 }
8443 
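/* Replay the address event on each of the LAG's member ports. */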
8444 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
8445 					 struct net_device *lag_dev,
8446 					 unsigned long event, u16 vid,
8447 					 struct netlink_ext_ack *extack)
8448 {
8449 	struct net_device *port_dev;
8450 	struct list_head *iter;
8451 	int err;
8452 
8453 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
8454 		if (mlxsw_sp_port_dev_check(port_dev)) {
8455 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
8456 								port_dev,
8457 								event, vid,
8458 								extack);
8459 			if (err)
8460 				return err;
8461 		}
8462 	}
8463 
8464 	return 0;
8465 }
8466 
8467 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
8468 				       unsigned long event,
8469 				       struct netlink_ext_ack *extack)
8470 {
8471 	if (netif_is_bridge_port(lag_dev))
8472 		return 0;
8473 
8474 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
8475 					     MLXSW_SP_DEFAULT_VID, extack);
8476 }
8477 
8478 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8479 					  struct net_device *l3_dev,
8480 					  unsigned long event,
8481 					  struct netlink_ext_ack *extack)
8482 {
8483 	struct mlxsw_sp_rif_params params = {
8484 		.dev = l3_dev,
8485 	};
8486 	struct mlxsw_sp_rif *rif;
8487 
8488 	switch (event) {
8489 	case NETDEV_UP:
8490 		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
8491 			u16 proto;
8492 
8493 			br_vlan_get_proto(l3_dev, &proto);
8494 			if (proto == ETH_P_8021AD) {
8495 				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
8496 				return -EOPNOTSUPP;
8497 			}
8498 		}
8499 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8500 		if (IS_ERR(rif))
8501 			return PTR_ERR(rif);
8502 		break;
8503 	case NETDEV_DOWN:
8504 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8505 		mlxsw_sp_rif_destroy(rif);
8506 		break;
8507 	}
8508 
8509 	return 0;
8510 }
8511 
8512 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8513 					struct net_device *vlan_dev,
8514 					unsigned long event,
8515 					struct netlink_ext_ack *extack)
8516 {
8517 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8518 	u16 vid = vlan_dev_vlan_id(vlan_dev);
8519 
8520 	if (netif_is_bridge_port(vlan_dev))
8521 		return 0;
8522 
8523 	if (mlxsw_sp_port_dev_check(real_dev))
8524 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
8525 							 event, vid, extack);
8526 	else if (netif_is_lag_master(real_dev))
8527 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
8528 						     vid, extack);
8529 	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
8530 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
8531 						      extack);
8532 
8533 	return 0;
8534 }
8535 
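/* VRRP virtual routers use the well-known MACs 00-00-5E-00-01-{VRID} for
 * IPv4 and 00-00-5E-00-02-{VRID} for IPv6 (RFC 5798). The last byte holds
 * the VRID and is therefore masked out in the comparisons below.
 */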
8536 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
8537 {
8538 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
8539 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8540 
8541 	return ether_addr_equal_masked(mac, vrrp4, mask);
8542 }
8543 
8544 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
8545 {
8546 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
8547 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8548 
8549 	return ether_addr_equal_masked(mac, vrrp6, mask);
8550 }
8551 
8552 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8553 				const u8 *mac, bool adding)
8554 {
8555 	char ritr_pl[MLXSW_REG_RITR_LEN];
8556 	u8 vrrp_id = adding ? mac[5] : 0;
8557 	int err;
8558 
8559 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
8560 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
8561 		return 0;
8562 
8563 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8564 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8565 	if (err)
8566 		return err;
8567 
8568 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
8569 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
8570 	else
8571 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
8572 
8573 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8574 }
8575 
8576 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
8577 				    const struct net_device *macvlan_dev,
8578 				    struct netlink_ext_ack *extack)
8579 {
8580 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8581 	struct mlxsw_sp_rif *rif;
8582 	int err;
8583 
8584 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8585 	if (!rif) {
8586 		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
8587 		return -EOPNOTSUPP;
8588 	}
8589 
8590 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8591 				  mlxsw_sp_fid_index(rif->fid), true);
8592 	if (err)
8593 		return err;
8594 
8595 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
8596 				   macvlan_dev->dev_addr, true);
8597 	if (err)
8598 		goto err_rif_vrrp_add;
8599 
8600 	/* Make sure the bridge driver does not have this MAC pointing at
8601 	 * some other port.
8602 	 */
8603 	if (rif->ops->fdb_del)
8604 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
8605 
8606 	return 0;
8607 
8608 err_rif_vrrp_add:
8609 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8610 			    mlxsw_sp_fid_index(rif->fid), false);
8611 	return err;
8612 }
8613 
8614 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8615 				       const struct net_device *macvlan_dev)
8616 {
8617 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8618 	struct mlxsw_sp_rif *rif;
8619 
8620 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8621 	/* If we do not have a RIF, then we already took care of
8622 	 * removing the macvlan's MAC during RIF deletion.
8623 	 */
8624 	if (!rif)
8625 		return;
8626 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
8627 			     false);
8628 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8629 			    mlxsw_sp_fid_index(rif->fid), false);
8630 }
8631 
8632 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8633 			      const struct net_device *macvlan_dev)
8634 {
8635 	mutex_lock(&mlxsw_sp->router->lock);
8636 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8637 	mutex_unlock(&mlxsw_sp->router->lock);
8638 }
8639 
8640 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
8641 					   struct net_device *macvlan_dev,
8642 					   unsigned long event,
8643 					   struct netlink_ext_ack *extack)
8644 {
8645 	switch (event) {
8646 	case NETDEV_UP:
8647 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
8648 	case NETDEV_DOWN:
8649 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8650 		break;
8651 	}
8652 
8653 	return 0;
8654 }
8655 
8656 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
8657 					       struct net_device *dev,
8658 					       const unsigned char *dev_addr,
8659 					       struct netlink_ext_ack *extack)
8660 {
8661 	struct mlxsw_sp_rif *rif;
8662 	int i;
8663 
	/* A RIF is not created for macvlan netdevs. Their MAC is used to
	 * populate the FDB.
	 */
8667 	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
8668 		return 0;
8669 
8670 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
8671 		rif = mlxsw_sp->router->rifs[i];
8672 		if (rif && rif->ops &&
8673 		    rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
8674 			continue;
8675 		if (rif && rif->dev && rif->dev != dev &&
8676 		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
8677 					     mlxsw_sp->mac_mask)) {
8678 			NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
8679 			return -EINVAL;
8680 		}
8681 	}
8682 
8683 	return 0;
8684 }
8685 
8686 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
8687 				     struct net_device *dev,
8688 				     unsigned long event,
8689 				     struct netlink_ext_ack *extack)
8690 {
8691 	if (mlxsw_sp_port_dev_check(dev))
8692 		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
8693 	else if (netif_is_lag_master(dev))
8694 		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
8695 	else if (netif_is_bridge_master(dev))
8696 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
8697 						      extack);
8698 	else if (is_vlan_dev(dev))
8699 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
8700 						    extack);
8701 	else if (netif_is_macvlan(dev))
8702 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
8703 						       extack);
8704 	else
8705 		return 0;
8706 }
8707 
8708 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
8709 				   unsigned long event, void *ptr)
8710 {
8711 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
8712 	struct net_device *dev = ifa->ifa_dev->dev;
8713 	struct mlxsw_sp_router *router;
8714 	struct mlxsw_sp_rif *rif;
8715 	int err = 0;
8716 
8717 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
8718 	if (event == NETDEV_UP)
8719 		return NOTIFY_DONE;
8720 
8721 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
8722 	mutex_lock(&router->lock);
8723 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
8724 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8725 		goto out;
8726 
8727 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
8728 out:
8729 	mutex_unlock(&router->lock);
8730 	return notifier_from_errno(err);
8731 }
8732 
8733 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
8734 				  unsigned long event, void *ptr)
8735 {
8736 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
8737 	struct net_device *dev = ivi->ivi_dev->dev;
8738 	struct mlxsw_sp *mlxsw_sp;
8739 	struct mlxsw_sp_rif *rif;
8740 	int err = 0;
8741 
8742 	mlxsw_sp = mlxsw_sp_lower_get(dev);
8743 	if (!mlxsw_sp)
8744 		return NOTIFY_DONE;
8745 
8746 	mutex_lock(&mlxsw_sp->router->lock);
8747 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8748 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8749 		goto out;
8750 
8751 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
8752 						  ivi->extack);
8753 	if (err)
8754 		goto out;
8755 
8756 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
8757 out:
8758 	mutex_unlock(&mlxsw_sp->router->lock);
8759 	return notifier_from_errno(err);
8760 }
8761 
8762 struct mlxsw_sp_inet6addr_event_work {
8763 	struct work_struct work;
8764 	struct mlxsw_sp *mlxsw_sp;
8765 	struct net_device *dev;
8766 	unsigned long event;
8767 };
8768 
8769 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
8770 {
8771 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
8772 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
8773 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
8774 	struct net_device *dev = inet6addr_work->dev;
8775 	unsigned long event = inet6addr_work->event;
8776 	struct mlxsw_sp_rif *rif;
8777 
8778 	rtnl_lock();
8779 	mutex_lock(&mlxsw_sp->router->lock);
8780 
8781 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8782 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8783 		goto out;
8784 
8785 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
8786 out:
8787 	mutex_unlock(&mlxsw_sp->router->lock);
8788 	rtnl_unlock();
8789 	dev_put(dev);
8790 	kfree(inet6addr_work);
8791 }
8792 
8793 /* Called with rcu_read_lock() */
8794 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
8795 				    unsigned long event, void *ptr)
8796 {
8797 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
8798 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
8799 	struct net_device *dev = if6->idev->dev;
8800 	struct mlxsw_sp_router *router;
8801 
8802 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
8803 	if (event == NETDEV_UP)
8804 		return NOTIFY_DONE;
8805 
8806 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
8807 	if (!inet6addr_work)
8808 		return NOTIFY_BAD;
8809 
8810 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
8811 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
8812 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
8813 	inet6addr_work->dev = dev;
8814 	inet6addr_work->event = event;
8815 	dev_hold(dev);
8816 	mlxsw_core_schedule_work(&inet6addr_work->work);
8817 
8818 	return NOTIFY_DONE;
8819 }
8820 
8821 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
8822 				   unsigned long event, void *ptr)
8823 {
8824 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
8825 	struct net_device *dev = i6vi->i6vi_dev->dev;
8826 	struct mlxsw_sp *mlxsw_sp;
8827 	struct mlxsw_sp_rif *rif;
8828 	int err = 0;
8829 
8830 	mlxsw_sp = mlxsw_sp_lower_get(dev);
8831 	if (!mlxsw_sp)
8832 		return NOTIFY_DONE;
8833 
8834 	mutex_lock(&mlxsw_sp->router->lock);
8835 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8836 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
8837 		goto out;
8838 
8839 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
8840 						  i6vi->extack);
8841 	if (err)
8842 		goto out;
8843 
8844 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
8845 out:
8846 	mutex_unlock(&mlxsw_sp->router->lock);
8847 	return notifier_from_errno(err);
8848 }
8849 
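/* Update the MAC address and MTU of an existing RIF: read back the
 * current RITR record and rewrite it with the new values.
 */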
8850 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8851 			     const char *mac, int mtu)
8852 {
8853 	char ritr_pl[MLXSW_REG_RITR_LEN];
8854 	int err;
8855 
8856 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8857 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8858 	if (err)
8859 		return err;
8860 
8861 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
8862 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
8863 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
8864 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8865 }
8866 
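/* React to an address or MTU change on a RIF's netdev: withdraw the FDB
 * entry that directed the old MAC to the router, update the RIF itself,
 * then install an FDB entry for the new MAC, rolling back on failure.
 */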
8867 static int
8868 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
8869 				  struct mlxsw_sp_rif *rif)
8870 {
8871 	struct net_device *dev = rif->dev;
8872 	u16 fid_index;
8873 	int err;
8874 
8875 	fid_index = mlxsw_sp_fid_index(rif->fid);
8876 
8877 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
8878 	if (err)
8879 		return err;
8880 
8881 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
8882 				dev->mtu);
8883 	if (err)
8884 		goto err_rif_edit;
8885 
8886 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
8887 	if (err)
8888 		goto err_rif_fdb_op;
8889 
8890 	if (rif->mtu != dev->mtu) {
8891 		struct mlxsw_sp_vr *vr;
8892 		int i;
8893 
		/* Update the RIF's MTU in each of its virtual router's
		 * mr_tables. Unlike in unicast routing, a RIF cannot be
		 * shared between several multicast routing tables, so only
		 * this VR's instances are relevant.
		 */
8898 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
8899 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8900 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
8901 						   rif, dev->mtu);
8902 	}
8903 
8904 	ether_addr_copy(rif->addr, dev->dev_addr);
8905 	rif->mtu = dev->mtu;
8906 
8907 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
8908 
8909 	return 0;
8910 
8911 err_rif_fdb_op:
8912 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
8913 err_rif_edit:
8914 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
8915 	return err;
8916 }
8917 
8918 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
8919 			    struct netdev_notifier_pre_changeaddr_info *info)
8920 {
8921 	struct netlink_ext_ack *extack;
8922 
8923 	extack = netdev_notifier_info_to_extack(&info->info);
8924 	return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
8925 						   info->dev_addr, extack);
8926 }
8927 
8928 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
8929 					 unsigned long event, void *ptr)
8930 {
8931 	struct mlxsw_sp *mlxsw_sp;
8932 	struct mlxsw_sp_rif *rif;
8933 	int err = 0;
8934 
8935 	mlxsw_sp = mlxsw_sp_lower_get(dev);
8936 	if (!mlxsw_sp)
8937 		return 0;
8938 
8939 	mutex_lock(&mlxsw_sp->router->lock);
8940 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8941 	if (!rif)
8942 		goto out;
8943 
8944 	switch (event) {
8945 	case NETDEV_CHANGEMTU:
8946 	case NETDEV_CHANGEADDR:
8947 		err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
8948 		break;
8949 	case NETDEV_PRE_CHANGEADDR:
8950 		err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
8951 		break;
8952 	}
8953 
8954 out:
8955 	mutex_unlock(&mlxsw_sp->router->lock);
8956 	return err;
8957 }
8958 
8959 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
8960 				  struct net_device *l3_dev,
8961 				  struct netlink_ext_ack *extack)
8962 {
8963 	struct mlxsw_sp_rif *rif;
8964 
8965 	/* If netdev is already associated with a RIF, then we need to
8966 	 * destroy it and create a new one with the new virtual router ID.
8967 	 */
8968 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8969 	if (rif)
8970 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
8971 					  extack);
8972 
8973 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
8974 }
8975 
8976 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
8977 				    struct net_device *l3_dev)
8978 {
8979 	struct mlxsw_sp_rif *rif;
8980 
8981 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8982 	if (!rif)
8983 		return;
8984 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
8985 }
8986 
8987 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
8988 				 struct netdev_notifier_changeupper_info *info)
8989 {
8990 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
8991 	int err = 0;
8992 
8993 	/* We do not create a RIF for a macvlan, but only use it to
8994 	 * direct more MAC addresses to the router.
8995 	 */
8996 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
8997 		return 0;
8998 
8999 	mutex_lock(&mlxsw_sp->router->lock);
9000 	switch (event) {
9001 	case NETDEV_PRECHANGEUPPER:
9002 		break;
9003 	case NETDEV_CHANGEUPPER:
9004 		if (info->linking) {
9005 			struct netlink_ext_ack *extack;
9006 
9007 			extack = netdev_notifier_info_to_extack(&info->info);
9008 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
9009 		} else {
9010 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
9011 		}
9012 		break;
9013 	}
9014 	mutex_unlock(&mlxsw_sp->router->lock);
9015 
9016 	return err;
9017 }
9018 
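/* Before a RIF with macvlan uppers is destroyed, remove the FDB entries
 * that directed the macvlans' MAC addresses to the router.
 */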
9019 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
9020 					struct netdev_nested_priv *priv)
9021 {
9022 	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
9023 
9024 	if (!netif_is_macvlan(dev))
9025 		return 0;
9026 
9027 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9028 				   mlxsw_sp_fid_index(rif->fid), false);
9029 }
9030 
9031 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
9032 {
9033 	struct netdev_nested_priv priv = {
9034 		.data = (void *)rif,
9035 	};
9036 
9037 	if (!netif_is_macvlan_port(rif->dev))
9038 		return 0;
9039 
	netdev_warn(rif->dev, "Router interface is being deleted, upper macvlan devices will stop working\n");
9041 	return netdev_walk_all_upper_dev_rcu(rif->dev,
9042 					     __mlxsw_sp_rif_macvlan_flush, &priv);
9043 }
9044 
9045 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
9046 				       const struct mlxsw_sp_rif_params *params)
9047 {
9048 	struct mlxsw_sp_rif_subport *rif_subport;
9049 
9050 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
9051 	refcount_set(&rif_subport->ref_count, 1);
9052 	rif_subport->vid = params->vid;
9053 	rif_subport->lag = params->lag;
9054 	if (params->lag)
9055 		rif_subport->lag_id = params->lag_id;
9056 	else
9057 		rif_subport->system_port = params->system_port;
9058 }
9059 
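/* Write the RITR record of a sub-port RIF, binding it to the
 * {system port or LAG, VID} pair recorded at setup time.
 */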
9060 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
9061 {
9062 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9063 	struct mlxsw_sp_rif_subport *rif_subport;
9064 	char ritr_pl[MLXSW_REG_RITR_LEN];
9065 
9066 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
9067 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
9068 			    rif->rif_index, rif->vr_id, rif->dev->mtu);
9069 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9070 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
9071 				  rif_subport->lag ? rif_subport->lag_id :
9072 						     rif_subport->system_port,
9073 				  rif_subport->vid);
9074 
9075 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9076 }
9077 
9078 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
9079 {
9080 	int err;
9081 
9082 	err = mlxsw_sp_rif_subport_op(rif, true);
9083 	if (err)
9084 		return err;
9085 
9086 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9087 				  mlxsw_sp_fid_index(rif->fid), true);
9088 	if (err)
9089 		goto err_rif_fdb_op;
9090 
9091 	mlxsw_sp_fid_rif_set(rif->fid, rif);
9092 	return 0;
9093 
9094 err_rif_fdb_op:
9095 	mlxsw_sp_rif_subport_op(rif, false);
9096 	return err;
9097 }
9098 
9099 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
9100 {
9101 	struct mlxsw_sp_fid *fid = rif->fid;
9102 
9103 	mlxsw_sp_fid_rif_set(fid, NULL);
9104 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9105 			    mlxsw_sp_fid_index(fid), false);
9106 	mlxsw_sp_rif_macvlan_flush(rif);
9107 	mlxsw_sp_rif_subport_op(rif, false);
9108 }
9109 
9110 static struct mlxsw_sp_fid *
9111 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
9112 			     struct netlink_ext_ack *extack)
9113 {
9114 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
9115 }
9116 
9117 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
9118 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
9119 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
9120 	.setup			= mlxsw_sp_rif_subport_setup,
9121 	.configure		= mlxsw_sp_rif_subport_configure,
9122 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
9123 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
9124 };
9125 
9126 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
9127 				    enum mlxsw_reg_ritr_if_type type,
9128 				    u16 vid_fid, bool enable)
9129 {
9130 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9131 	char ritr_pl[MLXSW_REG_RITR_LEN];
9132 
9133 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
9134 			    rif->dev->mtu);
9135 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9136 	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
9137 
9138 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9139 }
9140 
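/* The "router port" is a virtual local port one past the last possible
 * front-panel port. Enabling MC/BC flooding towards it in a FID makes
 * flooded traffic reach the router (see mlxsw_sp_rif_fid_configure()).
 */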
9141 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
9142 {
9143 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
9144 }
9145 
9146 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
9147 {
9148 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9149 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9150 	int err;
9151 
9152 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
9153 				       true);
9154 	if (err)
9155 		return err;
9156 
9157 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9158 				     mlxsw_sp_router_port(mlxsw_sp), true);
9159 	if (err)
9160 		goto err_fid_mc_flood_set;
9161 
9162 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9163 				     mlxsw_sp_router_port(mlxsw_sp), true);
9164 	if (err)
9165 		goto err_fid_bc_flood_set;
9166 
9167 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9168 				  mlxsw_sp_fid_index(rif->fid), true);
9169 	if (err)
9170 		goto err_rif_fdb_op;
9171 
9172 	mlxsw_sp_fid_rif_set(rif->fid, rif);
9173 	return 0;
9174 
9175 err_rif_fdb_op:
9176 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9177 			       mlxsw_sp_router_port(mlxsw_sp), false);
9178 err_fid_bc_flood_set:
9179 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9180 			       mlxsw_sp_router_port(mlxsw_sp), false);
9181 err_fid_mc_flood_set:
9182 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
9183 	return err;
9184 }
9185 
9186 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
9187 {
9188 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9189 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9190 	struct mlxsw_sp_fid *fid = rif->fid;
9191 
9192 	mlxsw_sp_fid_rif_set(fid, NULL);
9193 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9194 			    mlxsw_sp_fid_index(fid), false);
9195 	mlxsw_sp_rif_macvlan_flush(rif);
9196 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9197 			       mlxsw_sp_router_port(mlxsw_sp), false);
9198 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9199 			       mlxsw_sp_router_port(mlxsw_sp), false);
9200 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
9201 }
9202 
9203 static struct mlxsw_sp_fid *
9204 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
9205 			 struct netlink_ext_ack *extack)
9206 {
9207 	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
9208 }
9209 
9210 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9211 {
9212 	struct switchdev_notifier_fdb_info info = {};
9213 	struct net_device *dev;
9214 
9215 	dev = br_fdb_find_port(rif->dev, mac, 0);
9216 	if (!dev)
9217 		return;
9218 
9219 	info.addr = mac;
9220 	info.vid = 0;
9221 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9222 				 NULL);
9223 }
9224 
9225 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
9226 	.type			= MLXSW_SP_RIF_TYPE_FID,
9227 	.rif_size		= sizeof(struct mlxsw_sp_rif),
9228 	.configure		= mlxsw_sp_rif_fid_configure,
9229 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
9230 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
9231 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
9232 };
9233 
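/* Resolve the 802.1Q FID for a VLAN RIF: use the VLAN device's VID when
 * the RIF netdev is a VLAN upper of a bridge, otherwise fall back to
 * the bridge's PVID.
 */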
9234 static struct mlxsw_sp_fid *
9235 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
9236 			  struct netlink_ext_ack *extack)
9237 {
9238 	struct net_device *br_dev;
9239 	u16 vid;
9240 	int err;
9241 
9242 	if (is_vlan_dev(rif->dev)) {
9243 		vid = vlan_dev_vlan_id(rif->dev);
9244 		br_dev = vlan_dev_real_dev(rif->dev);
9245 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
9246 			return ERR_PTR(-EINVAL);
9247 	} else {
9248 		err = br_vlan_get_pvid(rif->dev, &vid);
9249 		if (err < 0 || !vid) {
9250 			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
9251 			return ERR_PTR(-EINVAL);
9252 		}
9253 	}
9254 
9255 	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
9256 }
9257 
9258 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9259 {
9260 	struct switchdev_notifier_fdb_info info = {};
9261 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9262 	struct net_device *br_dev;
9263 	struct net_device *dev;
9264 
9265 	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
9266 	dev = br_fdb_find_port(br_dev, mac, vid);
9267 	if (!dev)
9268 		return;
9269 
9270 	info.addr = mac;
9271 	info.vid = vid;
9272 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9273 				 NULL);
9274 }
9275 
9276 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
9277 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
9278 	.rif_size		= sizeof(struct mlxsw_sp_rif),
9279 	.configure		= mlxsw_sp_rif_fid_configure,
9280 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
9281 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
9282 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
9283 };
9284 
9285 static struct mlxsw_sp_rif_ipip_lb *
9286 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
9287 {
9288 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
9289 }
9290 
9291 static void
9292 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
9293 			   const struct mlxsw_sp_rif_params *params)
9294 {
9295 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
9296 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
9297 
9298 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
9299 				 common);
9300 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
9301 	rif_lb->lb_config = params_lb->lb_config;
9302 }
9303 
9304 static int
9305 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
9306 {
9307 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9308 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9309 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9310 	struct mlxsw_sp_vr *ul_vr;
9311 	int err;
9312 
9313 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
9314 	if (IS_ERR(ul_vr))
9315 		return PTR_ERR(ul_vr);
9316 
9317 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
9318 	if (err)
9319 		goto err_loopback_op;
9320 
9321 	lb_rif->ul_vr_id = ul_vr->id;
9322 	lb_rif->ul_rif_id = 0;
9323 	++ul_vr->rif_count;
9324 	return 0;
9325 
9326 err_loopback_op:
9327 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9328 	return err;
9329 }
9330 
9331 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9332 {
9333 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9334 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9335 	struct mlxsw_sp_vr *ul_vr;
9336 
9337 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
9338 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
9339 
9340 	--ul_vr->rif_count;
9341 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9342 }
9343 
9344 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
9345 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
9346 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
9347 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
9348 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
9349 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
9350 };
9351 
9352 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
9353 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
9354 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
9355 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
9356 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
9357 };
9358 
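/* On Spectrum-2, an IP-in-IP loopback RIF references an underlay (UL)
 * RIF instead of an underlay virtual router. The UL RIF is a generic
 * loopback RIF in the underlay VR, shared by all users of that VR and
 * reference counted via the VR's ul_rif_refcnt.
 */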
9359 static int
9360 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
9361 {
9362 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9363 	char ritr_pl[MLXSW_REG_RITR_LEN];
9364 
9365 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
9366 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
9367 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
9368 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
9369 
9370 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9371 }
9372 
9373 static struct mlxsw_sp_rif *
9374 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
9375 		       struct netlink_ext_ack *extack)
9376 {
9377 	struct mlxsw_sp_rif *ul_rif;
9378 	u16 rif_index;
9379 	int err;
9380 
9381 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
9382 	if (err) {
9383 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
9384 		return ERR_PTR(err);
9385 	}
9386 
9387 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
9388 	if (!ul_rif)
9389 		return ERR_PTR(-ENOMEM);
9390 
9391 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
9392 	ul_rif->mlxsw_sp = mlxsw_sp;
9393 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
9394 	if (err)
9395 		goto ul_rif_op_err;
9396 
9397 	return ul_rif;
9398 
9399 ul_rif_op_err:
9400 	mlxsw_sp->router->rifs[rif_index] = NULL;
9401 	kfree(ul_rif);
9402 	return ERR_PTR(err);
9403 }
9404 
9405 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
9406 {
9407 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9408 
9409 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
9410 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
9411 	kfree(ul_rif);
9412 }
9413 
9414 static struct mlxsw_sp_rif *
9415 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
9416 		    struct netlink_ext_ack *extack)
9417 {
9418 	struct mlxsw_sp_vr *vr;
9419 	int err;
9420 
9421 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
9422 	if (IS_ERR(vr))
9423 		return ERR_CAST(vr);
9424 
9425 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
9426 		return vr->ul_rif;
9427 
9428 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
9429 	if (IS_ERR(vr->ul_rif)) {
9430 		err = PTR_ERR(vr->ul_rif);
9431 		goto err_ul_rif_create;
9432 	}
9433 
9434 	vr->rif_count++;
9435 	refcount_set(&vr->ul_rif_refcnt, 1);
9436 
9437 	return vr->ul_rif;
9438 
9439 err_ul_rif_create:
9440 	mlxsw_sp_vr_put(mlxsw_sp, vr);
9441 	return ERR_PTR(err);
9442 }
9443 
9444 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
9445 {
9446 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9447 	struct mlxsw_sp_vr *vr;
9448 
9449 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
9450 
9451 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
9452 		return;
9453 
9454 	vr->rif_count--;
9455 	mlxsw_sp_ul_rif_destroy(ul_rif);
9456 	mlxsw_sp_vr_put(mlxsw_sp, vr);
9457 }
9458 
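/* Locked wrappers around the UL RIF get/put for code outside the router
 * that needs to hold an underlay RIF by its index.
 */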
9459 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
9460 			       u16 *ul_rif_index)
9461 {
9462 	struct mlxsw_sp_rif *ul_rif;
9463 	int err = 0;
9464 
9465 	mutex_lock(&mlxsw_sp->router->lock);
9466 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
9467 	if (IS_ERR(ul_rif)) {
9468 		err = PTR_ERR(ul_rif);
9469 		goto out;
9470 	}
9471 	*ul_rif_index = ul_rif->rif_index;
9472 out:
9473 	mutex_unlock(&mlxsw_sp->router->lock);
9474 	return err;
9475 }
9476 
9477 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
9478 {
9479 	struct mlxsw_sp_rif *ul_rif;
9480 
9481 	mutex_lock(&mlxsw_sp->router->lock);
9482 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
9483 	if (WARN_ON(!ul_rif))
9484 		goto out;
9485 
9486 	mlxsw_sp_ul_rif_put(ul_rif);
9487 out:
9488 	mutex_unlock(&mlxsw_sp->router->lock);
9489 }
9490 
9491 static int
9492 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
9493 {
9494 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9495 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9496 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9497 	struct mlxsw_sp_rif *ul_rif;
9498 	int err;
9499 
9500 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
9501 	if (IS_ERR(ul_rif))
9502 		return PTR_ERR(ul_rif);
9503 
9504 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
9505 	if (err)
9506 		goto err_loopback_op;
9507 
9508 	lb_rif->ul_vr_id = 0;
9509 	lb_rif->ul_rif_id = ul_rif->rif_index;
9510 
9511 	return 0;
9512 
9513 err_loopback_op:
9514 	mlxsw_sp_ul_rif_put(ul_rif);
9515 	return err;
9516 }
9517 
9518 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9519 {
9520 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9521 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9522 	struct mlxsw_sp_rif *ul_rif;
9523 
9524 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
9525 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
9526 	mlxsw_sp_ul_rif_put(ul_rif);
9527 }
9528 
9529 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
9530 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
9531 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
9532 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
9533 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
9534 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
9535 };
9536 
9537 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
9538 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
9539 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
9540 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
9541 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
9542 };
9543 
9544 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
9545 {
9546 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
9547 
9548 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
9549 					 sizeof(struct mlxsw_sp_rif *),
9550 					 GFP_KERNEL);
9551 	if (!mlxsw_sp->router->rifs)
9552 		return -ENOMEM;
9553 
9554 	return 0;
9555 }
9556 
9557 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
9558 {
9559 	int i;
9560 
9561 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
9562 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
9563 
9564 	kfree(mlxsw_sp->router->rifs);
9565 }
9566 
9567 static int
9568 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
9569 {
9570 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
9571 
9572 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
9573 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
9574 }
9575 
9576 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
9577 {
9578 	int err;
9579 
9580 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
9581 
9582 	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
9583 	if (err)
9584 		return err;
9585 	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
9586 	if (err)
9587 		return err;
9588 
9589 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
9590 }
9591 
9592 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
9593 {
9594 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
9595 	return mlxsw_sp_ipips_init(mlxsw_sp);
9596 }
9597 
9598 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
9599 {
9600 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
9601 	return mlxsw_sp_ipips_init(mlxsw_sp);
9602 }
9603 
9604 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
9605 {
9606 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
9607 }
9608 
9609 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
9610 {
9611 	struct mlxsw_sp_router *router;
9612 
9613 	/* Flush pending FIB notifications and then flush the device's
9614 	 * table before requesting another dump. The FIB notification
9615 	 * block is unregistered, so no need to take RTNL.
9616 	 */
9617 	mlxsw_core_flush_owq();
9618 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
9619 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
9620 }
9621 
9622 #ifdef CONFIG_IP_ROUTE_MULTIPATH
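/* Scratch representation of a RECR2 configuration: bitmaps of the outer
 * and inner header/field enables to be programmed, plus whether the
 * configuration requires increased parsing depth.
 */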
9623 struct mlxsw_sp_mp_hash_config {
9624 	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
9625 	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
9626 	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
9627 	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
9628 	bool inc_parsing_depth;
9629 };
9630 
9631 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
9632 	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
9633 
9634 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
9635 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
9636 
9637 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
9638 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
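/* For example, MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL) expands
 * to bitmap_set(fields, MLXSW_REG_RECR2_IPV4_PROTOCOL, 1), and the range
 * variant sets _nr consecutive bits starting at the named field.
 */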
9639 
9640 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
9641 {
9642 	unsigned long *inner_headers = config->inner_headers;
9643 	unsigned long *inner_fields = config->inner_fields;
9644 
9645 	/* IPv4 inner */
9646 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
9647 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
9648 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
9649 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
9650 	/* IPv6 inner */
9651 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
9652 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
9653 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
9654 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
9655 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
9656 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
9657 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
9658 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
9659 }
9660 
9661 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
9662 {
9663 	unsigned long *headers = config->headers;
9664 	unsigned long *fields = config->fields;
9665 
9666 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
9667 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
9668 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
9669 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
9670 }
9671 
9672 static void
9673 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
9674 			      u32 hash_fields)
9675 {
9676 	unsigned long *inner_headers = config->inner_headers;
9677 	unsigned long *inner_fields = config->inner_fields;
9678 
9679 	/* IPv4 Inner */
9680 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
9681 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
9682 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
9683 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
9684 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
9685 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
9686 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
9687 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
9688 	/* IPv6 inner */
9689 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
9690 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
9691 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
9692 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
9693 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
9694 	}
9695 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
9696 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
9697 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
9698 	}
9699 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
9700 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
9701 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
9702 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
9703 	/* L4 inner */
9704 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
9705 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
9706 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
9707 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
9708 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
9709 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
9710 }
9711 
9712 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
9713 				   struct mlxsw_sp_mp_hash_config *config)
9714 {
9715 	struct net *net = mlxsw_sp_net(mlxsw_sp);
9716 	unsigned long *headers = config->headers;
9717 	unsigned long *fields = config->fields;
9718 	u32 hash_fields;
9719 
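	/* The cases mirror net.ipv4.fib_multipath_hash_policy:
	 * 0 - L3 (addresses only), 1 - L4 (five-tuple),
	 * 2 - L3 or inner L3 for encapsulated packets,
	 * 3 - custom field set from fib_multipath_hash_fields.
	 */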
9720 	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
9721 	case 0:
9722 		mlxsw_sp_mp4_hash_outer_addr(config);
9723 		break;
9724 	case 1:
9725 		mlxsw_sp_mp4_hash_outer_addr(config);
9726 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
9727 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
9728 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9729 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9730 		break;
9731 	case 2:
9732 		/* Outer */
9733 		mlxsw_sp_mp4_hash_outer_addr(config);
9734 		/* Inner */
9735 		mlxsw_sp_mp_hash_inner_l3(config);
9736 		break;
9737 	case 3:
9738 		hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
9739 		/* Outer */
9740 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
9741 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
9742 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
9743 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
9744 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
9745 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
9746 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
9747 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
9748 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
9749 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
9750 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9751 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
9752 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9753 		/* Inner */
9754 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
9755 		break;
9756 	}
9757 }
9758 
9759 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
9760 {
9761 	unsigned long *headers = config->headers;
9762 	unsigned long *fields = config->fields;
9763 
9764 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
9765 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
9766 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
9767 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
9768 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
9769 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
9770 }
9771 
9772 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
9773 				   struct mlxsw_sp_mp_hash_config *config)
9774 {
9775 	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
9776 	unsigned long *headers = config->headers;
9777 	unsigned long *fields = config->fields;
9778 
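	/* Same policy mapping as IPv4, from
	 * net.ipv6.fib_multipath_hash_policy; the L3 cases additionally
	 * hash the IPv6 flow label.
	 */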
9779 	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
9780 	case 0:
9781 		mlxsw_sp_mp6_hash_outer_addr(config);
9782 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
9783 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
9784 		break;
9785 	case 1:
9786 		mlxsw_sp_mp6_hash_outer_addr(config);
9787 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
9788 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
9789 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9790 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9791 		break;
9792 	case 2:
9793 		/* Outer */
9794 		mlxsw_sp_mp6_hash_outer_addr(config);
9795 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
9796 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
9797 		/* Inner */
9798 		mlxsw_sp_mp_hash_inner_l3(config);
9799 		config->inc_parsing_depth = true;
9800 		break;
9801 	case 3:
9802 		/* Outer */
9803 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
9804 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
9805 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
9806 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
9807 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
9808 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
9809 		}
9810 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
9811 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
9812 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
9813 		}
9814 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
9815 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
9816 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
9817 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
9818 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
9819 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9820 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
9821 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9822 		/* Inner */
9823 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
9824 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
9825 			config->inc_parsing_depth = true;
9826 		break;
9827 	}
9828 }
9829 
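/* Hashing on inner headers requires the device to parse deeper into the
 * packet than it does by default, so a parsing depth reference is taken
 * while such a configuration is in effect and released afterwards.
 */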
9830 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
9831 						 bool old_inc_parsing_depth,
9832 						 bool new_inc_parsing_depth)
9833 {
9834 	int err;
9835 
9836 	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
9837 		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
9838 		if (err)
9839 			return err;
9840 		mlxsw_sp->router->inc_parsing_depth = true;
9841 	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
9842 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
9843 		mlxsw_sp->router->inc_parsing_depth = false;
9844 	}
9845 
9846 	return 0;
9847 }
9848 
9849 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
9850 {
9851 	bool old_inc_parsing_depth, new_inc_parsing_depth;
9852 	struct mlxsw_sp_mp_hash_config config = {};
9853 	char recr2_pl[MLXSW_REG_RECR2_LEN];
9854 	unsigned long bit;
9855 	u32 seed;
9856 	int err;
9857 
9858 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
9859 	mlxsw_reg_recr2_pack(recr2_pl, seed);
9860 	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
9861 	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
9862 
9863 	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
9864 	new_inc_parsing_depth = config.inc_parsing_depth;
9865 	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
9866 						    old_inc_parsing_depth,
9867 						    new_inc_parsing_depth);
9868 	if (err)
9869 		return err;
9870 
9871 	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
9872 		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
9873 	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
9874 		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
9875 	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
9876 		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
9877 	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
9878 		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
9879 
9880 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
9881 	if (err)
9882 		goto err_reg_write;
9883 
9884 	return 0;
9885 
9886 err_reg_write:
9887 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
9888 					      old_inc_parsing_depth);
9889 	return err;
9890 }
9891 #else
9892 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
9893 {
9894 	return 0;
9895 }
9896 #endif
9897 
9898 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
9899 {
9900 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
9901 	unsigned int i;
9902 
9903 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
9904 
	/* The HW determines switch priority from the DSCP bits, while the
	 * kernel still derives it from the full ToS byte. To bridge this
	 * mismatch, program each DSCP value with the priority the kernel
	 * would compute for the corresponding ToS, i.e. with the two
	 * least-significant ECN bits skipped.
	 */
9910 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
9911 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
9912 
9913 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
9914 }
9915 
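/* Enable the router block via RGCR, programming the maximum number of
 * RIFs and whether forwarded packets should have their switch priority
 * refreshed (net.ipv4.ip_forward_update_priority).
 */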
9916 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
9917 {
9918 	struct net *net = mlxsw_sp_net(mlxsw_sp);
9919 	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
9920 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
9921 	u64 max_rifs;
9922 
9923 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
9924 		return -EIO;
9925 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
9926 
9927 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
9928 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
9929 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
9930 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
9931 }
9932 
9933 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
9934 {
9935 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
9936 
9937 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
9938 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
9939 }
9940 
9941 static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
9942 	.init = mlxsw_sp_router_ll_basic_init,
9943 	.ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
9944 	.ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
9945 	.raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
9946 	.fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
9947 	.fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
9948 	.fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
9949 	.fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
9950 	.fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
9951 	.fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
9952 	.fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
9953 	.fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
9954 };
9955 
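/* A single operation context is shared by both protocols, so size its
 * trailing buffer for the largest per-protocol fib_entry_op_ctx.
 */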
9956 static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
9957 {
9958 	size_t max_size = 0;
9959 	int i;
9960 
9961 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
9962 		size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
9963 
9964 		if (size > max_size)
9965 			max_size = size;
9966 	}
9967 	router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
9968 				    GFP_KERNEL);
9969 	if (!router->ll_op_ctx)
9970 		return -ENOMEM;
9971 	INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
9972 	return 0;
9973 }
9974 
9975 static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
9976 {
9977 	WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
9978 	kfree(router->ll_op_ctx);
9979 }
9980 
9981 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
9982 {
9983 	u16 lb_rif_index;
9984 	int err;
9985 
9986 	/* Create a generic loopback RIF associated with the main table
9987 	 * (default VRF). Any table can be used, but the main table exists
9988 	 * anyway, so we do not waste resources.
9989 	 */
9990 	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
9991 					 &lb_rif_index);
9992 	if (err)
9993 		return err;
9994 
9995 	mlxsw_sp->router->lb_rif_index = lb_rif_index;
9996 
9997 	return 0;
9998 }
9999 
10000 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
10001 {
10002 	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
10003 }
10004 
10005 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
10006 {
10007 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
10008 
10009 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
10010 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
10011 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10012 
10013 	return 0;
10014 }
10015 
10016 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
10017 	.init = mlxsw_sp1_router_init,
10018 	.ipips_init = mlxsw_sp1_ipips_init,
10019 };
10020 
10021 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
10022 {
10023 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
10024 
10025 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
10026 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
10027 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10028 
10029 	return 0;
10030 }
10031 
10032 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
10033 	.init = mlxsw_sp2_router_init,
10034 	.ipips_init = mlxsw_sp2_ipips_init,
10035 };
10036 
10037 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
10038 			 struct netlink_ext_ack *extack)
10039 {
10040 	struct mlxsw_sp_router *router;
10041 	int err;
10042 
	router = kzalloc(sizeof(*router), GFP_KERNEL);
10044 	if (!router)
10045 		return -ENOMEM;
10046 	mutex_init(&router->lock);
10047 	mlxsw_sp->router = router;
10048 	router->mlxsw_sp = mlxsw_sp;
10049 
10050 	err = mlxsw_sp->router_ops->init(mlxsw_sp);
10051 	if (err)
10052 		goto err_router_ops_init;
10053 
10054 	err = mlxsw_sp_router_xm_init(mlxsw_sp);
10055 	if (err)
10056 		goto err_xm_init;
10057 
10058 	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
10059 						       &mlxsw_sp_router_ll_xm_ops :
10060 						       &mlxsw_sp_router_ll_basic_ops;
10061 	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
10062 
10063 	err = mlxsw_sp_router_ll_op_ctx_init(router);
10064 	if (err)
10065 		goto err_ll_op_ctx_init;
10066 
10067 	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
10068 	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
10069 			  mlxsw_sp_nh_grp_activity_work);
10070 
10071 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
10072 	err = __mlxsw_sp_router_init(mlxsw_sp);
10073 	if (err)
10074 		goto err_router_init;
10075 
10076 	err = mlxsw_sp_rifs_init(mlxsw_sp);
10077 	if (err)
10078 		goto err_rifs_init;
10079 
10080 	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
10081 	if (err)
10082 		goto err_ipips_init;
10083 
10084 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
10085 			      &mlxsw_sp_nexthop_ht_params);
10086 	if (err)
10087 		goto err_nexthop_ht_init;
10088 
10089 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
10090 			      &mlxsw_sp_nexthop_group_ht_params);
10091 	if (err)
10092 		goto err_nexthop_group_ht_init;
10093 
10094 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
10095 	err = mlxsw_sp_lpm_init(mlxsw_sp);
10096 	if (err)
10097 		goto err_lpm_init;
10098 
10099 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
10100 	if (err)
10101 		goto err_mr_init;
10102 
10103 	err = mlxsw_sp_vrs_init(mlxsw_sp);
10104 	if (err)
10105 		goto err_vrs_init;
10106 
10107 	err = mlxsw_sp_lb_rif_init(mlxsw_sp);
10108 	if (err)
10109 		goto err_lb_rif_init;
10110 
10111 	err = mlxsw_sp_neigh_init(mlxsw_sp);
10112 	if (err)
10113 		goto err_neigh_init;
10114 
10115 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
10116 	if (err)
10117 		goto err_mp_hash_init;
10118 
10119 	err = mlxsw_sp_dscp_init(mlxsw_sp);
10120 	if (err)
10121 		goto err_dscp_init;
10122 
10123 	INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
10124 	INIT_LIST_HEAD(&router->fib_event_queue);
10125 	spin_lock_init(&router->fib_event_queue_lock);
10126 
10127 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
10128 	err = register_inetaddr_notifier(&router->inetaddr_nb);
10129 	if (err)
10130 		goto err_register_inetaddr_notifier;
10131 
10132 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
10133 	err = register_inet6addr_notifier(&router->inet6addr_nb);
10134 	if (err)
10135 		goto err_register_inet6addr_notifier;
10136 
10137 	mlxsw_sp->router->netevent_nb.notifier_call =
10138 		mlxsw_sp_router_netevent_event;
10139 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10140 	if (err)
10141 		goto err_register_netevent_notifier;
10142 
10143 	mlxsw_sp->router->nexthop_nb.notifier_call =
10144 		mlxsw_sp_nexthop_obj_event;
10145 	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10146 					&mlxsw_sp->router->nexthop_nb,
10147 					extack);
10148 	if (err)
10149 		goto err_register_nexthop_notifier;
10150 
10151 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
10152 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10153 				    &mlxsw_sp->router->fib_nb,
10154 				    mlxsw_sp_router_fib_dump_flush, extack);
10155 	if (err)
10156 		goto err_register_fib_notifier;
10157 
10158 	return 0;
10159 
10160 err_register_fib_notifier:
10161 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10162 				    &mlxsw_sp->router->nexthop_nb);
10163 err_register_nexthop_notifier:
10164 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10165 err_register_netevent_notifier:
10166 	unregister_inet6addr_notifier(&router->inet6addr_nb);
10167 err_register_inet6addr_notifier:
10168 	unregister_inetaddr_notifier(&router->inetaddr_nb);
10169 err_register_inetaddr_notifier:
10170 	mlxsw_core_flush_owq();
10171 	WARN_ON(!list_empty(&router->fib_event_queue));
10172 err_dscp_init:
10173 err_mp_hash_init:
10174 	mlxsw_sp_neigh_fini(mlxsw_sp);
10175 err_neigh_init:
10176 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
10177 err_lb_rif_init:
10178 	mlxsw_sp_vrs_fini(mlxsw_sp);
10179 err_vrs_init:
10180 	mlxsw_sp_mr_fini(mlxsw_sp);
10181 err_mr_init:
10182 	mlxsw_sp_lpm_fini(mlxsw_sp);
10183 err_lpm_init:
10184 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
10185 err_nexthop_group_ht_init:
10186 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
10187 err_nexthop_ht_init:
10188 	mlxsw_sp_ipips_fini(mlxsw_sp);
10189 err_ipips_init:
10190 	mlxsw_sp_rifs_fini(mlxsw_sp);
10191 err_rifs_init:
10192 	__mlxsw_sp_router_fini(mlxsw_sp);
10193 err_router_init:
10194 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
10195 	mlxsw_sp_router_ll_op_ctx_fini(router);
10196 err_ll_op_ctx_init:
10197 	mlxsw_sp_router_xm_fini(mlxsw_sp);
10198 err_xm_init:
10199 err_router_ops_init:
10200 	mutex_destroy(&mlxsw_sp->router->lock);
10201 	kfree(mlxsw_sp->router);
10202 	return err;
10203 }
10204 
10205 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10206 {
10207 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10208 				&mlxsw_sp->router->fib_nb);
10209 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10210 				    &mlxsw_sp->router->nexthop_nb);
10211 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10212 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
10213 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
10214 	mlxsw_core_flush_owq();
10215 	WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
10216 	mlxsw_sp_neigh_fini(mlxsw_sp);
10217 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
10218 	mlxsw_sp_vrs_fini(mlxsw_sp);
10219 	mlxsw_sp_mr_fini(mlxsw_sp);
10220 	mlxsw_sp_lpm_fini(mlxsw_sp);
10221 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
10222 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
10223 	mlxsw_sp_ipips_fini(mlxsw_sp);
10224 	mlxsw_sp_rifs_fini(mlxsw_sp);
10225 	__mlxsw_sp_router_fini(mlxsw_sp);
10226 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
10227 	mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
10228 	mlxsw_sp_router_xm_fini(mlxsw_sp);
10229 	mutex_destroy(&mlxsw_sp->router->lock);
10230 	kfree(mlxsw_sp->router);
10231 }
10232