xref: /openbmc/linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c (revision e983940270f10fe8551baf0098be76ea478294a3)
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3  * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/rhashtable.h>
40 #include <linux/bitops.h>
41 #include <linux/in6.h>
42 #include <linux/notifier.h>
43 #include <net/netevent.h>
44 #include <net/neighbour.h>
45 #include <net/arp.h>
46 #include <net/ip_fib.h>
47 
48 #include "spectrum.h"
49 #include "core.h"
50 #include "reg.h"
51 
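/* Iterate over all prefix lengths whose bit is set in the prefix usage
 * bitmap. A minimal usage sketch (hypothetical caller):
 *
 *	unsigned char prefix;
 *
 *	mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
 *		pr_debug("prefix length %u is in use\n", prefix);
 */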
52 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
53 	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
54 
55 static bool
56 mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
57 			     struct mlxsw_sp_prefix_usage *prefix_usage2)
58 {
59 	unsigned char prefix;
60 
61 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
62 		if (!test_bit(prefix, prefix_usage2->b))
63 			return false;
64 	}
65 	return true;
66 }
67 
68 static bool
69 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
70 			 struct mlxsw_sp_prefix_usage *prefix_usage2)
71 {
72 	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
73 }
74 
75 static bool
76 mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
77 {
78 	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
79 
80 	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
81 }
82 
83 static void
84 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
85 			  struct mlxsw_sp_prefix_usage *prefix_usage2)
86 {
87 	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
88 }
89 
90 static void
91 mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
92 {
93 	memset(prefix_usage, 0, sizeof(*prefix_usage));
94 }
95 
96 static void
97 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
98 			  unsigned char prefix_len)
99 {
100 	set_bit(prefix_len, prefix_usage->b);
101 }
102 
103 static void
104 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
105 			    unsigned char prefix_len)
106 {
107 	clear_bit(prefix_len, prefix_usage->b);
108 }
109 
110 struct mlxsw_sp_fib_key {
111 	struct net_device *dev;
112 	unsigned char addr[sizeof(struct in6_addr)];
113 	unsigned char prefix_len;
114 };
115 
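/* A FIB entry is programmed with one of three actions: remote entries
 * are forwarded via a nexthop group in the adjacency table, local
 * entries are forwarded using the entry's router interface, and trap
 * entries are punted to the CPU.
 */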
116 enum mlxsw_sp_fib_entry_type {
117 	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
118 	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
119 	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
120 };
121 
122 struct mlxsw_sp_nexthop_group;
123 
124 struct mlxsw_sp_fib_entry {
125 	struct rhash_head ht_node;
126 	struct list_head list;
127 	struct mlxsw_sp_fib_key key;
128 	enum mlxsw_sp_fib_entry_type type;
129 	unsigned int ref_count;
130 	u16 rif; /* used for action local */
131 	struct mlxsw_sp_vr *vr;
132 	struct fib_info *fi;
133 	struct list_head nexthop_group_node;
134 	struct mlxsw_sp_nexthop_group *nh_group;
135 };
136 
137 struct mlxsw_sp_fib {
138 	struct rhashtable ht;
139 	struct list_head entry_list;
140 	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
141 	struct mlxsw_sp_prefix_usage prefix_usage;
142 };
143 
144 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
145 	.key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
146 	.head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
147 	.key_len = sizeof(struct mlxsw_sp_fib_key),
148 	.automatic_shrinking = true,
149 };
150 
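/* Reference-count the prefix lengths in use: the first entry using a
 * given length marks it in the FIB's prefix usage, which in turn
 * dictates the structure required of the bound LPM tree.
 */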
151 static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
152 				     struct mlxsw_sp_fib_entry *fib_entry)
153 {
154 	unsigned char prefix_len = fib_entry->key.prefix_len;
155 	int err;
156 
157 	err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
158 				     mlxsw_sp_fib_ht_params);
159 	if (err)
160 		return err;
161 	list_add_tail(&fib_entry->list, &fib->entry_list);
162 	if (fib->prefix_ref_count[prefix_len]++ == 0)
163 		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
164 	return 0;
165 }
166 
167 static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
168 				      struct mlxsw_sp_fib_entry *fib_entry)
169 {
170 	unsigned char prefix_len = fib_entry->key.prefix_len;
171 
172 	if (--fib->prefix_ref_count[prefix_len] == 0)
173 		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
174 	list_del(&fib_entry->list);
175 	rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
176 			       mlxsw_sp_fib_ht_params);
177 }
178 
179 static struct mlxsw_sp_fib_entry *
180 mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
181 			  size_t addr_len, unsigned char prefix_len,
182 			  struct net_device *dev)
183 {
184 	struct mlxsw_sp_fib_entry *fib_entry;
185 
186 	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
187 	if (!fib_entry)
188 		return NULL;
189 	fib_entry->key.dev = dev;
190 	memcpy(fib_entry->key.addr, addr, addr_len);
191 	fib_entry->key.prefix_len = prefix_len;
192 	return fib_entry;
193 }
194 
195 static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
196 {
197 	kfree(fib_entry);
198 }
199 
200 static struct mlxsw_sp_fib_entry *
201 mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
202 			  size_t addr_len, unsigned char prefix_len,
203 			  struct net_device *dev)
204 {
205 	struct mlxsw_sp_fib_key key;
206 
207 	memset(&key, 0, sizeof(key));
208 	key.dev = dev;
209 	memcpy(key.addr, addr, addr_len);
210 	key.prefix_len = prefix_len;
211 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
212 }
213 
214 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
215 {
216 	struct mlxsw_sp_fib *fib;
217 	int err;
218 
219 	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
220 	if (!fib)
221 		return ERR_PTR(-ENOMEM);
222 	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
223 	if (err)
224 		goto err_rhashtable_init;
225 	INIT_LIST_HEAD(&fib->entry_list);
226 	return fib;
227 
228 err_rhashtable_init:
229 	kfree(fib);
230 	return ERR_PTR(err);
231 }
232 
233 static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
234 {
235 	rhashtable_destroy(&fib->ht);
236 	kfree(fib);
237 }
238 
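/* Find an unused LPM tree. When 'one_reserved' is set, skip one free
 * tree; virtual router creation passes true here so that a later tree
 * replacement (see mlxsw_sp_vr_lpm_tree_check(), which passes false)
 * can still find a free tree.
 */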
239 static struct mlxsw_sp_lpm_tree *
240 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
241 {
242 	struct mlxsw_sp_lpm_tree *lpm_tree;
243 	int i;
244 
245 	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
246 		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
247 		if (lpm_tree->ref_count == 0) {
248 			if (one_reserved)
249 				one_reserved = false;
250 			else
251 				return lpm_tree;
252 		}
253 	}
254 	return NULL;
255 }
256 
257 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
258 				   struct mlxsw_sp_lpm_tree *lpm_tree)
259 {
260 	char ralta_pl[MLXSW_REG_RALTA_LEN];
261 
262 	mlxsw_reg_ralta_pack(ralta_pl, true,
263 			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
264 			     lpm_tree->id);
265 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
266 }
267 
268 static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
269 				  struct mlxsw_sp_lpm_tree *lpm_tree)
270 {
271 	char ralta_pl[MLXSW_REG_RALTA_LEN];
272 
273 	mlxsw_reg_ralta_pack(ralta_pl, false,
274 			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
275 			     lpm_tree->id);
276 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
277 }
278 
279 static int
280 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
281 				  struct mlxsw_sp_prefix_usage *prefix_usage,
282 				  struct mlxsw_sp_lpm_tree *lpm_tree)
283 {
284 	char ralst_pl[MLXSW_REG_RALST_LEN];
285 	u8 root_bin = 0;
286 	u8 prefix;
287 	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
288 
289 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
290 		root_bin = prefix;
291 
292 	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
293 	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
294 		if (prefix == 0)
295 			continue;
296 		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
297 					 MLXSW_REG_RALST_BIN_NO_CHILD);
298 		last_prefix = prefix;
299 	}
300 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
301 }
302 
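/* Program the tree structure: the largest used prefix length becomes
 * the root bin and each remaining used length is chained to the
 * previously programmed one, yielding a linear ("left") structure.
 */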
303 static struct mlxsw_sp_lpm_tree *
304 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
305 			 struct mlxsw_sp_prefix_usage *prefix_usage,
306 			 enum mlxsw_sp_l3proto proto, bool one_reserved)
307 {
308 	struct mlxsw_sp_lpm_tree *lpm_tree;
309 	int err;
310 
311 	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
312 	if (!lpm_tree)
313 		return ERR_PTR(-EBUSY);
314 	lpm_tree->proto = proto;
315 	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
316 	if (err)
317 		return ERR_PTR(err);
318 
319 	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
320 						lpm_tree);
321 	if (err)
322 		goto err_left_struct_set;
323 	return lpm_tree;
324 
325 err_left_struct_set:
326 	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
327 	return ERR_PTR(err);
328 }
329 
330 static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
331 				     struct mlxsw_sp_lpm_tree *lpm_tree)
332 {
333 	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
334 }
335 
336 static struct mlxsw_sp_lpm_tree *
337 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
338 		      struct mlxsw_sp_prefix_usage *prefix_usage,
339 		      enum mlxsw_sp_l3proto proto, bool one_reserved)
340 {
341 	struct mlxsw_sp_lpm_tree *lpm_tree;
342 	int i;
343 
344 	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
345 		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
346 		if (lpm_tree->proto == proto &&
347 		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
348 					     prefix_usage))
349 			goto inc_ref_count;
350 	}
351 	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
352 					    proto, one_reserved);
353 	if (IS_ERR(lpm_tree))
354 		return lpm_tree;
355 
356 inc_ref_count:
357 	lpm_tree->ref_count++;
358 	return lpm_tree;
359 }
360 
361 static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
362 				 struct mlxsw_sp_lpm_tree *lpm_tree)
363 {
364 	if (--lpm_tree->ref_count == 0)
365 		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
366 	return 0;
367 }
368 
369 static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
370 {
371 	struct mlxsw_sp_lpm_tree *lpm_tree;
372 	int i;
373 
374 	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
375 		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
376 		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
377 	}
378 }
379 
380 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
381 {
382 	struct mlxsw_resources *resources;
383 	struct mlxsw_sp_vr *vr;
384 	int i;
385 
386 	resources = mlxsw_core_resources_get(mlxsw_sp->core);
387 	for (i = 0; i < resources->max_virtual_routers; i++) {
388 		vr = &mlxsw_sp->router.vrs[i];
389 		if (!vr->used)
390 			return vr;
391 	}
392 	return NULL;
393 }
394 
395 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
396 				     struct mlxsw_sp_vr *vr)
397 {
398 	char raltb_pl[MLXSW_REG_RALTB_LEN];
399 
400 	mlxsw_reg_raltb_pack(raltb_pl, vr->id,
401 			     (enum mlxsw_reg_ralxx_protocol) vr->proto,
402 			     vr->lpm_tree->id);
403 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
404 }
405 
406 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
407 				       struct mlxsw_sp_vr *vr)
408 {
409 	char raltb_pl[MLXSW_REG_RALTB_LEN];
410 
411 	/* Bind to tree 0 which is default */
412 	mlxsw_reg_raltb_pack(raltb_pl, vr->id,
413 			     (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
414 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
415 }
416 
417 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
418 {
419 	/* For our purpose, squash main and local table into one */
420 	if (tb_id == RT_TABLE_LOCAL)
421 		tb_id = RT_TABLE_MAIN;
422 	return tb_id;
423 }
424 
425 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
426 					    u32 tb_id,
427 					    enum mlxsw_sp_l3proto proto)
428 {
429 	struct mlxsw_resources *resources;
430 	struct mlxsw_sp_vr *vr;
431 	int i;
432 
433 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
434 
435 	resources = mlxsw_core_resources_get(mlxsw_sp->core);
436 	for (i = 0; i < resources->max_virtual_routers; i++) {
437 		vr = &mlxsw_sp->router.vrs[i];
438 		if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
439 			return vr;
440 	}
441 	return NULL;
442 }
443 
444 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
445 					      unsigned char prefix_len,
446 					      u32 tb_id,
447 					      enum mlxsw_sp_l3proto proto)
448 {
449 	struct mlxsw_sp_prefix_usage req_prefix_usage;
450 	struct mlxsw_sp_lpm_tree *lpm_tree;
451 	struct mlxsw_sp_vr *vr;
452 	int err;
453 
454 	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
455 	if (!vr)
456 		return ERR_PTR(-EBUSY);
457 	vr->fib = mlxsw_sp_fib_create();
458 	if (IS_ERR(vr->fib))
459 		return ERR_CAST(vr->fib);
460 
461 	vr->proto = proto;
462 	vr->tb_id = tb_id;
463 	mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
464 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
465 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
466 					 proto, true);
467 	if (IS_ERR(lpm_tree)) {
468 		err = PTR_ERR(lpm_tree);
469 		goto err_tree_get;
470 	}
471 	vr->lpm_tree = lpm_tree;
472 	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
473 	if (err)
474 		goto err_tree_bind;
475 
476 	vr->used = true;
477 	return vr;
478 
479 err_tree_bind:
480 	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
481 err_tree_get:
482 	mlxsw_sp_fib_destroy(vr->fib);
483 
484 	return ERR_PTR(err);
485 }
486 
487 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
488 				struct mlxsw_sp_vr *vr)
489 {
490 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
491 	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
492 	mlxsw_sp_fib_destroy(vr->fib);
493 	vr->used = false;
494 }
495 
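/* Make sure the virtual router's bound LPM tree can serve the
 * requested prefix usage, replacing and rebinding the tree when
 * needed; the current tree is kept if the request is a subset of what
 * it already serves.
 */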
496 static int
497 mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
498 			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
499 {
500 	struct mlxsw_sp_lpm_tree *lpm_tree;
501 
502 	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
503 				     &vr->lpm_tree->prefix_usage))
504 		return 0;
505 
506 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
507 					 vr->proto, false);
508 	if (IS_ERR(lpm_tree)) {
509 		/* We failed to get a tree according to the required
510 		 * prefix usage. However, the current tree might still be
511 		 * good for us if our requirement is a subset of the
512 		 * prefixes used in the tree.
513 		 */
514 		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
515 						 &vr->lpm_tree->prefix_usage))
516 			return 0;
517 		return PTR_ERR(lpm_tree);
518 	}
519 
520 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
521 	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
522 	vr->lpm_tree = lpm_tree;
523 	return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
524 }
525 
526 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
527 					   unsigned char prefix_len,
528 					   u32 tb_id,
529 					   enum mlxsw_sp_l3proto proto)
530 {
531 	struct mlxsw_sp_vr *vr;
532 	int err;
533 
534 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
535 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
536 	if (!vr) {
537 		vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
538 		if (IS_ERR(vr))
539 			return vr;
540 	} else {
541 		struct mlxsw_sp_prefix_usage req_prefix_usage;
542 
543 		mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
544 					  &vr->fib->prefix_usage);
545 		mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
546 		/* Need to replace LPM tree in case new prefix is required. */
547 		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
548 						 &req_prefix_usage);
549 		if (err)
550 			return ERR_PTR(err);
551 	}
552 	return vr;
553 }
554 
555 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
556 {
557 	/* Destroy the virtual router entity in case the associated FIB is
558 	 * empty, allowing it to be reused for other tables in the future.
559 	 * Otherwise, check whether some prefix usage disappeared and replace
560 	 * the LPM tree if that is the case. Note that if a new, smaller tree
561 	 * cannot be allocated, the original one is kept in use.
562 	 */
563 	if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
564 		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
565 	else
566 		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
567 					   &vr->fib->prefix_usage);
568 }
569 
570 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
571 {
572 	struct mlxsw_resources *resources;
573 	struct mlxsw_sp_vr *vr;
574 	int i;
575 
576 	resources = mlxsw_core_resources_get(mlxsw_sp->core);
577 	if (!resources->max_virtual_routers_valid)
578 		return -EIO;
579 
580 	mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers,
581 				       sizeof(struct mlxsw_sp_vr),
582 				       GFP_KERNEL);
583 	if (!mlxsw_sp->router.vrs)
584 		return -ENOMEM;
585 
586 	for (i = 0; i < resources->max_virtual_routers; i++) {
587 		vr = &mlxsw_sp->router.vrs[i];
588 		vr->id = i;
589 	}
590 
591 	return 0;
592 }
593 
594 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
595 {
596 	kfree(mlxsw_sp->router.vrs);
597 }
598 
599 struct mlxsw_sp_neigh_key {
600 	unsigned char addr[sizeof(struct in6_addr)];
601 	struct net_device *dev;
602 };
603 
604 struct mlxsw_sp_neigh_entry {
605 	struct rhash_head ht_node;
606 	struct mlxsw_sp_neigh_key key;
607 	u16 rif;
608 	struct neighbour *n;
609 	bool offloaded;
610 	struct delayed_work dw;
611 	struct mlxsw_sp_port *mlxsw_sp_port;
612 	unsigned char ha[ETH_ALEN];
613 	struct list_head nexthop_list; /* list of nexthops using
614 					* this neigh entry
615 					*/
616 	struct list_head nexthop_neighs_list_node;
617 };
618 
619 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
620 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
621 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
622 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
623 };
624 
625 static int
626 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
627 			    struct mlxsw_sp_neigh_entry *neigh_entry)
628 {
629 	return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
630 				      &neigh_entry->ht_node,
631 				      mlxsw_sp_neigh_ht_params);
632 }
633 
634 static void
635 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
636 			    struct mlxsw_sp_neigh_entry *neigh_entry)
637 {
638 	rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
639 			       &neigh_entry->ht_node,
640 			       mlxsw_sp_neigh_ht_params);
641 }
642 
643 static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
644 
645 static struct mlxsw_sp_neigh_entry *
646 mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len,
647 			    struct net_device *dev, u16 rif,
648 			    struct neighbour *n)
649 {
650 	struct mlxsw_sp_neigh_entry *neigh_entry;
651 
652 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
653 	if (!neigh_entry)
654 		return NULL;
655 	memcpy(neigh_entry->key.addr, addr, addr_len);
656 	neigh_entry->key.dev = dev;
657 	neigh_entry->rif = rif;
658 	neigh_entry->n = n;
659 	INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
660 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
661 	return neigh_entry;
662 }
663 
664 static void
665 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
666 {
667 	kfree(neigh_entry);
668 }
669 
670 static struct mlxsw_sp_neigh_entry *
671 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr,
672 			    size_t addr_len, struct net_device *dev)
673 {
674 	struct mlxsw_sp_neigh_key key = {{ 0 } };
675 
676 	memcpy(key.addr, addr, addr_len);
677 	key.dev = dev;
678 	return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
679 				      &key, mlxsw_sp_neigh_ht_params);
680 }
681 
682 int mlxsw_sp_router_neigh_construct(struct net_device *dev,
683 				    struct neighbour *n)
684 {
685 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
686 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
687 	struct mlxsw_sp_neigh_entry *neigh_entry;
688 	struct mlxsw_sp_rif *r;
689 	u32 dip;
690 	int err;
691 
692 	if (n->tbl != &arp_tbl)
693 		return 0;
694 
695 	dip = ntohl(*((__be32 *) n->primary_key));
696 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
697 						  n->dev);
698 	if (neigh_entry) {
699 		WARN_ON(neigh_entry->n != n);
700 		return 0;
701 	}
702 
703 	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
704 	if (WARN_ON(!r))
705 		return -EINVAL;
706 
707 	neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev,
708 						  r->rif, n);
709 	if (!neigh_entry)
710 		return -ENOMEM;
711 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
712 	if (err)
713 		goto err_neigh_entry_insert;
714 	return 0;
715 
716 err_neigh_entry_insert:
717 	mlxsw_sp_neigh_entry_destroy(neigh_entry);
718 	return err;
719 }
720 
721 void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
722 				   struct neighbour *n)
723 {
724 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
725 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
726 	struct mlxsw_sp_neigh_entry *neigh_entry;
727 	u32 dip;
728 
729 	if (n->tbl != &arp_tbl)
730 		return;
731 
732 	dip = ntohl(*((__be32 *) n->primary_key));
733 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
734 						  n->dev);
735 	if (!neigh_entry)
736 		return;
737 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
738 	mlxsw_sp_neigh_entry_destroy(neigh_entry);
739 }
740 
741 static void
742 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
743 {
744 	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
745 
746 	mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
747 }
748 
749 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
750 						   char *rauhtd_pl,
751 						   int ent_index)
752 {
753 	struct net_device *dev;
754 	struct neighbour *n;
755 	__be32 dipn;
756 	u32 dip;
757 	u16 rif;
758 
759 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
760 
761 	if (!mlxsw_sp->rifs[rif]) {
762 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
763 		return;
764 	}
765 
766 	dipn = htonl(dip);
767 	dev = mlxsw_sp->rifs[rif]->dev;
768 	n = neigh_lookup(&arp_tbl, &dipn, dev);
769 	if (!n) {
770 		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
771 			   &dip);
772 		return;
773 	}
774 
775 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
776 	neigh_event_send(n, NULL);
777 	neigh_release(n);
778 }
779 
780 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
781 						   char *rauhtd_pl,
782 						   int rec_index)
783 {
784 	u8 num_entries;
785 	int i;
786 
787 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
788 								rec_index);
789 	/* Hardware starts counting at 0, so add 1. */
790 	num_entries++;
791 
792 	/* Each record consists of several neighbour entries. */
793 	for (i = 0; i < num_entries; i++) {
794 		int ent_index;
795 
796 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
797 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
798 						       ent_index);
799 	}
801 }
802 
803 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
804 					      char *rauhtd_pl, int rec_index)
805 {
806 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
807 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
808 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
809 						       rec_index);
810 		break;
811 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
812 		WARN_ON_ONCE(1);
813 		break;
814 	}
815 }
816 
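/* Dump the activity of the hardware neighbour table via RAUHTD and
 * report each returned entry to the kernel, so that used neighbours
 * are not aged out. The dump is repeated until no more records return.
 */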
817 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
818 {
819 	char *rauhtd_pl;
820 	u8 num_rec;
821 	int i, err;
822 
823 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
824 	if (!rauhtd_pl)
825 		return -ENOMEM;
826 
827 	/* Make sure the neighbour's netdev isn't removed in the
828 	 * process.
829 	 */
830 	rtnl_lock();
831 	do {
832 		mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
833 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
834 				      rauhtd_pl);
835 		if (err) {
836 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
837 			break;
838 		}
839 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
840 		for (i = 0; i < num_rec; i++)
841 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
842 							  i);
843 	} while (num_rec);
844 	rtnl_unlock();
845 
846 	kfree(rauhtd_pl);
847 	return err;
848 }
849 
850 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
851 {
852 	struct mlxsw_sp_neigh_entry *neigh_entry;
853 
854 	/* Take RTNL mutex here to prevent the lists from changing */
855 	rtnl_lock();
856 	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
857 			    nexthop_neighs_list_node) {
858 		/* If this neigh has nexthops, make the kernel think it is
859 		 * active regardless of the traffic.
860 		 */
861 		if (!list_empty(&neigh_entry->nexthop_list))
862 			neigh_event_send(neigh_entry->n, NULL);
863 	}
864 	rtnl_unlock();
865 }
866 
867 static void
868 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
869 {
870 	unsigned long interval = mlxsw_sp->router.neighs_update.interval;
871 
872 	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
873 			       msecs_to_jiffies(interval));
874 }
875 
876 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
877 {
878 	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
879 						 router.neighs_update.dw.work);
880 	int err;
881 
882 	err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
883 	if (err)
884 		dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
885 
886 	mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
887 
888 	mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
889 }
890 
891 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
892 {
893 	struct mlxsw_sp_neigh_entry *neigh_entry;
894 	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
895 						 router.nexthop_probe_dw.work);
896 
897 	/* Iterate over the nexthop neighbours, find the unresolved ones and
898 	 * send ARP for them. This solves the chicken-and-egg problem where
899 	 * a nexthop would not get offloaded until its neighbour is resolved,
900 	 * but the neighbour would never get resolved as long as traffic
901 	 * flows in HW using a different nexthop.
902 	 *
903 	 * Take RTNL mutex here to prevent the lists from changing.
904 	 */
905 	rtnl_lock();
906 	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
907 			    nexthop_neighs_list_node) {
908 		if (!(neigh_entry->n->nud_state & NUD_VALID) &&
909 		    !list_empty(&neigh_entry->nexthop_list))
910 			neigh_event_send(neigh_entry->n, NULL);
911 	}
912 	rtnl_unlock();
913 
914 	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
915 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
916 }
917 
918 static void
919 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
920 			      struct mlxsw_sp_neigh_entry *neigh_entry,
921 			      bool removing);
922 
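/* Reflect the kernel neighbour state in the device: program the
 * neighbour's MAC into the RAUHT table when the entry becomes or stays
 * valid, remove it when it is no longer valid, and update the nexthops
 * using this neighbour either way.
 */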
923 static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
924 {
925 	struct mlxsw_sp_neigh_entry *neigh_entry =
926 		container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
927 	struct neighbour *n = neigh_entry->n;
928 	struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
929 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
930 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
931 	struct net_device *dev;
932 	bool entry_connected;
933 	u8 nud_state;
934 	bool updating;
935 	bool removing;
936 	bool adding;
937 	u32 dip;
938 	int err;
939 
940 	read_lock_bh(&n->lock);
941 	dip = ntohl(*((__be32 *) n->primary_key));
942 	memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
943 	nud_state = n->nud_state;
944 	dev = n->dev;
945 	read_unlock_bh(&n->lock);
946 
947 	entry_connected = nud_state & NUD_VALID;
948 	adding = (!neigh_entry->offloaded) && entry_connected;
949 	updating = neigh_entry->offloaded && entry_connected;
950 	removing = neigh_entry->offloaded && !entry_connected;
951 
952 	if (adding || updating) {
953 		mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
954 				      neigh_entry->rif,
955 				      neigh_entry->ha, dip);
956 		err = mlxsw_reg_write(mlxsw_sp->core,
957 				      MLXSW_REG(rauht), rauht_pl);
958 		if (err) {
959 			netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
960 			neigh_entry->offloaded = false;
961 		} else {
962 			neigh_entry->offloaded = true;
963 		}
964 		mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
965 	} else if (removing) {
966 		mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
967 				      neigh_entry->rif,
968 				      neigh_entry->ha, dip);
969 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
970 				      rauht_pl);
971 		if (err) {
972 			netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
973 			neigh_entry->offloaded = true;
974 		} else {
975 			neigh_entry->offloaded = false;
976 		}
977 		mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
978 	}
979 
980 	neigh_release(n);
981 	mlxsw_sp_port_dev_put(mlxsw_sp_port);
982 }
983 
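/* Netevent handler, called in atomic context: DELAY_PROBE_TIME updates
 * adjust the neighbour polling interval, while neighbour updates are
 * pushed to the device from delayed work.
 */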
984 int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
985 				   unsigned long event, void *ptr)
986 {
987 	struct mlxsw_sp_neigh_entry *neigh_entry;
988 	struct mlxsw_sp_port *mlxsw_sp_port;
989 	struct mlxsw_sp *mlxsw_sp;
990 	unsigned long interval;
991 	struct net_device *dev;
992 	struct neigh_parms *p;
993 	struct neighbour *n;
994 	u32 dip;
995 
996 	switch (event) {
997 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
998 		p = ptr;
999 
1000 		/* We don't care about changes in the default table. */
1001 		if (!p->dev || p->tbl != &arp_tbl)
1002 			return NOTIFY_DONE;
1003 
1004 		/* We are in atomic context and can't take RTNL mutex,
1005 		 * so use RCU variant to walk the device chain.
1006 		 */
1007 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
1008 		if (!mlxsw_sp_port)
1009 			return NOTIFY_DONE;
1010 
1011 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1012 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
1013 		mlxsw_sp->router.neighs_update.interval = interval;
1014 
1015 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
1016 		break;
1017 	case NETEVENT_NEIGH_UPDATE:
1018 		n = ptr;
1019 		dev = n->dev;
1020 
1021 		if (n->tbl != &arp_tbl)
1022 			return NOTIFY_DONE;
1023 
1024 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
1025 		if (!mlxsw_sp_port)
1026 			return NOTIFY_DONE;
1027 
1028 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1029 		dip = ntohl(*((__be32 *) n->primary_key));
1030 		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp,
1031 							  &dip,
1032 							  sizeof(__be32),
1033 							  dev);
1034 		if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
1035 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
1036 			return NOTIFY_DONE;
1037 		}
1038 		neigh_entry->mlxsw_sp_port = mlxsw_sp_port;
1039 
1040 		/* Take a reference to ensure the neighbour won't be
1041 		 * destructed until we drop the reference in delayed
1042 		 * work.
1043 		 */
1044 		neigh_clone(n);
1045 		if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) {
1046 			neigh_release(n);
1047 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
1048 		}
1049 		break;
1050 	}
1051 
1052 	return NOTIFY_DONE;
1053 }
1054 
1055 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1056 {
1057 	int err;
1058 
1059 	err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
1060 			      &mlxsw_sp_neigh_ht_params);
1061 	if (err)
1062 		return err;
1063 
1064 	/* Initialize the polling interval according to the default
1065 	 * table.
1066 	 */
1067 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
1068 
1069 	/* Create the delayed works for neighbour activity update and nexthop probing */
1070 	INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
1071 			  mlxsw_sp_router_neighs_update_work);
1072 	INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
1073 			  mlxsw_sp_router_probe_unresolved_nexthops);
1074 	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
1075 	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
1076 	return 0;
1077 }
1078 
1079 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1080 {
1081 	cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
1082 	cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
1083 	rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
1084 }
1085 
1086 struct mlxsw_sp_nexthop {
1087 	struct list_head neigh_list_node; /* member of neigh entry list */
1088 	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
1089 						* this belongs to
1090 						*/
1091 	u8 should_offload:1, /* set indicates this neigh is connected and
1092 			      * should be put to KVD linear area of this group.
1093 			      */
1094 	   offloaded:1, /* set in case the neigh is actually put into
1095 			 * KVD linear area of this group.
1096 			 */
1097 	   update:1; /* set indicates that MAC of this neigh should be
1098 		      * updated in HW
1099 		      */
1100 	struct mlxsw_sp_neigh_entry *neigh_entry;
1101 };
1102 
1103 struct mlxsw_sp_nexthop_group {
1104 	struct list_head list; /* node in mlxsw->router.nexthop_group_list */
1105 	struct list_head fib_list; /* list of fib entries that use this group */
1106 	u8 adj_index_valid:1;
1107 	u32 adj_index;
1108 	u16 ecmp_size;
1109 	u16 count;
1110 	struct mlxsw_sp_nexthop nexthops[0];
1111 };
1112 
1113 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
1114 					     struct mlxsw_sp_vr *vr,
1115 					     u32 adj_index, u16 ecmp_size,
1116 					     u32 new_adj_index,
1117 					     u16 new_ecmp_size)
1118 {
1119 	char raleu_pl[MLXSW_REG_RALEU_LEN];
1120 
1121 	mlxsw_reg_raleu_pack(raleu_pl,
1122 			     (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
1123 			     adj_index, ecmp_size, new_adj_index,
1124 			     new_ecmp_size);
1125 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1126 }
1127 
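/* Repoint all FIB entries using this nexthop group from the old
 * adjacency block to the new one, issuing one RALEU write per virtual
 * router encountered in the list (a VR-wide update covers all of its
 * entries at once).
 */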
1128 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1129 					  struct mlxsw_sp_nexthop_group *nh_grp,
1130 					  u32 old_adj_index, u16 old_ecmp_size)
1131 {
1132 	struct mlxsw_sp_fib_entry *fib_entry;
1133 	struct mlxsw_sp_vr *vr = NULL;
1134 	int err;
1135 
1136 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1137 		if (vr == fib_entry->vr)
1138 			continue;
1139 		vr = fib_entry->vr;
1140 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
1141 							old_adj_index,
1142 							old_ecmp_size,
1143 							nh_grp->adj_index,
1144 							nh_grp->ecmp_size);
1145 		if (err)
1146 			return err;
1147 	}
1148 	return 0;
1149 }
1150 
1151 static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1152 				       struct mlxsw_sp_nexthop *nh)
1153 {
1154 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1155 	char ratr_pl[MLXSW_REG_RATR_LEN];
1156 
1157 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
1158 			    true, adj_index, neigh_entry->rif);
1159 	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
1160 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
1161 }
1162 
1163 static int
1164 mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
1165 				  struct mlxsw_sp_nexthop_group *nh_grp)
1166 {
1167 	u32 adj_index = nh_grp->adj_index; /* base */
1168 	struct mlxsw_sp_nexthop *nh;
1169 	int i;
1170 	int err;
1171 
1172 	for (i = 0; i < nh_grp->count; i++) {
1173 		nh = &nh_grp->nexthops[i];
1174 
1175 		if (!nh->should_offload) {
1176 			nh->offloaded = 0;
1177 			continue;
1178 		}
1179 
1180 		if (nh->update) {
1181 			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1182 							  adj_index, nh);
1183 			if (err)
1184 				return err;
1185 			nh->update = 0;
1186 			nh->offloaded = 1;
1187 		}
1188 		adj_index++;
1189 	}
1190 	return 0;
1191 }
1192 
1193 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1194 				     struct mlxsw_sp_fib_entry *fib_entry);
1195 
1196 static int
1197 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1198 				    struct mlxsw_sp_nexthop_group *nh_grp)
1199 {
1200 	struct mlxsw_sp_fib_entry *fib_entry;
1201 	int err;
1202 
1203 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1204 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1205 		if (err)
1206 			return err;
1207 	}
1208 	return 0;
1209 }
1210 
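/* Refresh the group after a nexthop changed state: if the set of
 * offloaded nexthops is unchanged, only update MACs in place.
 * Otherwise allocate a new KVD linear block of the new ECMP size,
 * write the entries, repoint the FIB entries and free the old block.
 * On any failure, fall back to trapping the traffic to the CPU.
 */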
1211 static void
1212 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1213 			       struct mlxsw_sp_nexthop_group *nh_grp)
1214 {
1215 	struct mlxsw_sp_nexthop *nh;
1216 	bool offload_change = false;
1217 	u32 adj_index;
1218 	u16 ecmp_size = 0;
1219 	bool old_adj_index_valid;
1220 	u32 old_adj_index;
1221 	u16 old_ecmp_size;
1222 	int ret;
1223 	int i;
1224 	int err;
1225 
1226 	for (i = 0; i < nh_grp->count; i++) {
1227 		nh = &nh_grp->nexthops[i];
1228 
1229 		if (nh->should_offload ^ nh->offloaded) {
1230 			offload_change = true;
1231 			if (nh->should_offload)
1232 				nh->update = 1;
1233 		}
1234 		if (nh->should_offload)
1235 			ecmp_size++;
1236 	}
1237 	if (!offload_change) {
1238 		/* Nothing was added or removed, so no need to reallocate. Just
1239 		 * update MAC on existing adjacency indexes.
1240 		 */
1241 		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
1242 		if (err) {
1243 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1244 			goto set_trap;
1245 		}
1246 		return;
1247 	}
1248 	if (!ecmp_size)
1249 		/* No neigh of this group is connected so we just set
1250 		 * the trap and let everything flow through the kernel.
1251 		 */
1252 		goto set_trap;
1253 
1254 	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
1255 	if (ret < 0) {
1256 		/* We ran out of KVD linear space, just set the
1257 		 * trap and let everything flow through the kernel.
1258 		 */
1259 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
1260 		goto set_trap;
1261 	}
1262 	adj_index = ret;
1263 	old_adj_index_valid = nh_grp->adj_index_valid;
1264 	old_adj_index = nh_grp->adj_index;
1265 	old_ecmp_size = nh_grp->ecmp_size;
1266 	nh_grp->adj_index_valid = 1;
1267 	nh_grp->adj_index = adj_index;
1268 	nh_grp->ecmp_size = ecmp_size;
1269 	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
1270 	if (err) {
1271 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1272 		goto set_trap;
1273 	}
1274 
1275 	if (!old_adj_index_valid) {
1276 		/* The trap was set for fib entries, so we have to call
1277 		 * fib entry update to unset it and use adjacency index.
1278 		 */
1279 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1280 		if (err) {
1281 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
1282 			goto set_trap;
1283 		}
1284 		return;
1285 	}
1286 
1287 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
1288 					     old_adj_index, old_ecmp_size);
1289 	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
1290 	if (err) {
1291 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
1292 		goto set_trap;
1293 	}
1294 	return;
1295 
1296 set_trap:
1297 	old_adj_index_valid = nh_grp->adj_index_valid;
1298 	nh_grp->adj_index_valid = 0;
1299 	for (i = 0; i < nh_grp->count; i++) {
1300 		nh = &nh_grp->nexthops[i];
1301 		nh->offloaded = 0;
1302 	}
1303 	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1304 	if (err)
1305 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
1306 	if (old_adj_index_valid)
1307 		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
1308 }
1309 
1310 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1311 					    bool removing)
1312 {
1313 	if (!removing && !nh->should_offload)
1314 		nh->should_offload = 1;
1315 	else if (removing && nh->offloaded)
1316 		nh->should_offload = 0;
1317 	nh->update = 1;
1318 }
1319 
1320 static void
1321 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1322 			      struct mlxsw_sp_neigh_entry *neigh_entry,
1323 			      bool removing)
1324 {
1325 	struct mlxsw_sp_nexthop *nh;
1326 
1327 	/* Take RTNL mutex here to prevent the lists from changing */
1328 	rtnl_lock();
1329 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
1330 			    neigh_list_node) {
1331 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
1332 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1333 	}
1334 	rtnl_unlock();
1335 }
1336 
1337 static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1338 				 struct mlxsw_sp_nexthop_group *nh_grp,
1339 				 struct mlxsw_sp_nexthop *nh,
1340 				 struct fib_nh *fib_nh)
1341 {
1342 	struct mlxsw_sp_neigh_entry *neigh_entry;
1343 	u32 gwip = ntohl(fib_nh->nh_gw);
1344 	struct net_device *dev = fib_nh->nh_dev;
1345 	struct neighbour *n;
1346 	u8 nud_state;
1347 
1348 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
1349 						  sizeof(gwip), dev);
1350 	if (!neigh_entry) {
1351 		__be32 gwipn = htonl(gwip);
1352 
1353 		n = neigh_create(&arp_tbl, &gwipn, dev);
1354 		if (IS_ERR(n))
1355 			return PTR_ERR(n);
1356 		neigh_event_send(n, NULL);
1357 		neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
1358 							  sizeof(gwip), dev);
1359 		if (!neigh_entry) {
1360 			neigh_release(n);
1361 			return -EINVAL;
1362 		}
1363 	} else {
1364 		/* Take a reference of the neigh here, ensuring that it would
1365 		 * not be destructed before the nexthop entry is finished.
1366 		 * The other branch takes its reference in neigh_create().
1367 		 */
1368 		n = neigh_entry->n;
1369 		neigh_clone(n);
1370 	}
1371 
1372 	/* If that is the first nexthop connected to that neigh, add to
1373 	 * nexthop_neighs_list
1374 	 */
1375 	if (list_empty(&neigh_entry->nexthop_list))
1376 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
1377 			      &mlxsw_sp->router.nexthop_neighs_list);
1378 
1379 	nh->nh_grp = nh_grp;
1380 	nh->neigh_entry = neigh_entry;
1381 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1382 	read_lock_bh(&n->lock);
1383 	nud_state = n->nud_state;
1384 	read_unlock_bh(&n->lock);
1385 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));
1386 
1387 	return 0;
1388 }
1389 
1390 static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1391 				  struct mlxsw_sp_nexthop *nh)
1392 {
1393 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1394 
1395 	list_del(&nh->neigh_list_node);
1396 
1397 	/* If that is the last nexthop connected to that neigh, remove from
1398 	 * nexthop_neighs_list
1399 	 */
1400 	if (list_empty(&nh->neigh_entry->nexthop_list))
1401 		list_del(&nh->neigh_entry->nexthop_neighs_list_node);
1402 
1403 	neigh_release(neigh_entry->n);
1404 }
1405 
1406 static struct mlxsw_sp_nexthop_group *
1407 mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1408 {
1409 	struct mlxsw_sp_nexthop_group *nh_grp;
1410 	struct mlxsw_sp_nexthop *nh;
1411 	struct fib_nh *fib_nh;
1412 	size_t alloc_size;
1413 	int i;
1414 	int err;
1415 
1416 	alloc_size = sizeof(*nh_grp) +
1417 		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
1418 	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
1419 	if (!nh_grp)
1420 		return ERR_PTR(-ENOMEM);
1421 	INIT_LIST_HEAD(&nh_grp->fib_list);
1422 	nh_grp->count = fi->fib_nhs;
1423 	for (i = 0; i < nh_grp->count; i++) {
1424 		nh = &nh_grp->nexthops[i];
1425 		fib_nh = &fi->fib_nh[i];
1426 		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
1427 		if (err)
1428 			goto err_nexthop_init;
1429 	}
1430 	list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
1431 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1432 	return nh_grp;
1433 
1434 err_nexthop_init:
1435 	for (i--; i >= 0; i--)
1436 		mlxsw_sp_nexthop_fini(mlxsw_sp, &nh_grp->nexthops[i]);
1437 	kfree(nh_grp);
1438 	return ERR_PTR(err);
1439 }
1440 
1441 static void
1442 mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
1443 			       struct mlxsw_sp_nexthop_group *nh_grp)
1444 {
1445 	struct mlxsw_sp_nexthop *nh;
1446 	int i;
1447 
1448 	list_del(&nh_grp->list);
1449 	for (i = 0; i < nh_grp->count; i++) {
1450 		nh = &nh_grp->nexthops[i];
1451 		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1452 	}
1453 	kfree(nh_grp);
1454 }
1455 
1456 static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
1457 				   struct fib_info *fi)
1458 {
1459 	int i;
1460 
1461 	for (i = 0; i < fi->fib_nhs; i++) {
1462 		struct fib_nh *fib_nh = &fi->fib_nh[i];
1463 		u32 gwip = ntohl(fib_nh->nh_gw);
1464 
1465 		if (memcmp(nh->neigh_entry->key.addr,
1466 			   &gwip, sizeof(u32)) == 0 &&
1467 		    nh->neigh_entry->key.dev == fib_nh->nh_dev)
1468 			return true;
1469 	}
1470 	return false;
1471 }
1472 
1473 static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
1474 					 struct fib_info *fi)
1475 {
1476 	int i;
1477 
1478 	if (nh_grp->count != fi->fib_nhs)
1479 		return false;
1480 	for (i = 0; i < nh_grp->count; i++) {
1481 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
1482 
1483 		if (!mlxsw_sp_nexthop_match(nh, fi))
1484 			return false;
1485 	}
1486 	return true;
1487 }
1488 
1489 static struct mlxsw_sp_nexthop_group *
1490 mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1491 {
1492 	struct mlxsw_sp_nexthop_group *nh_grp;
1493 
1494 	list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
1495 			    list) {
1496 		if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
1497 			return nh_grp;
1498 	}
1499 	return NULL;
1500 }
1501 
1502 static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1503 				      struct mlxsw_sp_fib_entry *fib_entry,
1504 				      struct fib_info *fi)
1505 {
1506 	struct mlxsw_sp_nexthop_group *nh_grp;
1507 
1508 	nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi);
1509 	if (!nh_grp) {
1510 		nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1511 		if (IS_ERR(nh_grp))
1512 			return PTR_ERR(nh_grp);
1513 	}
1514 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1515 	fib_entry->nh_group = nh_grp;
1516 	return 0;
1517 }
1518 
1519 static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1520 				       struct mlxsw_sp_fib_entry *fib_entry)
1521 {
1522 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1523 
1524 	list_del(&fib_entry->nexthop_group_node);
1525 	if (!list_empty(&nh_grp->fib_list))
1526 		return;
1527 	mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1528 }
1529 
1530 static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
1531 					 struct mlxsw_sp_fib_entry *fib_entry,
1532 					 enum mlxsw_reg_ralue_op op)
1533 {
1534 	char ralue_pl[MLXSW_REG_RALUE_LEN];
1535 	u32 *p_dip = (u32 *) fib_entry->key.addr;
1536 	struct mlxsw_sp_vr *vr = fib_entry->vr;
1537 	enum mlxsw_reg_ralue_trap_action trap_action;
1538 	u16 trap_id = 0;
1539 	u32 adjacency_index = 0;
1540 	u16 ecmp_size = 0;
1541 
1542 	/* In case the nexthop group adjacency index is valid, use it
1543 	 * with the provided ECMP size. Otherwise, set up a trap and pass
1544 	 * the traffic to the kernel.
1545 	 */
1546 	if (fib_entry->nh_group->adj_index_valid) {
1547 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
1548 		adjacency_index = fib_entry->nh_group->adj_index;
1549 		ecmp_size = fib_entry->nh_group->ecmp_size;
1550 	} else {
1551 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
1552 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
1553 	}
1554 
1555 	mlxsw_reg_ralue_pack4(ralue_pl,
1556 			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
1557 			      vr->id, fib_entry->key.prefix_len, *p_dip);
1558 	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
1559 					adjacency_index, ecmp_size);
1560 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1561 }
1562 
1563 static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
1564 					struct mlxsw_sp_fib_entry *fib_entry,
1565 					enum mlxsw_reg_ralue_op op)
1566 {
1567 	char ralue_pl[MLXSW_REG_RALUE_LEN];
1568 	u32 *p_dip = (u32 *) fib_entry->key.addr;
1569 	struct mlxsw_sp_vr *vr = fib_entry->vr;
1570 
1571 	mlxsw_reg_ralue_pack4(ralue_pl,
1572 			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
1573 			      vr->id, fib_entry->key.prefix_len, *p_dip);
1574 	mlxsw_reg_ralue_act_local_pack(ralue_pl,
1575 				       MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
1576 				       fib_entry->rif);
1577 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1578 }
1579 
1580 static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
1581 				       struct mlxsw_sp_fib_entry *fib_entry,
1582 				       enum mlxsw_reg_ralue_op op)
1583 {
1584 	char ralue_pl[MLXSW_REG_RALUE_LEN];
1585 	u32 *p_dip = (u32 *) fib_entry->key.addr;
1586 	struct mlxsw_sp_vr *vr = fib_entry->vr;
1587 
1588 	mlxsw_reg_ralue_pack4(ralue_pl,
1589 			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
1590 			      vr->id, fib_entry->key.prefix_len, *p_dip);
1591 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
1592 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1593 }
1594 
1595 static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
1596 				  struct mlxsw_sp_fib_entry *fib_entry,
1597 				  enum mlxsw_reg_ralue_op op)
1598 {
1599 	switch (fib_entry->type) {
1600 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1601 		return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
1602 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
1603 		return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
1604 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
1605 		return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
1606 	}
1607 	return -EINVAL;
1608 }
1609 
1610 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
1611 				 struct mlxsw_sp_fib_entry *fib_entry,
1612 				 enum mlxsw_reg_ralue_op op)
1613 {
1614 	switch (fib_entry->vr->proto) {
1615 	case MLXSW_SP_L3_PROTO_IPV4:
1616 		return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
1617 	case MLXSW_SP_L3_PROTO_IPV6:
1618 		return -EINVAL;
1619 	}
1620 	return -EINVAL;
1621 }
1622 
1623 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1624 				     struct mlxsw_sp_fib_entry *fib_entry)
1625 {
1626 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1627 				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
1628 }
1629 
1630 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
1631 				  struct mlxsw_sp_fib_entry *fib_entry)
1632 {
1633 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1634 				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
1635 }
1636 
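/* Classify the route being added: host (RTN_LOCAL) and broadcast
 * routes are trapped to the CPU, as are routes with a nexthop via a
 * device unrelated to us; routes of non-universe scope are programmed
 * as local; the rest become remote routes with a nexthop group.
 */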
1637 static int
1638 mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
1639 				const struct fib_entry_notifier_info *fen_info,
1640 				struct mlxsw_sp_fib_entry *fib_entry)
1641 {
1642 	struct fib_info *fi = fen_info->fi;
1643 	struct mlxsw_sp_rif *r = NULL;
1644 	int nhsel;
1645 	int err;
1646 
1647 	if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
1648 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1649 		return 0;
1650 	}
1651 	if (fen_info->type != RTN_UNICAST)
1652 		return -EINVAL;
1653 
1654 	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1655 		const struct fib_nh *nh = &fi->fib_nh[nhsel];
1656 
1657 		if (!nh->nh_dev)
1658 			continue;
1659 		r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev);
1660 		if (!r) {
1661 			/* In case a router interface is not found for
1662 			 * at least one of the nexthops, that means
1663 			 * the nexthop points to some device unrelated
1664 			 * to us. Set the trap and pass the packets for
1665 			 * this prefix to the kernel.
1666 			 */
1667 			break;
1668 		}
1669 	}
1670 
1671 	if (!r) {
1672 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1673 		return 0;
1674 	}
1675 
1676 	if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
1677 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
1678 		fib_entry->rif = r->rif;
1679 	} else {
1680 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
1681 		err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
1682 		if (err)
1683 			return err;
1684 	}
1685 	fib_info_offload_inc(fen_info->fi);
1686 	return 0;
1687 }
1688 
1689 static void
1690 mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
1691 				struct mlxsw_sp_fib_entry *fib_entry)
1692 {
1693 	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1694 		fib_info_offload_dec(fib_entry->fi);
1695 	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
1696 		mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
1697 }
1698 
1699 static struct mlxsw_sp_fib_entry *
1700 mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
1701 		       const struct fib_entry_notifier_info *fen_info)
1702 {
1703 	struct mlxsw_sp_fib_entry *fib_entry;
1704 	struct fib_info *fi = fen_info->fi;
1705 	struct mlxsw_sp_vr *vr;
1706 	int err;
1707 
1708 	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
1709 			     MLXSW_SP_L3_PROTO_IPV4);
1710 	if (IS_ERR(vr))
1711 		return ERR_CAST(vr);
1712 
1713 	fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
1714 					      sizeof(fen_info->dst),
1715 					      fen_info->dst_len, fi->fib_dev);
1716 	if (fib_entry) {
1717 		/* Already exists, just take a reference */
1718 		fib_entry->ref_count++;
1719 		return fib_entry;
1720 	}
1721 	fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
1722 					      sizeof(fen_info->dst),
1723 					      fen_info->dst_len, fi->fib_dev);
1724 	if (!fib_entry) {
1725 		err = -ENOMEM;
1726 		goto err_fib_entry_create;
1727 	}
1728 	fib_entry->vr = vr;
1729 	fib_entry->fi = fi;
1730 	fib_entry->ref_count = 1;
1731 
1732 	err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry);
1733 	if (err)
1734 		goto err_fib4_entry_init;
1735 
1736 	return fib_entry;
1737 
1738 err_fib4_entry_init:
1739 	mlxsw_sp_fib_entry_destroy(fib_entry);
1740 err_fib_entry_create:
1741 	mlxsw_sp_vr_put(mlxsw_sp, vr);
1742 
1743 	return ERR_PTR(err);
1744 }
1745 
1746 static struct mlxsw_sp_fib_entry *
1747 mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
1748 			const struct fib_entry_notifier_info *fen_info)
1749 {
1750 	struct mlxsw_sp_vr *vr;
1751 
1752 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
1753 			      MLXSW_SP_L3_PROTO_IPV4);
1754 	if (!vr)
1755 		return NULL;
1756 
1757 	return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
1758 					 sizeof(fen_info->dst),
1759 					 fen_info->dst_len,
1760 					 fen_info->fi->fib_dev);
1761 }
1762 
static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_vr *vr = fib_entry->vr;

	if (--fib_entry->ref_count == 0) {
		mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
		mlxsw_sp_fib_entry_destroy(fib_entry);
	}
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

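/* Drop all references still held on the FIB entry. Used on the abort
 * path, where entries are flushed without a matching delete
 * notification for every route that referenced them.
 */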
static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	unsigned int last_ref_count;

	do {
		last_ref_count = fib_entry->ref_count;
		mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
	} while (last_ref_count != 1);
}

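/* Mirror an added IPv4 route into the device: take a reference on
 * the matching FIB entry and, for the first reference only, insert
 * the entry into the driver's FIB and program it into the hardware.
 */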
static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
				    struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_vr *vr;
	int err;

	if (mlxsw_sp->router.aborted)
		return 0;

	fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
		return PTR_ERR(fib_entry);
	}

	if (fib_entry->ref_count != 1)
		return 0;

	vr = fib_entry->vr;
	err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
		goto err_fib_entry_insert;
	}
	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_add;
	return 0;

err_fib_entry_add:
	mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
err_fib_entry_insert:
	mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
	return err;
}

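/* Mirror a deleted IPv4 route: if this is the last route referencing
 * the FIB entry, remove it from the hardware and the driver's FIB,
 * then drop the reference taken when the route was added.
 */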
static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				    struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	if (mlxsw_sp->router.aborted)
		return 0;

	fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
	if (!fib_entry) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
		return -ENOENT;
	}

	if (fib_entry->ref_count == 1) {
		mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
		mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
	}

	mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
	return 0;
}

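/* Fall back to a catch-all route: set up a minimal LPM tree and
 * install a /0 entry in virtual router 0 whose action is to trap
 * packets to the CPU, so routing is handled by the kernel once the
 * hardware tables have been flushed.
 */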
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	char raltb_pl[MLXSW_REG_RALTB_LEN];
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	int err;

	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
	if (err)
		return err;

	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
			      MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

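/* Stop offloading: flush all FIB entries from the device and trap
 * all routed packets to the CPU instead. Entered when the hardware
 * can no longer mirror the kernel FIB, e.g. on an insertion failure
 * or when FIB rules are configured.
 */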
static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_resources *resources;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_entry *tmp;
	struct mlxsw_sp_vr *vr;
	int i;
	int err;

	resources = mlxsw_core_resources_get(mlxsw_sp->core);
	for (i = 0; i < resources->max_virtual_routers; i++) {
		vr = &mlxsw_sp->router.vrs[i];
		if (!vr->used)
			continue;

		list_for_each_entry_safe(fib_entry, tmp,
					 &vr->fib->entry_list, list) {
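			/* Releasing the last entry may also free the
			 * FIB (through the VR reference) and with it
			 * the list head, so compute the termination
			 * test before the entry is put below.
			 */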
			bool do_break = &tmp->list == &vr->fib->entry_list;

			mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
			mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
						  fib_entry);
			mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
			if (do_break)
				break;
		}
	}
	mlxsw_sp->router.aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}

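/* Allocate the router interface (RIF) table and enable the router
 * via the RGCR register.
 */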
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_resources *resources;
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	int err;

	resources = mlxsw_core_resources_get(mlxsw_sp->core);
	if (!resources->max_rif_valid)
		return -EIO;

	mlxsw_sp->rifs = kcalloc(resources->max_rif,
				 sizeof(struct mlxsw_sp_rif *), GFP_KERNEL);
	if (!mlxsw_sp->rifs)
		return -ENOMEM;

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		goto err_rgcr_fail;

	return 0;

err_rgcr_fail:
	kfree(mlxsw_sp->rifs);
	return err;
}

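/* Disable the router and free the RIF table, warning about any RIF
 * that is still in use.
 */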
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_resources *resources;
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	int i;

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);

	resources = mlxsw_core_resources_get(mlxsw_sp->core);
	for (i = 0; i < resources->max_rif; i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);

	kfree(mlxsw_sp->rifs);
}

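/* FIB notifier callback: replay route additions and deletions into
 * the device. A failed addition, or the appearance of FIB rules
 * (which the device cannot honour), aborts offloading altogether.
 */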
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
	struct fib_entry_notifier_info *fen_info = ptr;
	int err;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		mlxsw_sp_router_fib4_abort(mlxsw_sp);
		break;
	}
	return NOTIFY_DONE;
}

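/* Initialize the router: base registers and RIFs, LPM trees, virtual
 * routers and neighbour tracking, then subscribe to FIB events.
 */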
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		return err;

	mlxsw_sp_lpm_init(mlxsw_sp);
	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	register_fib_notifier(&mlxsw_sp->fib_nb);
	return 0;

err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
	return err;
}

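/* Tear the router down in the reverse order of initialization. */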
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
}