// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <linux/genalloc.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_crif_key {
	struct net_device *dev;
};

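/* A CRIF represents a "candidate RIF": a netdevice that a RIF may be
 * configured on top of. It is keyed by the netdevice, carries the list of
 * nexthops that resolve through that device, and points at the RIF itself
 * once one exists (crif->rif is NULL otherwise).
 */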
struct mlxsw_sp_crif {
	struct mlxsw_sp_crif_key key;
	struct rhash_head ht_node;
	bool can_destroy;
	struct list_head nexthop_list;
	struct mlxsw_sp_rif *rif;
};

static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_crif, key),
	.key_len = sizeof_field(struct mlxsw_sp_crif, key),
	.head_offset = offsetof(struct mlxsw_sp_crif, ht_node),
};

struct mlxsw_sp_rif {
	struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */
	netdevice_tracker dev_tracker;
	struct list_head neigh_list;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u8 mac_profile_id;
	u8 rif_entries;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

static struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	if (!rif->crif)
		return NULL;
	return rif->crif->key.dev;
}

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
	bool double_entry;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id;	/* Spectrum-1. */
	u16 ul_rif_id;	/* Spectrum-2+. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

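/* Per-RIF-type operations. setup() fills in type-specific attributes before
 * the RIF is committed to the device, configure()/deconfigure() program and
 * unprogram the hardware, and fid_get() resolves the FID that traffic on the
 * RIF is classified to.
 */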
struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 const struct mlxsw_sp_rif_params *params,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

struct mlxsw_sp_rif_mac_profile {
	unsigned char mac_prefix[ETH_ALEN];
	refcount_t ref_count;
	u8 id;
};

struct mlxsw_sp_router_ops {
	int (*init)(struct mlxsw_sp *mlxsw_sp);
	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

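/* Read the current value of a bound RIF counter. Note that only the good
 * unicast packet count is reported here; the full basic counter set is read
 * (and cleared) by mlxsw_sp_rif_counter_fetch_clear() below.
 */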
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

struct mlxsw_sp_rif_counter_set_basic {
	u64 good_unicast_packets;
	u64 good_multicast_packets;
	u64 good_broadcast_packets;
	u64 good_unicast_bytes;
	u64 good_multicast_bytes;
	u64 good_broadcast_bytes;
	u64 error_packets;
	u64 discard_packets;
	u64 error_bytes;
	u64 discard_bytes;
};

static int
mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
				 enum mlxsw_sp_rif_counter_dir dir,
				 struct mlxsw_sp_rif_counter_set_basic *set)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	int err;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;

	if (!set)
		return 0;

#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))

	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);

#undef MLXSW_SP_RIF_COUNTER_EXTRACT

	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

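/* Allocating a RIF counter is a three-step sequence: reserve an index from
 * the RIF counter sub-pool, clear the counter via RICNT, and finally bind it
 * to the RIF through RITR. The valid flag is only set once all three steps
 * have succeeded.
 */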
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;
	int err;

	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
		return 0;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

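/* One bit per possible prefix length: /0 through /128 for IPv6, hence
 * sizeof(struct in6_addr) * BITS_PER_BYTE + 1 == 129 bits. IPv4 usage
 * occupies the low 33 bits of the same bitmap.
 */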
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	struct fib_info *fi;
	u32 tb_id;
	dscp_t dscp;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	refcount_t ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (refcount_read(&lpm_tree->ref_count) == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

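/* Program the tree's bin structure via RALST. Judging by the packing below,
 * the used prefix lengths are laid out as a simple chain: the longest used
 * prefix becomes the root bin, and each bin's child is the next-shorter used
 * prefix (prefix length 0 is implicit and never packed as a bin).
 */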
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	refcount_set(&lpm_tree->ref_count, 1);
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (refcount_read(&lpm_tree->ref_count) &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	refcount_inc(&lpm_tree->ref_count);
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (!refcount_dec_and_test(&lpm_tree->ref_count))
		return;
	mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

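/* Tree 0 is reserved by the hardware, so usable trees are numbered starting
 * from MLXSW_SP_LPM_TREE_MIN. One empty tree is pre-created per protocol and
 * kept referenced for the lifetime of the router, so that a FIB always has a
 * valid tree to bind to.
 */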
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

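/* Replace the LPM tree bound to a FIB in a make-before-break fashion: take a
 * reference on the new tree and bind it first, and only then drop the old
 * tree's reference. On failure the old binding is left intact.
 */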
static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

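/* The underlay table of a tunnel is dictated by the device the tunnel is
 * bound to (e.g. a VRF); without a bound device the main table is used.
 */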
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static void
mlxsw_sp_crif_init(struct mlxsw_sp_crif *crif, struct net_device *dev)
{
	crif->key.dev = dev;
	INIT_LIST_HEAD(&crif->nexthop_list);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_alloc(struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;

	crif = kzalloc(sizeof(*crif), GFP_KERNEL);
	if (!crif)
		return NULL;

	mlxsw_sp_crif_init(crif, dev);
	return crif;
}

static void mlxsw_sp_crif_free(struct mlxsw_sp_crif *crif)
{
	if (WARN_ON(crif->rif))
		return;

	WARN_ON(!list_empty(&crif->nexthop_list));
	kfree(crif);
}

static int mlxsw_sp_crif_insert(struct mlxsw_sp_router *router,
				struct mlxsw_sp_crif *crif)
{
	return rhashtable_insert_fast(&router->crif_ht, &crif->ht_node,
				      mlxsw_sp_crif_ht_params);
}

static void mlxsw_sp_crif_remove(struct mlxsw_sp_router *router,
				 struct mlxsw_sp_crif *crif)
{
	rhashtable_remove_fast(&router->crif_ht, &crif->ht_node,
			       mlxsw_sp_crif_ht_params);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_lookup(struct mlxsw_sp_router *router,
		     const struct net_device *dev)
{
	struct mlxsw_sp_crif_key key = {
		.dev = (struct net_device *)dev,
	};

	return rhashtable_lookup_fast(&router->crif_ht, &key,
				      mlxsw_sp_crif_ht_params);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.common.double_entry = ipip_ops->double_rif_entry,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = ipip_ops->parms_init(ol_dev);

	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

err_rem_ip_addr_set:
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
						 enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* Not all tunnel types require increasing the default parsing depth
	 * (96 bytes).
	 */
	if (ipip_ops->inc_parsing_depth)
		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);

	return 0;
}

static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
						  enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipipt];

	if (ipip_ops->inc_parsing_depth)
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
}

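/* Initialize a decap FIB entry: reserve one KVDL adjacency entry to serve as
 * the tunnel index that the IPIP_DECAP entry is programmed with, bump the
 * parsing depth if the tunnel type needs it, and cross-link the FIB entry
 * with its IPIP entry.
 */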
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
						    ipip_entry->ipipt);
	if (err)
		goto err_parsing_depth_inc;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;

	return 0;

err_parsing_depth_inc:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   tunnel_index);
	return err;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;

	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		addrp = &addr->addr6;
		addr_len = 16;
		addr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		saddrp = &saddr.addr6;
		saddr_len = 16;
		saddr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	struct net_device *ul_dev;

	rcu_read_lock();

	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
	if (!ul_dev)
		goto out_unlock;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			goto out_unlock;

	rcu_read_unlock();

	return NULL;

out_unlock:
	rcu_read_unlock();
	return ipip_entry;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	return ops->can_offload(mlxsw_sp, ol_dev);
}

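/* On overlay device registration, offload the tunnel only if no already
 * offloaded tunnel decapsulates with the same underlay table and local
 * address. The demote-by-saddr check below keeps at most one tunnel per
 * (table, saddr) pair offloaded; if a conflicting tunnel exists, it is
 * demoted and the newly registered device is left unoffloaded.
 */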
mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp * mlxsw_sp,struct net_device * ol_dev)1591 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1592 struct net_device *ol_dev)
1593 {
1594 enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1595 struct mlxsw_sp_ipip_entry *ipip_entry;
1596 enum mlxsw_sp_l3proto ul_proto;
1597 union mlxsw_sp_l3addr saddr;
1598 u32 ul_tb_id;
1599
1600 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1601 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1602 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1603 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1604 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1605 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1606 saddr, ul_tb_id,
1607 NULL)) {
1608 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1609 ol_dev);
1610 if (IS_ERR(ipip_entry))
1611 return PTR_ERR(ipip_entry);
1612 }
1613 }
1614
1615 return 0;
1616 }
1617
mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp * mlxsw_sp,struct net_device * ol_dev)1618 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1619 struct net_device *ol_dev)
1620 {
1621 struct mlxsw_sp_ipip_entry *ipip_entry;
1622
1623 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1624 if (ipip_entry)
1625 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1626 }
1627
1628 static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ipip_entry * ipip_entry)1629 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1630 struct mlxsw_sp_ipip_entry *ipip_entry)
1631 {
1632 struct mlxsw_sp_fib_entry *decap_fib_entry;
1633
1634 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1635 if (decap_fib_entry)
1636 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1637 decap_fib_entry);
1638 }
1639
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	struct in6_addr *saddr6;
	u32 saddr4;

	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr4,
						   lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		saddr6 = &lb_cf.saddr.addr6;
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, dev->mtu);
		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr6,
						   lb_cf.okey);
		break;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);

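/* Release the old RIF after the new one has taken over its role. When
 * migrate_nhs is set, nexthops are first re-pointed at the new RIF.
 */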
static void mlxsw_sp_rif_migrate_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif,
					 bool migrate_nhs)
{
	struct mlxsw_sp_crif *crif = old_rif->crif;
	struct mlxsw_sp_crif mock_crif = {};

	if (migrate_nhs)
		mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);

	/* Plant a mock CRIF so that destroying the old RIF doesn't unoffload
	 * our nexthops and IPIP tunnels, and doesn't sever the crif->rif link.
	 */
	mlxsw_sp_crif_init(&mock_crif, crif->key.dev);
	old_rif->crif = &mock_crif;
	mock_crif.rif = old_rif;
	mlxsw_sp_rif_destroy(old_rif);
}

static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	mlxsw_sp_rif_migrate_destroy(mlxsw_sp, &old_lb_rif->common,
				     &new_lb_rif->common, keep_encap);
	return 0;
}

/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
 * @mlxsw_sp: mlxsw_sp.
 * @ipip_entry: IPIP entry.
 * @recreate_loopback: Recreates the associated loopback RIF.
 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 * @extack: extack.
 *
 * Return: Non-zero value on failure.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}

static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return 0;

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     bool *demote_this,
				     struct netlink_ext_ack *extack)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* Moving underlay to a different VRF might cause local address
	 * conflict, and the conflicting tunnels need to be demoted.
	 */
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		*demote_this = true;
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}

void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}

static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *ul_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();
		if (ipip_ul_dev == ul_dev)
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
	}
}

static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *ol_dev,
					    unsigned long event,
					    struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;
	int err = 0;

	switch (event) {
	case NETDEV_REGISTER:
		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								   ol_dev,
								   extack);
		break;
	case NETDEV_CHANGE:
		extack = info->extack;
		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							      ol_dev, extack);
		break;
	case NETDEV_CHANGEMTU:
		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
		break;
	}
	return err;
}

static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   bool *demote_this,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    demote_this,
								    extack);
		break;

	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}
	return 0;
}

static int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err;

	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		struct mlxsw_sp_ipip_entry *prev;
		bool demote_this = false;

		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, &demote_this,
							 event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			return err;
		}

		if (demote_this) {
			if (list_is_first(&ipip_entry->ipip_list_node,
					  &mlxsw_sp->router->ipip_list))
				prev = NULL;
			else
				/* This can't be cached from previous iteration,
				 * because that entry could be gone now.
				 */
				prev = list_prev_entry(ipip_entry,
						       ipip_list_node);
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			ipip_entry = prev;
		}
	}

	return 0;
}

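/* Record the NVE decap configuration and, if a matching local route is
 * already offloaded as a trap, convert it into an NVE decap entry referencing
 * the given tunnel index. On failure the route reverts to a trap.
 */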
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip,
				      u32 tunnel_index)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
		err = -EINVAL;
		goto out;
	}

	router->nve_decap_config.ul_tb_id = ul_tb_id;
	router->nve_decap_config.tunnel_index = tunnel_index;
	router->nve_decap_config.ul_proto = ul_proto;
	router->nve_decap_config.ul_sip = *ul_sip;
	router->nve_decap_config.valid = true;

	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface.
	 */
	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->decap.tunnel_index = tunnel_index;
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;

	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_update;

	goto out;

err_fib_entry_update:
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

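/* Clear the NVE decap configuration and, if the matching local route is
 * offloaded as an NVE decap entry, flip it back to a trap.
 */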
void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
		goto out;

	router->nve_decap_config.valid = false;

	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
					 u32 ul_tb_id,
					 enum mlxsw_sp_l3proto ul_proto,
					 const union mlxsw_sp_l3addr *ul_sip)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;

	return router->nve_decap_config.valid &&
	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
	       router->nve_decap_config.ul_proto == ul_proto &&
	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
		       sizeof(*ul_sip));
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

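/* Iterator over the RIF's neighbour list: with a NULL cursor return the first
 * entry, otherwise the entry after @neigh_entry, and NULL past the end.
 */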
struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
			struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry) {
		if (list_empty(&rif->neigh_list))
			return NULL;
		else
			return list_first_entry(&rif->neigh_list,
						typeof(*neigh_entry),
						rif_list_node);
	}
	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
		return NULL;
	return list_next_entry(neigh_entry, rif_list_node);
}

int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->key.n->tbl->family;
}

unsigned char *
mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->ha;
}

u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return ntohl(*((__be32 *) n->primary_key));
}

struct in6_addr *
mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return (struct in6_addr *) &n->primary_key;
}

int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_neigh_entry *neigh_entry,
			       u64 *p_counter)
{
	if (!neigh_entry->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
					 p_counter, NULL);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static bool
mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct devlink *devlink;
	const char *table_name;

	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
	case AF_INET:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
		break;
	case AF_INET6:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
		break;
	default:
		WARN_ON(1);
		return false;
	}

	devlink = priv_to_devlink(mlxsw_sp->core);
	return devlink_dpipe_table_counter_enabled(devlink, table_name);
}

static void
mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
		return;

	neigh_entry->counter_valid = true;
}

static void
mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp,
				   neigh_entry->counter_index);
	neigh_entry->counter_valid = false;
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}

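/* Process one IPv4 entry from a RAUHTD activity dump: validate the reported
 * RIF, look up the corresponding kernel neighbour and feed it an event so
 * that the kernel treats it as active.
 */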
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (WARN_ON_ONCE(rif >= max_rifs))
		return;
	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	struct net_device *dev;
	struct neighbour *n;
	struct in6_addr dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
					 (char *) &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
	n = neigh_lookup(&nd_tbl, &dip, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
#else
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
}
#endif

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	/* One record contains one entry. */
	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
					       rec_index);
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	}
}

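/* Heuristically decide whether the RAUHTD response filled the whole buffer,
 * in which case another query is needed: the maximum number of records was
 * returned and the last record is either IPv6 (always one entry) or an IPv4
 * record with all of its entries used.
 */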
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

static int
__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl,
				       enum mlxsw_reg_rauhtd_type type)
{
	int i, num_rec;
	int err;

	/* Ensure the RIF we read from the device does not change mid-dump. */
	mutex_lock(&mlxsw_sp->router->lock);
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}

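/* Dump neighbour activity from the device, IPv4 first and then IPv6. The
 * dump is skipped entirely while no neighbour entries are offloaded.
 */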
static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_rauhtd_type type;
	char *rauhtd_pl;
	int err;

	if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
		return 0;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
	if (err)
		goto out;

	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
out:
	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	mutex_lock(&mlxsw_sp->router->lock);
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those that are unresolved and
	 * send ARP on them. This solves the chicken-and-egg problem: a
	 * nexthop is not offloaded until its neighbour is resolved, but the
	 * neighbour may never get resolved if traffic is flowing in HW via a
	 * different nexthop.
	 */
	mutex_lock(&router->lock);
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	mutex_unlock(&router->lock);

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing, bool dead);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}

static int
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static int
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	const char *dip = n->primary_key;

	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n = neigh_entry->key.n;

	/* Packets with a link-local destination address are trapped
	 * after LPM lookup and never reach the neighbour table, so
	 * there is no need to program such neighbours to the device.
	 */
	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
	    IPV6_ADDR_LINKLOCAL)
		return true;
	return false;
}

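/* Program the neighbour entry to the device via RAUHT, or remove it, and
 * reflect the offload state to the kernel through the NTF_OFFLOADED flag.
 * Nothing is removed if the entry was never connected.
 */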
static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
	int err;

	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl->family == AF_INET) {
		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						      op);
		if (err)
			return;
	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
			return;
		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
						      op);
		if (err)
			return;
	} else {
		WARN_ON_ONCE(1);
		return;
	}

	if (adding)
		neigh_entry->key.n->flags |= NTF_OFFLOADED;
	else
		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
}

void
mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry,
				    bool adding)
{
	if (adding)
		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	else
		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
}

struct mlxsw_sp_netevent_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = net_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	mutex_lock(&mlxsw_sp->router->lock);
	mlxsw_sp_span_respin(mlxsw_sp);

	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	if (neigh_entry->connected && entry_connected &&
	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
		goto out;

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
				      dead);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	neigh_release(n);
	kfree(net_work);
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;

	mlxsw_sp_mp_hash_init(mlxsw_sp);
	kfree(net_work);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;

	__mlxsw_sp_router_init(mlxsw_sp);
	kfree(net_work);
}

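/* Netevent notifiers run in atomic context, so allocate the work item with
 * GFP_ATOMIC and defer the actual processing. Events for foreign network
 * namespaces are ignored.
 */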
static int mlxsw_sp_router_schedule_work(struct net *net,
					 struct mlxsw_sp_router *router,
					 struct neighbour *n,
					 void (*cb)(struct work_struct *))
{
	struct mlxsw_sp_netevent_work *net_work;

	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
		return NOTIFY_DONE;

	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
	if (!net_work)
		return NOTIFY_BAD;

	INIT_WORK(&net_work->work, cb);
	net_work->mlxsw_sp = router->mlxsw_sp;
	net_work->n = n;
	mlxsw_core_schedule_work(&net_work->work);
	return NOTIFY_DONE;
}

static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	rcu_read_unlock();
	return !!mlxsw_sp_port;
}

static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router,
					       struct neighbour *n)
{
	struct net *net;

	net = neigh_parms_net(n->parms);

	/* Take a reference to ensure the neighbour won't be destroyed until
	 * we drop the reference in the delayed work.
	 */
	neigh_clone(n);
	return mlxsw_sp_router_schedule_work(net, router, n,
					     mlxsw_sp_router_neigh_event_work);
}

static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlxsw_sp_router *router;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || (p->tbl->family != AF_INET &&
				p->tbl->family != AF_INET6))
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		if (!mlxsw_sp_dev_lower_is_port(p->dev))
			return NOTIFY_DONE;

		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		router->neighs_update.interval = interval;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
			return NOTIFY_DONE;

		if (!mlxsw_sp_dev_lower_is_port(n->dev))
			return NOTIFY_DONE;

		return mlxsw_sp_router_schedule_neigh_work(router, n);

	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
		return mlxsw_sp_router_schedule_work(ptr, router, NULL,
				mlxsw_sp_router_mp_hash_event_work);

	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
		return mlxsw_sp_router_schedule_work(ptr, router, NULL,
				mlxsw_sp_router_update_priority_work);
	}

	return NOTIFY_DONE;
}

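/* Set up neighbour tracking: the hash table of offloaded neighbour entries
 * and the delayed works that poll the device for activity and probe
 * unresolved nexthops.
 */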
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity update. */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node) {
		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
	}
}

struct mlxsw_sp_neigh_rif_made_sync {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err;
};

static void mlxsw_sp_neigh_rif_made_sync_each(struct neighbour *n, void *data)
{
	struct mlxsw_sp_neigh_rif_made_sync *rms = data;
	int rc;

	if (rms->err)
		return;
	if (n->dev != mlxsw_sp_rif_dev(rms->rif))
		return;
	rc = mlxsw_sp_router_schedule_neigh_work(rms->mlxsw_sp->router, n);
	if (rc != NOTIFY_DONE)
		rms->err = -ENOMEM;
}

static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_rif_made_sync rms = {
		.mlxsw_sp = mlxsw_sp,
		.rif = rif,
	};

	if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
		return 0;

	neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
	if (rms.err)
		goto err_arp;

#if IS_ENABLED(CONFIG_IPV6)
	neigh_for_each(&nd_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
#endif
	if (rms.err)
		goto err_nd;

	return 0;

err_nd:
err_arp:
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
	return rms.err;
}

enum mlxsw_sp_nexthop_type {
	MLXSW_SP_NEXTHOP_TYPE_ETH,
	MLXSW_SP_NEXTHOP_TYPE_IPIP,
};

enum mlxsw_sp_nexthop_action {
	/* Nexthop forwards packets to an egress RIF */
	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
	/* Nexthop discards packets */
	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
	/* Nexthop traps packets */
	MLXSW_SP_NEXTHOP_ACTION_TRAP,
};

struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head crif_list_node;
	struct list_head router_list_node;
	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
						   * this nexthop belongs to
						   */
	struct rhash_head ht_node;
	struct neigh_table *neigh_tbl;
	struct mlxsw_sp_nexthop_key key;
	unsigned char gw_addr[sizeof(struct in6_addr)];
	int ifindex;
	int nh_weight;
	int norm_nh_weight;
	int num_adj_entries;
	struct mlxsw_sp_crif *crif;
	u8 should_offload:1, /* set indicates this nexthop should be written
			      * to the adjacency table.
			      */
	   offloaded:1, /* set indicates this nexthop was written to the
			 * adjacency table.
			 */
	   update:1; /* set indicates this nexthop should be updated in the
		      * adjacency table (f.e., its MAC changed).
		      */
	enum mlxsw_sp_nexthop_action action;
	enum mlxsw_sp_nexthop_type type;
	union {
		struct mlxsw_sp_neigh_entry *neigh_entry;
		struct mlxsw_sp_ipip_entry *ipip_entry;
	};
	unsigned int counter_index;
	bool counter_valid;
};

static struct net_device *
mlxsw_sp_nexthop_dev(const struct mlxsw_sp_nexthop *nh)
{
	if (!nh->crif)
		return NULL;
	return nh->crif->key.dev;
}

enum mlxsw_sp_nexthop_group_type {
	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
};

struct mlxsw_sp_nexthop_group_info {
	struct mlxsw_sp_nexthop_group *nh_grp;
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	int sum_norm_weight;
	u8 adj_index_valid:1,
	   gateway:1, /* routes using the group use a gateway */
	   is_resilient:1;
	struct list_head list; /* member in nh_res_grp_list */
	struct mlxsw_sp_nexthop nexthops[];
};

static struct mlxsw_sp_rif *
mlxsw_sp_nhgi_rif(const struct mlxsw_sp_nexthop_group_info *nhgi)
{
	struct mlxsw_sp_crif *crif = nhgi->nexthops[0].crif;

	if (!crif)
		return NULL;
	return crif->rif;
}

struct mlxsw_sp_nexthop_group_vr_key {
	u16 vr_id;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_nexthop_group_vr_entry {
	struct list_head list; /* member in vr_list */
	struct rhash_head ht_node; /* member in vr_ht */
	refcount_t ref_count;
	struct mlxsw_sp_nexthop_group_vr_key key;
};

struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node;
	struct list_head fib_list; /* list of fib entries that use this group */
	union {
		struct {
			struct fib_info *fi;
		} ipv4;
		struct {
			u32 id;
		} obj;
	};
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct list_head vr_list;
	struct rhashtable vr_ht;
	enum mlxsw_sp_nexthop_group_type type;
	bool can_destroy;
};

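/* Allocate a flow counter for the nexthop only when counters are enabled for
 * the adjacency dpipe table; otherwise the nexthop is left without a valid
 * counter.
 */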
void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
		return;

	nh->counter_valid = true;
}

void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	if (!nh->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
	nh->counter_valid = false;
}

int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
{
	if (!nh->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
					 p_counter, NULL);
}

struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
					       struct mlxsw_sp_nexthop *nh)
{
	if (!nh) {
		if (list_empty(&router->nexthop_list))
			return NULL;
		else
			return list_first_entry(&router->nexthop_list,
						typeof(*nh), router_list_node);
	}
	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
		return NULL;
	return list_next_entry(nh, router_list_node);
}

bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
{
	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
}

unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
{
	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
	    !mlxsw_sp_nexthop_is_forward(nh))
		return NULL;
	return nh->neigh_entry->ha;
}

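/* Report where @nh lives in the adjacency table: the group's base adjacency
 * index and ECMP size, plus the hash index obtained by summing the adjacency
 * entries of the offloaded nexthops that precede it in the group.
 */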
mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop * nh,u32 * p_adj_index,u32 * p_adj_size,u32 * p_adj_hash_index)3220 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3221 u32 *p_adj_size, u32 *p_adj_hash_index)
3222 {
3223 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3224 u32 adj_hash_index = 0;
3225 int i;
3226
3227 if (!nh->offloaded || !nhgi->adj_index_valid)
3228 return -EINVAL;
3229
3230 *p_adj_index = nhgi->adj_index;
3231 *p_adj_size = nhgi->ecmp_size;
3232
3233 for (i = 0; i < nhgi->count; i++) {
3234 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3235
3236 if (nh_iter == nh)
3237 break;
3238 if (nh_iter->offloaded)
3239 adj_hash_index += nh_iter->num_adj_entries;
3240 }
3241
3242 *p_adj_hash_index = adj_hash_index;
3243 return 0;
3244 }
3245
mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop * nh)3246 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3247 {
3248 if (WARN_ON(!nh->crif))
3249 return NULL;
3250 return nh->crif->rif;
3251 }
3252
mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop * nh)3253 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3254 {
3255 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3256 int i;
3257
3258 for (i = 0; i < nhgi->count; i++) {
3259 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3260
3261 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3262 return true;
3263 }
3264 return false;
3265 }
3266
3267 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3268 .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3269 .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3270 .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3271 .automatic_shrinking = true,
3272 };
3273
3274 static struct mlxsw_sp_nexthop_group_vr_entry *
mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group * nh_grp,const struct mlxsw_sp_fib * fib)3275 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3276 const struct mlxsw_sp_fib *fib)
3277 {
3278 struct mlxsw_sp_nexthop_group_vr_key key;
3279
3280 memset(&key, 0, sizeof(key));
3281 key.vr_id = fib->vr->id;
3282 key.proto = fib->proto;
3283 return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3284 mlxsw_sp_nexthop_group_vr_ht_params);
3285 }
3286
3287 static int
3288 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3289 const struct mlxsw_sp_fib *fib)
3290 {
3291 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3292 int err;
3293
3294 vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3295 if (!vr_entry)
3296 return -ENOMEM;
3297
3298 vr_entry->key.vr_id = fib->vr->id;
3299 vr_entry->key.proto = fib->proto;
3300 refcount_set(&vr_entry->ref_count, 1);
3301
3302 err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3303 mlxsw_sp_nexthop_group_vr_ht_params);
3304 if (err)
3305 goto err_hashtable_insert;
3306
3307 list_add(&vr_entry->list, &nh_grp->vr_list);
3308
3309 return 0;
3310
3311 err_hashtable_insert:
3312 kfree(vr_entry);
3313 return err;
3314 }
3315
3316 static void
3317 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3318 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3319 {
3320 list_del(&vr_entry->list);
3321 rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3322 mlxsw_sp_nexthop_group_vr_ht_params);
3323 kfree(vr_entry);
3324 }
3325
3326 static int
3327 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3328 const struct mlxsw_sp_fib *fib)
3329 {
3330 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3331
3332 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3333 if (vr_entry) {
3334 refcount_inc(&vr_entry->ref_count);
3335 return 0;
3336 }
3337
3338 return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3339 }
3340
3341 static void
3342 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3343 const struct mlxsw_sp_fib *fib)
3344 {
3345 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3346
3347 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3348 if (WARN_ON_ONCE(!vr_entry))
3349 return;
3350
3351 if (!refcount_dec_and_test(&vr_entry->ref_count))
3352 return;
3353
3354 mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3355 }
3356
3357 struct mlxsw_sp_nexthop_group_cmp_arg {
3358 enum mlxsw_sp_nexthop_group_type type;
3359 union {
3360 struct fib_info *fi;
3361 struct mlxsw_sp_fib6_entry *fib6_entry;
3362 u32 id;
3363 };
3364 };
3365
3366 static bool
3367 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3368 const struct in6_addr *gw, int ifindex,
3369 int weight)
3370 {
3371 int i;
3372
3373 for (i = 0; i < nh_grp->nhgi->count; i++) {
3374 const struct mlxsw_sp_nexthop *nh;
3375
3376 nh = &nh_grp->nhgi->nexthops[i];
3377 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3378 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3379 return true;
3380 }
3381
3382 return false;
3383 }
3384
3385 static bool
3386 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3387 const struct mlxsw_sp_fib6_entry *fib6_entry)
3388 {
3389 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3390
3391 if (nh_grp->nhgi->count != fib6_entry->nrt6)
3392 return false;
3393
3394 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3395 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3396 struct in6_addr *gw;
3397 int ifindex, weight;
3398
3399 ifindex = fib6_nh->fib_nh_dev->ifindex;
3400 weight = fib6_nh->fib_nh_weight;
3401 gw = &fib6_nh->fib_nh_gw6;
3402 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3403 weight))
3404 return false;
3405 }
3406
3407 return true;
3408 }
3409
3410 static int
3411 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3412 {
3413 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3414 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3415
3416 if (nh_grp->type != cmp_arg->type)
3417 return 1;
3418
3419 switch (cmp_arg->type) {
3420 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3421 return cmp_arg->fi != nh_grp->ipv4.fi;
3422 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3423 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3424 cmp_arg->fib6_entry);
3425 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3426 return cmp_arg->id != nh_grp->obj.id;
3427 default:
3428 WARN_ON(1);
3429 return 1;
3430 }
3431 }
3432
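/* Hash a nexthop group object. IPv4 groups are keyed by their fib_info
 * pointer and nexthop object groups by their ID. IPv6 groups have no
 * such unique handle, so they are keyed by their composition: the
 * ifindex and gateway of each nexthop, folded into a single value.
 */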
3433 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3434 {
3435 const struct mlxsw_sp_nexthop_group *nh_grp = data;
3436 const struct mlxsw_sp_nexthop *nh;
3437 struct fib_info *fi;
3438 unsigned int val;
3439 int i;
3440
3441 switch (nh_grp->type) {
3442 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3443 fi = nh_grp->ipv4.fi;
3444 return jhash(&fi, sizeof(fi), seed);
3445 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3446 val = nh_grp->nhgi->count;
3447 for (i = 0; i < nh_grp->nhgi->count; i++) {
3448 nh = &nh_grp->nhgi->nexthops[i];
3449 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3450 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3451 }
3452 return jhash(&val, sizeof(val), seed);
3453 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3454 return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3455 default:
3456 WARN_ON(1);
3457 return 0;
3458 }
3459 }
3460
3461 static u32
3462 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3463 {
3464 unsigned int val = fib6_entry->nrt6;
3465 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3466
3467 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3468 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3469 struct net_device *dev = fib6_nh->fib_nh_dev;
3470 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3471
3472 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3473 val ^= jhash(gw, sizeof(*gw), seed);
3474 }
3475
3476 return jhash(&val, sizeof(val), seed);
3477 }
3478
3479 static u32
3480 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3481 {
3482 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3483
3484 switch (cmp_arg->type) {
3485 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3486 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3487 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3488 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3489 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3490 return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3491 default:
3492 WARN_ON(1);
3493 return 0;
3494 }
3495 }
3496
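/* The key of the nexthop group hash table differs per group type, so
 * the table uses dedicated hash and compare functions that dispatch on
 * the type carried in mlxsw_sp_nexthop_group_cmp_arg instead of a
 * fixed-length key.
 */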
3497 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3498 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3499 .hashfn = mlxsw_sp_nexthop_group_hash,
3500 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3501 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3502 };
3503
3504 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3505 struct mlxsw_sp_nexthop_group *nh_grp)
3506 {
3507 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3508 !nh_grp->nhgi->gateway)
3509 return 0;
3510
3511 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3512 &nh_grp->ht_node,
3513 mlxsw_sp_nexthop_group_ht_params);
3514 }
3515
3516 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3517 struct mlxsw_sp_nexthop_group *nh_grp)
3518 {
3519 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3520 !nh_grp->nhgi->gateway)
3521 return;
3522
3523 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3524 &nh_grp->ht_node,
3525 mlxsw_sp_nexthop_group_ht_params);
3526 }
3527
3528 static struct mlxsw_sp_nexthop_group *
3529 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3530 struct fib_info *fi)
3531 {
3532 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3533
3534 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3535 cmp_arg.fi = fi;
3536 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3537 &cmp_arg,
3538 mlxsw_sp_nexthop_group_ht_params);
3539 }
3540
3541 static struct mlxsw_sp_nexthop_group *
3542 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3543 struct mlxsw_sp_fib6_entry *fib6_entry)
3544 {
3545 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3546
3547 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3548 cmp_arg.fib6_entry = fib6_entry;
3549 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3550 &cmp_arg,
3551 mlxsw_sp_nexthop_group_ht_params);
3552 }
3553
3554 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3555 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3556 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3557 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3558 };
3559
3560 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3561 struct mlxsw_sp_nexthop *nh)
3562 {
3563 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3564 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3565 }
3566
3567 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3568 struct mlxsw_sp_nexthop *nh)
3569 {
3570 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3571 mlxsw_sp_nexthop_ht_params);
3572 }
3573
3574 static struct mlxsw_sp_nexthop *
3575 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3576 struct mlxsw_sp_nexthop_key key)
3577 {
3578 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3579 mlxsw_sp_nexthop_ht_params);
3580 }
3581
3582 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3583 enum mlxsw_sp_l3proto proto,
3584 u16 vr_id,
3585 u32 adj_index, u16 ecmp_size,
3586 u32 new_adj_index,
3587 u16 new_ecmp_size)
3588 {
3589 char raleu_pl[MLXSW_REG_RALEU_LEN];
3590
3591 mlxsw_reg_raleu_pack(raleu_pl,
3592 (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3593 adj_index, ecmp_size, new_adj_index,
3594 new_ecmp_size);
3595 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3596 }
3597
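/* Re-point all the virtual routers that use this nexthop group from
 * the old adjacency index range to the new one. On failure, roll the
 * already updated virtual routers back to the old range.
 */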
3598 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3599 struct mlxsw_sp_nexthop_group *nh_grp,
3600 u32 old_adj_index, u16 old_ecmp_size)
3601 {
3602 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3603 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3604 int err;
3605
3606 list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3607 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3608 vr_entry->key.proto,
3609 vr_entry->key.vr_id,
3610 old_adj_index,
3611 old_ecmp_size,
3612 nhgi->adj_index,
3613 nhgi->ecmp_size);
3614 if (err)
3615 goto err_mass_update_vr;
3616 }
3617 return 0;
3618
3619 err_mass_update_vr:
3620 list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3621 mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3622 vr_entry->key.vr_id,
3623 nhgi->adj_index,
3624 nhgi->ecmp_size,
3625 old_adj_index, old_ecmp_size);
3626 return err;
3627 }
3628
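/* Program a single Ethernet adjacency entry via the RATR register.
 * Forwarding entries are written with the neighbour's MAC address,
 * while discard and trap entries are written with the corresponding
 * trap action. A nexthop without an associated RIF is programmed with
 * the loopback RIF created during init, since the entry must always
 * reference a valid RIF.
 */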
3629 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3630 u32 adj_index,
3631 struct mlxsw_sp_nexthop *nh,
3632 bool force, char *ratr_pl)
3633 {
3634 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3635 struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
3636 enum mlxsw_reg_ratr_op op;
3637 u16 rif_index;
3638
3639 rif_index = rif ? rif->rif_index :
3640 mlxsw_sp->router->lb_crif->rif->rif_index;
3641 op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3642 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3643 mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3644 adj_index, rif_index);
3645 switch (nh->action) {
3646 case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3647 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3648 break;
3649 case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3650 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3651 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3652 break;
3653 case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3654 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3655 MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3656 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3657 break;
3658 default:
3659 WARN_ON_ONCE(1);
3660 return -EINVAL;
3661 }
3662 if (nh->counter_valid)
3663 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3664 else
3665 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3666
3667 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3668 }
3669
3670 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3671 struct mlxsw_sp_nexthop *nh, bool force,
3672 char *ratr_pl)
3673 {
3674 int i;
3675
3676 for (i = 0; i < nh->num_adj_entries; i++) {
3677 int err;
3678
3679 err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3680 nh, force, ratr_pl);
3681 if (err)
3682 return err;
3683 }
3684
3685 return 0;
3686 }
3687
3688 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3689 u32 adj_index,
3690 struct mlxsw_sp_nexthop *nh,
3691 bool force, char *ratr_pl)
3692 {
3693 const struct mlxsw_sp_ipip_ops *ipip_ops;
3694
3695 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3696 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3697 force, ratr_pl);
3698 }
3699
3700 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3701 u32 adj_index,
3702 struct mlxsw_sp_nexthop *nh, bool force,
3703 char *ratr_pl)
3704 {
3705 int i;
3706
3707 for (i = 0; i < nh->num_adj_entries; i++) {
3708 int err;
3709
3710 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3711 nh, force, ratr_pl);
3712 if (err)
3713 return err;
3714 }
3715
3716 return 0;
3717 }
3718
3719 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3720 struct mlxsw_sp_nexthop *nh, bool force,
3721 char *ratr_pl)
3722 {
3723 /* When action is discard or trap, the nexthop must be
3724 * programmed as an Ethernet nexthop.
3725 */
3726 if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3727 nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3728 nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3729 return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3730 force, ratr_pl);
3731 else
3732 return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3733 force, ratr_pl);
3734 }
3735
3736 static int
3737 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3738 struct mlxsw_sp_nexthop_group_info *nhgi,
3739 bool reallocate)
3740 {
3741 char ratr_pl[MLXSW_REG_RATR_LEN];
3742 u32 adj_index = nhgi->adj_index; /* base */
3743 struct mlxsw_sp_nexthop *nh;
3744 int i;
3745
3746 for (i = 0; i < nhgi->count; i++) {
3747 nh = &nhgi->nexthops[i];
3748
3749 if (!nh->should_offload) {
3750 nh->offloaded = 0;
3751 continue;
3752 }
3753
3754 if (nh->update || reallocate) {
3755 int err = 0;
3756
3757 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3758 true, ratr_pl);
3759 if (err)
3760 return err;
3761 nh->update = 0;
3762 nh->offloaded = 1;
3763 }
3764 adj_index += nh->num_adj_entries;
3765 }
3766 return 0;
3767 }
3768
3769 static int
3770 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3771 struct mlxsw_sp_nexthop_group *nh_grp)
3772 {
3773 struct mlxsw_sp_fib_entry *fib_entry;
3774 int err;
3775
3776 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3777 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3778 if (err)
3779 return err;
3780 }
3781 return 0;
3782 }
3783
3784 struct mlxsw_sp_adj_grp_size_range {
3785 u16 start; /* Inclusive */
3786 u16 end; /* Inclusive */
3787 };
3788
3789 /* Ordered by range start value */
3790 static const struct mlxsw_sp_adj_grp_size_range
3791 mlxsw_sp1_adj_grp_size_ranges[] = {
3792 { .start = 1, .end = 64 },
3793 { .start = 512, .end = 512 },
3794 { .start = 1024, .end = 1024 },
3795 { .start = 2048, .end = 2048 },
3796 { .start = 4096, .end = 4096 },
3797 };
3798
3799 /* Ordered by range start value */
3800 static const struct mlxsw_sp_adj_grp_size_range
3801 mlxsw_sp2_adj_grp_size_ranges[] = {
3802 { .start = 1, .end = 128 },
3803 { .start = 256, .end = 256 },
3804 { .start = 512, .end = 512 },
3805 { .start = 1024, .end = 1024 },
3806 { .start = 2048, .end = 2048 },
3807 { .start = 4096, .end = 4096 },
3808 };
3809
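/* Round the requested adjacency group size up to the nearest size
 * supported by the device. For example, on Spectrum-1 a request for
 * 70 entries does not fit the 1-64 range and is rounded up to the
 * next supported size, 512.
 */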
3810 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3811 u16 *p_adj_grp_size)
3812 {
3813 int i;
3814
3815 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3816 const struct mlxsw_sp_adj_grp_size_range *size_range;
3817
3818 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3819
3820 if (*p_adj_grp_size >= size_range->start &&
3821 *p_adj_grp_size <= size_range->end)
3822 return;
3823
3824 if (*p_adj_grp_size <= size_range->end) {
3825 *p_adj_grp_size = size_range->end;
3826 return;
3827 }
3828 }
3829 }
3830
3831 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3832 u16 *p_adj_grp_size,
3833 unsigned int alloc_size)
3834 {
3835 int i;
3836
3837 for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3838 const struct mlxsw_sp_adj_grp_size_range *size_range;
3839
3840 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3841
3842 if (alloc_size >= size_range->end) {
3843 *p_adj_grp_size = size_range->end;
3844 return;
3845 }
3846 }
3847 }
3848
3849 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3850 u16 *p_adj_grp_size)
3851 {
3852 unsigned int alloc_size;
3853 int err;
3854
3855 /* Round up the requested group size to the next size supported
3856 * by the device and make sure the request can be satisfied.
3857 */
3858 mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3859 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3860 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3861 *p_adj_grp_size, &alloc_size);
3862 if (err)
3863 return err;
3864 /* It is possible the allocation results in more allocated
3865 * entries than requested. Try to use as many of them as
3866 * possible.
3867 */
3868 mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3869
3870 return 0;
3871 }
3872
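/* Normalize the weights of the offloaded nexthops in the group by
 * dividing them by their greatest common divisor. For example, weights
 * of 10, 20 and 30 are normalized to 1, 2 and 3, giving a total
 * normalized weight of 6.
 */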
3873 static void
3874 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3875 {
3876 int i, g = 0, sum_norm_weight = 0;
3877 struct mlxsw_sp_nexthop *nh;
3878
3879 for (i = 0; i < nhgi->count; i++) {
3880 nh = &nhgi->nexthops[i];
3881
3882 if (!nh->should_offload)
3883 continue;
3884 if (g > 0)
3885 g = gcd(nh->nh_weight, g);
3886 else
3887 g = nh->nh_weight;
3888 }
3889
3890 for (i = 0; i < nhgi->count; i++) {
3891 nh = &nhgi->nexthops[i];
3892
3893 if (!nh->should_offload)
3894 continue;
3895 nh->norm_nh_weight = nh->nh_weight / g;
3896 sum_norm_weight += nh->norm_nh_weight;
3897 }
3898
3899 nhgi->sum_norm_weight = sum_norm_weight;
3900 }
3901
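/* Distribute the group's adjacency entries among the offloaded
 * nexthops in proportion to their normalized weights. For example,
 * with normalized weights of 1 and 2 and an ECMP size of 512, the
 * first nexthop is assigned 171 entries and the second 341.
 */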
3902 static void
3903 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3904 {
3905 int i, weight = 0, lower_bound = 0;
3906 int total = nhgi->sum_norm_weight;
3907 u16 ecmp_size = nhgi->ecmp_size;
3908
3909 for (i = 0; i < nhgi->count; i++) {
3910 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3911 int upper_bound;
3912
3913 if (!nh->should_offload)
3914 continue;
3915 weight += nh->norm_nh_weight;
3916 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3917 nh->num_adj_entries = upper_bound - lower_bound;
3918 lower_bound = upper_bound;
3919 }
3920 }
3921
3922 static struct mlxsw_sp_nexthop *
3923 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3924 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3925
3926 static void
3927 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3928 struct mlxsw_sp_nexthop_group *nh_grp)
3929 {
3930 int i;
3931
3932 for (i = 0; i < nh_grp->nhgi->count; i++) {
3933 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3934
3935 if (nh->offloaded)
3936 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3937 else
3938 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3939 }
3940 }
3941
3942 static void
3943 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3944 struct mlxsw_sp_fib6_entry *fib6_entry)
3945 {
3946 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3947
3948 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3949 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3950 struct mlxsw_sp_nexthop *nh;
3951
3952 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3953 if (nh && nh->offloaded)
3954 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3955 else
3956 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3957 }
3958 }
3959
3960 static void
3961 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3962 struct mlxsw_sp_nexthop_group *nh_grp)
3963 {
3964 struct mlxsw_sp_fib6_entry *fib6_entry;
3965
3966 /* Unfortunately, in IPv6 the route and the nexthop are described by
3967 * the same struct, so we need to iterate over all the routes using the
3968 * nexthop group and set / clear the offload indication for them.
3969 */
3970 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3971 common.nexthop_group_node)
3972 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3973 }
3974
3975 static void
3976 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3977 const struct mlxsw_sp_nexthop *nh,
3978 u16 bucket_index)
3979 {
3980 struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3981 bool offload = false, trap = false;
3982
3983 if (nh->offloaded) {
3984 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3985 trap = true;
3986 else
3987 offload = true;
3988 }
3989 nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3990 bucket_index, offload, trap);
3991 }
3992
3993 static void
3994 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3995 struct mlxsw_sp_nexthop_group *nh_grp)
3996 {
3997 int i;
3998
3999 /* Do not update the flags if the nexthop group is being destroyed
4000 * since:
4001 * 1. The nexthop object is being deleted, in which case the flags are
4002 * irrelevant.
4003 * 2. The nexthop group was replaced by a newer group, in which case
4004 * the flags of the nexthop object were already updated based on the
4005 * new group.
4006 */
4007 if (nh_grp->can_destroy)
4008 return;
4009
4010 nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4011 nh_grp->nhgi->adj_index_valid, false);
4012
4013 /* Update flags of individual nexthop buckets in case of a resilient
4014 * nexthop group.
4015 */
4016 if (!nh_grp->nhgi->is_resilient)
4017 return;
4018
4019 for (i = 0; i < nh_grp->nhgi->count; i++) {
4020 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
4021
4022 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
4023 }
4024 }
4025
4026 static void
4027 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4028 struct mlxsw_sp_nexthop_group *nh_grp)
4029 {
4030 switch (nh_grp->type) {
4031 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
4032 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
4033 break;
4034 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
4035 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
4036 break;
4037 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
4038 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
4039 break;
4040 }
4041 }
4042
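/* Refresh the group after a change in one of its nexthops. If the set
 * of offloaded nexthops changed, allocate a new adjacency index range,
 * rebalance and write the nexthops into it, re-point the FIB entries
 * and virtual routers at it and free the old range. On any failure,
 * fall back to trapping the group's traffic to the CPU.
 */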
4043 static int
4044 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
4045 struct mlxsw_sp_nexthop_group *nh_grp)
4046 {
4047 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4048 u16 ecmp_size, old_ecmp_size;
4049 struct mlxsw_sp_nexthop *nh;
4050 bool offload_change = false;
4051 u32 adj_index;
4052 bool old_adj_index_valid;
4053 u32 old_adj_index;
4054 int i, err2, err;
4055
4056 if (!nhgi->gateway)
4057 return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4058
4059 for (i = 0; i < nhgi->count; i++) {
4060 nh = &nhgi->nexthops[i];
4061
4062 if (nh->should_offload != nh->offloaded) {
4063 offload_change = true;
4064 if (nh->should_offload)
4065 nh->update = 1;
4066 }
4067 }
4068 if (!offload_change) {
4069 /* Nothing was added or removed, so no need to reallocate. Just
4070 * update MAC on existing adjacency indexes.
4071 */
4072 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
4073 if (err) {
4074 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4075 goto set_trap;
4076 }
4077 /* Flags of individual nexthop buckets might need to be
4078 * updated.
4079 */
4080 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4081 return 0;
4082 }
4083 mlxsw_sp_nexthop_group_normalize(nhgi);
4084 if (!nhgi->sum_norm_weight) {
4085 /* No neigh of this group is connected, so we just set
4086 * the trap and let everything flow through the kernel.
4087 */
4088 err = 0;
4089 goto set_trap;
4090 }
4091
4092 ecmp_size = nhgi->sum_norm_weight;
4093 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4094 if (err)
4095 /* No valid allocation size available. */
4096 goto set_trap;
4097
4098 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4099 ecmp_size, &adj_index);
4100 if (err) {
4101 /* We ran out of KVD linear space, just set the
4102 * trap and let everything flow through the kernel.
4103 */
4104 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4105 goto set_trap;
4106 }
4107 old_adj_index_valid = nhgi->adj_index_valid;
4108 old_adj_index = nhgi->adj_index;
4109 old_ecmp_size = nhgi->ecmp_size;
4110 nhgi->adj_index_valid = 1;
4111 nhgi->adj_index = adj_index;
4112 nhgi->ecmp_size = ecmp_size;
4113 mlxsw_sp_nexthop_group_rebalance(nhgi);
4114 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4115 if (err) {
4116 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4117 goto set_trap;
4118 }
4119
4120 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4121
4122 if (!old_adj_index_valid) {
4123 /* The trap was set for fib entries, so we have to call
4124 * fib entry update to unset it and use the adjacency index.
4125 */
4126 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4127 if (err) {
4128 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4129 goto set_trap;
4130 }
4131 return 0;
4132 }
4133
4134 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4135 old_adj_index, old_ecmp_size);
4136 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4137 old_ecmp_size, old_adj_index);
4138 if (err) {
4139 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4140 goto set_trap;
4141 }
4142
4143 return 0;
4144
4145 set_trap:
4146 old_adj_index_valid = nhgi->adj_index_valid;
4147 nhgi->adj_index_valid = 0;
4148 for (i = 0; i < nhgi->count; i++) {
4149 nh = &nhgi->nexthops[i];
4150 nh->offloaded = 0;
4151 }
4152 err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4153 if (err2)
4154 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4155 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4156 if (old_adj_index_valid)
4157 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4158 nhgi->ecmp_size, nhgi->adj_index);
4159 return err;
4160 }
4161
4162 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4163 bool removing)
4164 {
4165 if (!removing) {
4166 nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4167 nh->should_offload = 1;
4168 } else if (nh->nhgi->is_resilient) {
4169 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4170 nh->should_offload = 1;
4171 } else {
4172 nh->should_offload = 0;
4173 }
4174 nh->update = 1;
4175 }
4176
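/* Migrate all the nexthops sharing a dead neighbour entry to a freshly
 * looked-up (or created) neighbour for the same address and device,
 * and refresh their groups according to the new neighbour's state.
 */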
4177 static int
4178 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4179 struct mlxsw_sp_neigh_entry *neigh_entry)
4180 {
4181 struct neighbour *n, *old_n = neigh_entry->key.n;
4182 struct mlxsw_sp_nexthop *nh;
4183 struct net_device *dev;
4184 bool entry_connected;
4185 u8 nud_state, dead;
4186 int err;
4187
4188 nh = list_first_entry(&neigh_entry->nexthop_list,
4189 struct mlxsw_sp_nexthop, neigh_list_node);
4190 dev = mlxsw_sp_nexthop_dev(nh);
4191
4192 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4193 if (!n) {
4194 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4195 if (IS_ERR(n))
4196 return PTR_ERR(n);
4197 neigh_event_send(n, NULL);
4198 }
4199
4200 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4201 neigh_entry->key.n = n;
4202 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4203 if (err)
4204 goto err_neigh_entry_insert;
4205
4206 read_lock_bh(&n->lock);
4207 nud_state = n->nud_state;
4208 dead = n->dead;
4209 read_unlock_bh(&n->lock);
4210 entry_connected = nud_state & NUD_VALID && !dead;
4211
4212 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4213 neigh_list_node) {
4214 neigh_release(old_n);
4215 neigh_clone(n);
4216 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4217 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4218 }
4219
4220 neigh_release(n);
4221
4222 return 0;
4223
4224 err_neigh_entry_insert:
4225 neigh_entry->key.n = old_n;
4226 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4227 neigh_release(n);
4228 return err;
4229 }
4230
4231 static void
4232 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4233 struct mlxsw_sp_neigh_entry *neigh_entry,
4234 bool removing, bool dead)
4235 {
4236 struct mlxsw_sp_nexthop *nh;
4237
4238 if (list_empty(&neigh_entry->nexthop_list))
4239 return;
4240
4241 if (dead) {
4242 int err;
4243
4244 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4245 neigh_entry);
4246 if (err)
4247 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4248 return;
4249 }
4250
4251 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4252 neigh_list_node) {
4253 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4254 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4255 }
4256 }
4257
4258 static void mlxsw_sp_nexthop_crif_init(struct mlxsw_sp_nexthop *nh,
4259 struct mlxsw_sp_crif *crif)
4260 {
4261 if (nh->crif)
4262 return;
4263
4264 nh->crif = crif;
4265 list_add(&nh->crif_list_node, &crif->nexthop_list);
4266 }
4267
4268 static void mlxsw_sp_nexthop_crif_fini(struct mlxsw_sp_nexthop *nh)
4269 {
4270 if (!nh->crif)
4271 return;
4272
4273 list_del(&nh->crif_list_node);
4274 nh->crif = NULL;
4275 }
4276
4277 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4278 struct mlxsw_sp_nexthop *nh)
4279 {
4280 struct mlxsw_sp_neigh_entry *neigh_entry;
4281 struct net_device *dev;
4282 struct neighbour *n;
4283 u8 nud_state, dead;
4284 int err;
4285
4286 if (WARN_ON(!nh->crif->rif))
4287 return 0;
4288
4289 if (!nh->nhgi->gateway || nh->neigh_entry)
4290 return 0;
4291 dev = mlxsw_sp_nexthop_dev(nh);
4292
4293 /* Take a reference on the neighbour here, ensuring that it is not
4294 * destroyed before the nexthop entry is finished. The reference is
4295 * taken either in neigh_lookup() or in neigh_create(), in case n
4296 * is not found.
4297 */
4298 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4299 if (!n) {
4300 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4301 if (IS_ERR(n))
4302 return PTR_ERR(n);
4303 neigh_event_send(n, NULL);
4304 }
4305 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4306 if (!neigh_entry) {
4307 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4308 if (IS_ERR(neigh_entry)) {
4309 err = -EINVAL;
4310 goto err_neigh_entry_create;
4311 }
4312 }
4313
4314 /* If that is the first nexthop connected to that neigh, add to
4315 * nexthop_neighs_list
4316 */
4317 if (list_empty(&neigh_entry->nexthop_list))
4318 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4319 &mlxsw_sp->router->nexthop_neighs_list);
4320
4321 nh->neigh_entry = neigh_entry;
4322 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4323 read_lock_bh(&n->lock);
4324 nud_state = n->nud_state;
4325 dead = n->dead;
4326 read_unlock_bh(&n->lock);
4327 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4328
4329 return 0;
4330
4331 err_neigh_entry_create:
4332 neigh_release(n);
4333 return err;
4334 }
4335
4336 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4337 struct mlxsw_sp_nexthop *nh)
4338 {
4339 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4340 struct neighbour *n;
4341
4342 if (!neigh_entry)
4343 return;
4344 n = neigh_entry->key.n;
4345
4346 __mlxsw_sp_nexthop_neigh_update(nh, true);
4347 list_del(&nh->neigh_list_node);
4348 nh->neigh_entry = NULL;
4349
4350 /* If that is the last nexthop connected to that neigh, remove from
4351 * nexthop_neighs_list
4352 */
4353 if (list_empty(&neigh_entry->nexthop_list))
4354 list_del(&neigh_entry->nexthop_neighs_list_node);
4355
4356 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4357 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4358
4359 neigh_release(n);
4360 }
4361
4362 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4363 {
4364 struct net_device *ul_dev;
4365 bool is_up;
4366
4367 rcu_read_lock();
4368 ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4369 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4370 rcu_read_unlock();
4371
4372 return is_up;
4373 }
4374
4375 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4376 struct mlxsw_sp_nexthop *nh,
4377 struct mlxsw_sp_ipip_entry *ipip_entry)
4378 {
4379 struct mlxsw_sp_crif *crif;
4380 bool removing;
4381
4382 if (!nh->nhgi->gateway || nh->ipip_entry)
4383 return;
4384
4385 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, ipip_entry->ol_dev);
4386 if (WARN_ON(!crif))
4387 return;
4388
4389 nh->ipip_entry = ipip_entry;
4390 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4391 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4392 mlxsw_sp_nexthop_crif_init(nh, crif);
4393 }
4394
4395 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4396 struct mlxsw_sp_nexthop *nh)
4397 {
4398 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4399
4400 if (!ipip_entry)
4401 return;
4402
4403 __mlxsw_sp_nexthop_neigh_update(nh, true);
4404 nh->ipip_entry = NULL;
4405 }
4406
4407 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4408 const struct fib_nh *fib_nh,
4409 enum mlxsw_sp_ipip_type *p_ipipt)
4410 {
4411 struct net_device *dev = fib_nh->fib_nh_dev;
4412
4413 return dev &&
4414 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4415 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4416 }
4417
4418 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4419 struct mlxsw_sp_nexthop *nh,
4420 const struct net_device *dev)
4421 {
4422 const struct mlxsw_sp_ipip_ops *ipip_ops;
4423 struct mlxsw_sp_ipip_entry *ipip_entry;
4424 struct mlxsw_sp_crif *crif;
4425 int err;
4426
4427 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4428 if (ipip_entry) {
4429 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4430 if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4431 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4432 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4433 return 0;
4434 }
4435 }
4436
4437 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4438 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, dev);
4439 if (!crif)
4440 return 0;
4441
4442 mlxsw_sp_nexthop_crif_init(nh, crif);
4443
4444 if (!crif->rif)
4445 return 0;
4446
4447 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4448 if (err)
4449 goto err_neigh_init;
4450
4451 return 0;
4452
4453 err_neigh_init:
4454 mlxsw_sp_nexthop_crif_fini(nh);
4455 return err;
4456 }
4457
4458 static int mlxsw_sp_nexthop_type_rif_made(struct mlxsw_sp *mlxsw_sp,
4459 struct mlxsw_sp_nexthop *nh)
4460 {
4461 switch (nh->type) {
4462 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4463 return mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4464 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4465 break;
4466 }
4467
4468 return 0;
4469 }
4470
4471 static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
4472 struct mlxsw_sp_nexthop *nh)
4473 {
4474 switch (nh->type) {
4475 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4476 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4477 break;
4478 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4479 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4480 break;
4481 }
4482 }
4483
4484 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4485 struct mlxsw_sp_nexthop *nh)
4486 {
4487 mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4488 mlxsw_sp_nexthop_crif_fini(nh);
4489 }
4490
4491 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4492 struct mlxsw_sp_nexthop_group *nh_grp,
4493 struct mlxsw_sp_nexthop *nh,
4494 struct fib_nh *fib_nh)
4495 {
4496 struct net_device *dev = fib_nh->fib_nh_dev;
4497 struct in_device *in_dev;
4498 int err;
4499
4500 nh->nhgi = nh_grp->nhgi;
4501 nh->key.fib_nh = fib_nh;
4502 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4503 nh->nh_weight = fib_nh->fib_nh_weight;
4504 #else
4505 nh->nh_weight = 1;
4506 #endif
4507 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4508 nh->neigh_tbl = &arp_tbl;
4509 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4510 if (err)
4511 return err;
4512
4513 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4514 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4515
4516 if (!dev)
4517 return 0;
4518 nh->ifindex = dev->ifindex;
4519
4520 rcu_read_lock();
4521 in_dev = __in_dev_get_rcu(dev);
4522 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4523 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4524 rcu_read_unlock();
4525 return 0;
4526 }
4527 rcu_read_unlock();
4528
4529 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4530 if (err)
4531 goto err_nexthop_neigh_init;
4532
4533 return 0;
4534
4535 err_nexthop_neigh_init:
4536 list_del(&nh->router_list_node);
4537 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4538 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4539 return err;
4540 }
4541
4542 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4543 struct mlxsw_sp_nexthop *nh)
4544 {
4545 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4546 list_del(&nh->router_list_node);
4547 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4548 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4549 }
4550
4551 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4552 unsigned long event, struct fib_nh *fib_nh)
4553 {
4554 struct mlxsw_sp_nexthop_key key;
4555 struct mlxsw_sp_nexthop *nh;
4556
4557 key.fib_nh = fib_nh;
4558 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4559 if (!nh)
4560 return;
4561
4562 switch (event) {
4563 case FIB_EVENT_NH_ADD:
4564 mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4565 break;
4566 case FIB_EVENT_NH_DEL:
4567 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4568 break;
4569 }
4570
4571 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4572 }
4573
4574 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4575 struct mlxsw_sp_rif *rif)
4576 {
4577 struct net_device *dev = mlxsw_sp_rif_dev(rif);
4578 struct mlxsw_sp_nexthop *nh;
4579 bool removing;
4580
4581 list_for_each_entry(nh, &rif->crif->nexthop_list, crif_list_node) {
4582 switch (nh->type) {
4583 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4584 removing = false;
4585 break;
4586 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4587 removing = !mlxsw_sp_ipip_netdev_ul_up(dev);
4588 break;
4589 default:
4590 WARN_ON(1);
4591 continue;
4592 }
4593
4594 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4595 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4596 }
4597 }
4598
4599 static int mlxsw_sp_nexthop_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
4600 struct mlxsw_sp_rif *rif)
4601 {
4602 struct mlxsw_sp_nexthop *nh, *tmp;
4603 unsigned int n = 0;
4604 int err;
4605
4606 list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4607 crif_list_node) {
4608 err = mlxsw_sp_nexthop_type_rif_made(mlxsw_sp, nh);
4609 if (err)
4610 goto err_nexthop_type_rif;
4611 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4612 n++;
4613 }
4614
4615 return 0;
4616
4617 err_nexthop_type_rif:
4618 list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4619 crif_list_node) {
4620 if (!n--)
4621 break;
4622 mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4623 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4624 }
4625 return err;
4626 }
4627
4628 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4629 struct mlxsw_sp_rif *rif)
4630 {
4631 struct mlxsw_sp_nexthop *nh, *tmp;
4632
4633 list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4634 crif_list_node) {
4635 mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4636 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4637 }
4638 }
4639
4640 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4641 {
4642 enum mlxsw_reg_ratr_trap_action trap_action;
4643 char ratr_pl[MLXSW_REG_RATR_LEN];
4644 int err;
4645
4646 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4647 &mlxsw_sp->router->adj_trap_index);
4648 if (err)
4649 return err;
4650
4651 trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4652 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4653 MLXSW_REG_RATR_TYPE_ETHERNET,
4654 mlxsw_sp->router->adj_trap_index,
4655 mlxsw_sp->router->lb_crif->rif->rif_index);
4656 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4657 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4658 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4659 if (err)
4660 goto err_ratr_write;
4661
4662 return 0;
4663
4664 err_ratr_write:
4665 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4666 mlxsw_sp->router->adj_trap_index);
4667 return err;
4668 }
4669
4670 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4671 {
4672 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4673 mlxsw_sp->router->adj_trap_index);
4674 }
4675
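/* The first nexthop group to be created allocates a single shared
 * adjacency entry whose action is to trap packets to the CPU. The
 * entry is freed when the last nexthop group is destroyed.
 */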
4676 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4677 {
4678 int err;
4679
4680 if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4681 return 0;
4682
4683 err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4684 if (err)
4685 return err;
4686
4687 refcount_set(&mlxsw_sp->router->num_groups, 1);
4688
4689 return 0;
4690 }
4691
4692 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4693 {
4694 if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4695 return;
4696
4697 mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4698 }
4699
4700 static void
4701 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4702 const struct mlxsw_sp_nexthop_group *nh_grp,
4703 unsigned long *activity)
4704 {
4705 char *ratrad_pl;
4706 int i, err;
4707
4708 ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4709 if (!ratrad_pl)
4710 return;
4711
4712 mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4713 nh_grp->nhgi->count);
4714 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4715 if (err)
4716 goto out;
4717
4718 for (i = 0; i < nh_grp->nhgi->count; i++) {
4719 if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4720 continue;
4721 bitmap_set(activity, i, 1);
4722 }
4723
4724 out:
4725 kfree(ratrad_pl);
4726 }
4727
4728 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4729
4730 static void
4731 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4732 const struct mlxsw_sp_nexthop_group *nh_grp)
4733 {
4734 unsigned long *activity;
4735
4736 activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4737 if (!activity)
4738 return;
4739
4740 mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4741 nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4742 nh_grp->nhgi->count, activity);
4743
4744 bitmap_free(activity);
4745 }
4746
4747 static void
4748 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4749 {
4750 unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4751
4752 mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4753 msecs_to_jiffies(interval));
4754 }
4755
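/* Periodically query the activity of the adjacency entries of the
 * resilient nexthop groups and report it to the nexthop layer, which
 * uses it to decide when idle buckets may be replaced. The work rearms
 * itself for as long as resilient groups exist.
 */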
4756 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4757 {
4758 struct mlxsw_sp_nexthop_group_info *nhgi;
4759 struct mlxsw_sp_router *router;
4760 bool reschedule = false;
4761
4762 router = container_of(work, struct mlxsw_sp_router,
4763 nh_grp_activity_dw.work);
4764
4765 mutex_lock(&router->lock);
4766
4767 list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4768 mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4769 reschedule = true;
4770 }
4771
4772 mutex_unlock(&router->lock);
4773
4774 if (!reschedule)
4775 return;
4776 mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4777 }
4778
4779 static int
4780 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4781 const struct nh_notifier_single_info *nh,
4782 struct netlink_ext_ack *extack)
4783 {
4784 int err = -EINVAL;
4785
4786 if (nh->is_fdb)
4787 NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4788 else if (nh->has_encap)
4789 NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4790 else
4791 err = 0;
4792
4793 return err;
4794 }
4795
4796 static int
4797 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4798 const struct nh_notifier_single_info *nh,
4799 struct netlink_ext_ack *extack)
4800 {
4801 int err;
4802
4803 err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4804 if (err)
4805 return err;
4806
4807 /* Device-only nexthops with an IPIP device are programmed as
4808 * encapsulating adjacency entries.
4809 */
4810 if (!nh->gw_family && !nh->is_reject &&
4811 !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4812 NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4813 return -EINVAL;
4814 }
4815
4816 return 0;
4817 }
4818
4819 static int
4820 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4821 const struct nh_notifier_grp_info *nh_grp,
4822 struct netlink_ext_ack *extack)
4823 {
4824 int i;
4825
4826 if (nh_grp->is_fdb) {
4827 NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4828 return -EINVAL;
4829 }
4830
4831 for (i = 0; i < nh_grp->num_nh; i++) {
4832 const struct nh_notifier_single_info *nh;
4833 int err;
4834
4835 nh = &nh_grp->nh_entries[i].nh;
4836 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4837 extack);
4838 if (err)
4839 return err;
4840 }
4841
4842 return 0;
4843 }
4844
4845 static int
4846 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4847 const struct nh_notifier_res_table_info *nh_res_table,
4848 struct netlink_ext_ack *extack)
4849 {
4850 unsigned int alloc_size;
4851 bool valid_size = false;
4852 int err, i;
4853
4854 if (nh_res_table->num_nh_buckets < 32) {
4855 NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4856 return -EINVAL;
4857 }
4858
4859 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4860 const struct mlxsw_sp_adj_grp_size_range *size_range;
4861
4862 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4863
4864 if (nh_res_table->num_nh_buckets >= size_range->start &&
4865 nh_res_table->num_nh_buckets <= size_range->end) {
4866 valid_size = true;
4867 break;
4868 }
4869 }
4870
4871 if (!valid_size) {
4872 NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4873 return -EINVAL;
4874 }
4875
4876 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4877 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4878 nh_res_table->num_nh_buckets,
4879 &alloc_size);
4880 if (err || nh_res_table->num_nh_buckets != alloc_size) {
4881 NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4882 return -EINVAL;
4883 }
4884
4885 return 0;
4886 }
4887
4888 static int
4889 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4890 const struct nh_notifier_res_table_info *nh_res_table,
4891 struct netlink_ext_ack *extack)
4892 {
4893 int err;
4894 u16 i;
4895
4896 err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4897 nh_res_table,
4898 extack);
4899 if (err)
4900 return err;
4901
4902 for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4903 const struct nh_notifier_single_info *nh;
4904 int err;
4905
4906 nh = &nh_res_table->nhs[i];
4907 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4908 extack);
4909 if (err)
4910 return err;
4911 }
4912
4913 return 0;
4914 }
4915
4916 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4917 unsigned long event,
4918 struct nh_notifier_info *info)
4919 {
4920 struct nh_notifier_single_info *nh;
4921
4922 if (event != NEXTHOP_EVENT_REPLACE &&
4923 event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4924 event != NEXTHOP_EVENT_BUCKET_REPLACE)
4925 return 0;
4926
4927 switch (info->type) {
4928 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4929 return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4930 info->extack);
4931 case NH_NOTIFIER_INFO_TYPE_GRP:
4932 return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4933 info->nh_grp,
4934 info->extack);
4935 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4936 return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4937 info->nh_res_table,
4938 info->extack);
4939 case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4940 nh = &info->nh_res_bucket->new_nh;
4941 return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4942 info->extack);
4943 default:
4944 NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4945 return -EOPNOTSUPP;
4946 }
4947 }
4948
4949 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4950 const struct nh_notifier_info *info)
4951 {
4952 const struct net_device *dev;
4953
4954 switch (info->type) {
4955 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4956 dev = info->nh->dev;
4957 return info->nh->gw_family || info->nh->is_reject ||
4958 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4959 case NH_NOTIFIER_INFO_TYPE_GRP:
4960 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4961 /* Already validated earlier. */
4962 return true;
4963 default:
4964 return false;
4965 }
4966 }
4967
4968 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4969 struct mlxsw_sp_nexthop *nh)
4970 {
4971 nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4972 nh->should_offload = 1;
4973 /* While nexthops that discard packets do not forward packets
4974 * via an egress RIF, they still need to be programmed using a
4975 * valid RIF, so use the loopback RIF created during init.
4976 */
4977 nh->crif = mlxsw_sp->router->lb_crif;
4978 }
4979
4980 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4981 struct mlxsw_sp_nexthop *nh)
4982 {
4983 nh->crif = NULL;
4984 nh->should_offload = 0;
4985 }
4986
4987 static int
4988 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4989 struct mlxsw_sp_nexthop_group *nh_grp,
4990 struct mlxsw_sp_nexthop *nh,
4991 struct nh_notifier_single_info *nh_obj, int weight)
4992 {
4993 struct net_device *dev = nh_obj->dev;
4994 int err;
4995
4996 nh->nhgi = nh_grp->nhgi;
4997 nh->nh_weight = weight;
4998
4999 switch (nh_obj->gw_family) {
5000 case AF_INET:
5001 memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
5002 nh->neigh_tbl = &arp_tbl;
5003 break;
5004 case AF_INET6:
5005 memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
5006 #if IS_ENABLED(CONFIG_IPV6)
5007 nh->neigh_tbl = &nd_tbl;
5008 #endif
5009 break;
5010 }
5011
5012 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5013 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5014 nh->ifindex = dev->ifindex;
5015
5016 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
5017 if (err)
5018 goto err_type_init;
5019
5020 if (nh_obj->is_reject)
5021 mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
5022
5023 /* In a resilient nexthop group, all the nexthops must be written to
5024 * the adjacency table, even if they do not have a valid neighbour or
5025 * RIF.
5026 */
5027 if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
5028 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
5029 nh->should_offload = 1;
5030 }
5031
5032 return 0;
5033
5034 err_type_init:
5035 list_del(&nh->router_list_node);
5036 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5037 return err;
5038 }
5039
5040 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
5041 struct mlxsw_sp_nexthop *nh)
5042 {
5043 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
5044 mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
5045 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5046 list_del(&nh->router_list_node);
5047 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5048 nh->should_offload = 0;
5049 }
5050
5051 static int
5052 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
5053 struct mlxsw_sp_nexthop_group *nh_grp,
5054 struct nh_notifier_info *info)
5055 {
5056 struct mlxsw_sp_nexthop_group_info *nhgi;
5057 struct mlxsw_sp_nexthop *nh;
5058 bool is_resilient = false;
5059 unsigned int nhs;
5060 int err, i;
5061
5062 switch (info->type) {
5063 case NH_NOTIFIER_INFO_TYPE_SINGLE:
5064 nhs = 1;
5065 break;
5066 case NH_NOTIFIER_INFO_TYPE_GRP:
5067 nhs = info->nh_grp->num_nh;
5068 break;
5069 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5070 nhs = info->nh_res_table->num_nh_buckets;
5071 is_resilient = true;
5072 break;
5073 default:
5074 return -EINVAL;
5075 }
5076
5077 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5078 if (!nhgi)
5079 return -ENOMEM;
5080 nh_grp->nhgi = nhgi;
5081 nhgi->nh_grp = nh_grp;
5082 nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
5083 nhgi->is_resilient = is_resilient;
5084 nhgi->count = nhs;
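/* Initialize one nexthop per group member. For resilient groups each
 * bucket is backed by its own nexthop entry with a weight of one.
 */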
5085 for (i = 0; i < nhgi->count; i++) {
5086 struct nh_notifier_single_info *nh_obj;
5087 int weight;
5088
5089 nh = &nhgi->nexthops[i];
5090 switch (info->type) {
5091 case NH_NOTIFIER_INFO_TYPE_SINGLE:
5092 nh_obj = info->nh;
5093 weight = 1;
5094 break;
5095 case NH_NOTIFIER_INFO_TYPE_GRP:
5096 nh_obj = &info->nh_grp->nh_entries[i].nh;
5097 weight = info->nh_grp->nh_entries[i].weight;
5098 break;
5099 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5100 nh_obj = &info->nh_res_table->nhs[i];
5101 weight = 1;
5102 break;
5103 default:
5104 err = -EINVAL;
5105 goto err_nexthop_obj_init;
5106 }
5107 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
5108 weight);
5109 if (err)
5110 goto err_nexthop_obj_init;
5111 }
5112 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5113 if (err)
5114 goto err_group_inc;
5115 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5116 if (err) {
5117 NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
5118 goto err_group_refresh;
5119 }
5120
5121 /* Add resilient nexthop groups to a list so that the activity of their
5122 * nexthop buckets will be periodically queried and cleared.
5123 */
5124 if (nhgi->is_resilient) {
5125 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5126 mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
5127 list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
5128 }
5129
5130 return 0;
5131
5132 err_group_refresh:
5133 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5134 err_group_inc:
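/* All nexthops were initialized at this point; rewind 'i' so the
 * teardown loop below releases every one of them.
 */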
5135 i = nhgi->count;
5136 err_nexthop_obj_init:
5137 for (i--; i >= 0; i--) {
5138 nh = &nhgi->nexthops[i];
5139 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5140 }
5141 kfree(nhgi);
5142 return err;
5143 }
5144
5145 static void
5146 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5147 struct mlxsw_sp_nexthop_group *nh_grp)
5148 {
5149 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5150 struct mlxsw_sp_router *router = mlxsw_sp->router;
5151 int i;
5152
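/* Once the last resilient group is gone, there is nothing left to
 * query, so the periodic activity update work is cancelled.
 */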
5153 if (nhgi->is_resilient) {
5154 list_del(&nhgi->list);
5155 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5156 cancel_delayed_work(&router->nh_grp_activity_dw);
5157 }
5158
5159 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5160 for (i = nhgi->count - 1; i >= 0; i--) {
5161 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5162
5163 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5164 }
5165 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5166 WARN_ON_ONCE(nhgi->adj_index_valid);
5167 kfree(nhgi);
5168 }
5169
5170 static struct mlxsw_sp_nexthop_group *
5171 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
5172 struct nh_notifier_info *info)
5173 {
5174 struct mlxsw_sp_nexthop_group *nh_grp;
5175 int err;
5176
5177 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5178 if (!nh_grp)
5179 return ERR_PTR(-ENOMEM);
5180 INIT_LIST_HEAD(&nh_grp->vr_list);
5181 err = rhashtable_init(&nh_grp->vr_ht,
5182 &mlxsw_sp_nexthop_group_vr_ht_params);
5183 if (err)
5184 goto err_nexthop_group_vr_ht_init;
5185 INIT_LIST_HEAD(&nh_grp->fib_list);
5186 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5187 nh_grp->obj.id = info->id;
5188
5189 err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
5190 if (err)
5191 goto err_nexthop_group_info_init;
5192
5193 nh_grp->can_destroy = false;
5194
5195 return nh_grp;
5196
5197 err_nexthop_group_info_init:
5198 rhashtable_destroy(&nh_grp->vr_ht);
5199 err_nexthop_group_vr_ht_init:
5200 kfree(nh_grp);
5201 return ERR_PTR(err);
5202 }
5203
5204 static void
5205 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5206 struct mlxsw_sp_nexthop_group *nh_grp)
5207 {
5208 if (!nh_grp->can_destroy)
5209 return;
5210 mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5211 WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5212 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5213 rhashtable_destroy(&nh_grp->vr_ht);
5214 kfree(nh_grp);
5215 }
5216
5217 static struct mlxsw_sp_nexthop_group *
5218 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5219 {
5220 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5221
5222 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5223 cmp_arg.id = id;
5224 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5225 &cmp_arg,
5226 mlxsw_sp_nexthop_group_ht_params);
5227 }
5228
5229 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5230 struct mlxsw_sp_nexthop_group *nh_grp)
5231 {
5232 return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5233 }
5234
5235 static int
5236 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5237 struct mlxsw_sp_nexthop_group *nh_grp,
5238 struct mlxsw_sp_nexthop_group *old_nh_grp,
5239 struct netlink_ext_ack *extack)
5240 {
5241 struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5242 struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5243 int err;
5244
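/* Swap the group info between the new group and the one currently
 * used by routes, so that routes keep pointing at 'old_nh_grp' while
 * the hardware is migrated to the new configuration.
 */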
5245 old_nh_grp->nhgi = new_nhgi;
5246 new_nhgi->nh_grp = old_nh_grp;
5247 nh_grp->nhgi = old_nhgi;
5248 old_nhgi->nh_grp = nh_grp;
5249
5250 if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5251 /* Both the old adjacency index and the new one are valid.
5252 * Routes are currently using the old one. Tell the device to
5253 * replace the old adjacency index with the new one.
5254 */
5255 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5256 old_nhgi->adj_index,
5257 old_nhgi->ecmp_size);
5258 if (err) {
5259 NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5260 goto err_out;
5261 }
5262 } else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5263 /* The old adjacency index is valid, while the new one is not.
5264 * Iterate over all the routes using the group and change them
5265 * to trap packets to the CPU.
5266 */
5267 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5268 if (err) {
5269 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5270 goto err_out;
5271 }
5272 } else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5273 /* The old adjacency index is invalid, while the new one is.
5274 * Iterate over all the routes using the group and change them
5275 * to forward packets using the new valid index.
5276 */
5277 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5278 if (err) {
5279 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5280 goto err_out;
5281 }
5282 }
5283
5284 /* Make sure the flags are set / cleared based on the new nexthop group
5285 * information.
5286 */
5287 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5288
5289 /* At this point 'nh_grp' is just a shell that is not used by anyone
5290 * and its nexthop group info is the old info that was just replaced
5291 * with the new one. Remove it.
5292 */
5293 nh_grp->can_destroy = true;
5294 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5295
5296 return 0;
5297
5298 err_out:
5299 old_nhgi->nh_grp = old_nh_grp;
5300 nh_grp->nhgi = new_nhgi;
5301 new_nhgi->nh_grp = nh_grp;
5302 old_nh_grp->nhgi = old_nhgi;
5303 return err;
5304 }
5305
5306 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5307 struct nh_notifier_info *info)
5308 {
5309 struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5310 struct netlink_ext_ack *extack = info->extack;
5311 int err;
5312
5313 nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5314 if (IS_ERR(nh_grp))
5315 return PTR_ERR(nh_grp);
5316
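/* A replace notification is used both for adding a new group and for
 * replacing an existing group with the same ID.
 */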
5317 old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5318 if (!old_nh_grp)
5319 err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5320 else
5321 err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5322 old_nh_grp, extack);
5323
5324 if (err) {
5325 nh_grp->can_destroy = true;
5326 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5327 }
5328
5329 return err;
5330 }
5331
5332 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5333 struct nh_notifier_info *info)
5334 {
5335 struct mlxsw_sp_nexthop_group *nh_grp;
5336
5337 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5338 if (!nh_grp)
5339 return;
5340
5341 nh_grp->can_destroy = true;
5342 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5343
5344 /* If the group still has routes using it, then defer the delete
5345 * operation until the last route using it is deleted.
5346 */
5347 if (!list_empty(&nh_grp->fib_list))
5348 return;
5349 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5350 }
5351
5352 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5353 u32 adj_index, char *ratr_pl)
5354 {
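/* Read back the adjacency entry at 'adj_index', including its
 * activity bit, using a RATR query operation.
 */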
5355 MLXSW_REG_ZERO(ratr, ratr_pl);
5356 mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5357 mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5358 mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5359
5360 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5361 }
5362
5363 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5364 {
5365 /* Clear the opcode and activity on both the old and new payload as
5366 * they are irrelevant for the comparison.
5367 */
5368 mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5369 mlxsw_reg_ratr_a_set(ratr_pl, 0);
5370 mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5371 mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5372
5373 /* If the contents of the adjacency entry are consistent with the
5374 * replacement request, then replacement was successful.
5375 */
5376 if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5377 return 0;
5378
5379 return -EINVAL;
5380 }
5381
5382 static int
5383 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5384 struct mlxsw_sp_nexthop *nh,
5385 struct nh_notifier_info *info)
5386 {
5387 u16 bucket_index = info->nh_res_bucket->bucket_index;
5388 struct netlink_ext_ack *extack = info->extack;
5389 bool force = info->nh_res_bucket->force;
5390 char ratr_pl_new[MLXSW_REG_RATR_LEN];
5391 char ratr_pl[MLXSW_REG_RATR_LEN];
5392 u32 adj_index;
5393 int err;
5394
5395 /* No point in trying an atomic replacement if the idle timer interval
5396 * is smaller than the interval in which we query and clear activity.
5397 */
5398 if (!force && info->nh_res_bucket->idle_timer_ms <
5399 MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5400 force = true;
5401
5402 adj_index = nh->nhgi->adj_index + bucket_index;
5403 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5404 if (err) {
5405 NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5406 return err;
5407 }
5408
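/* A non-forced write may be vetoed by the device if the bucket was
 * active, so read the entry back and verify that it now matches what
 * was requested.
 */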
5409 if (!force) {
5410 err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5411 ratr_pl_new);
5412 if (err) {
5413 NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5414 return err;
5415 }
5416
5417 err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5418 if (err) {
5419 NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5420 return err;
5421 }
5422 }
5423
5424 nh->update = 0;
5425 nh->offloaded = 1;
5426 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5427
5428 return 0;
5429 }
5430
5431 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5432 struct nh_notifier_info *info)
5433 {
5434 u16 bucket_index = info->nh_res_bucket->bucket_index;
5435 struct netlink_ext_ack *extack = info->extack;
5436 struct mlxsw_sp_nexthop_group_info *nhgi;
5437 struct nh_notifier_single_info *nh_obj;
5438 struct mlxsw_sp_nexthop_group *nh_grp;
5439 struct mlxsw_sp_nexthop *nh;
5440 int err;
5441
5442 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5443 if (!nh_grp) {
5444 NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5445 return -EINVAL;
5446 }
5447
5448 nhgi = nh_grp->nhgi;
5449
5450 if (bucket_index >= nhgi->count) {
5451 NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5452 return -EINVAL;
5453 }
5454
5455 nh = &nhgi->nexthops[bucket_index];
5456 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5457
5458 nh_obj = &info->nh_res_bucket->new_nh;
5459 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5460 if (err) {
5461 NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5462 goto err_nexthop_obj_init;
5463 }
5464
5465 err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5466 if (err)
5467 goto err_nexthop_obj_bucket_adj_update;
5468
5469 return 0;
5470
5471 err_nexthop_obj_bucket_adj_update:
5472 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5473 err_nexthop_obj_init:
5474 nh_obj = &info->nh_res_bucket->old_nh;
5475 mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5476 /* The old adjacency entry was not overwritten */
5477 nh->update = 0;
5478 nh->offloaded = 1;
5479 return err;
5480 }
5481
5482 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5483 unsigned long event, void *ptr)
5484 {
5485 struct nh_notifier_info *info = ptr;
5486 struct mlxsw_sp_router *router;
5487 int err = 0;
5488
5489 router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5490 err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5491 if (err)
5492 goto out;
5493
5494 mutex_lock(&router->lock);
5495
5496 switch (event) {
5497 case NEXTHOP_EVENT_REPLACE:
5498 err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5499 break;
5500 case NEXTHOP_EVENT_DEL:
5501 mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5502 break;
5503 case NEXTHOP_EVENT_BUCKET_REPLACE:
5504 err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5505 info);
5506 break;
5507 default:
5508 break;
5509 }
5510
5511 mutex_unlock(&router->lock);
5512
5513 out:
5514 return notifier_from_errno(err);
5515 }
5516
5517 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5518 struct fib_info *fi)
5519 {
5520 const struct fib_nh *nh = fib_info_nh(fi, 0);
5521
5522 return nh->fib_nh_gw_family ||
5523 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5524 }
5525
5526 static int
5527 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5528 struct mlxsw_sp_nexthop_group *nh_grp)
5529 {
5530 unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5531 struct mlxsw_sp_nexthop_group_info *nhgi;
5532 struct mlxsw_sp_nexthop *nh;
5533 int err, i;
5534
5535 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5536 if (!nhgi)
5537 return -ENOMEM;
5538 nh_grp->nhgi = nhgi;
5539 nhgi->nh_grp = nh_grp;
5540 nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5541 nhgi->count = nhs;
5542 for (i = 0; i < nhgi->count; i++) {
5543 struct fib_nh *fib_nh;
5544
5545 nh = &nhgi->nexthops[i];
5546 fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5547 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5548 if (err)
5549 goto err_nexthop4_init;
5550 }
5551 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5552 if (err)
5553 goto err_group_inc;
5554 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5555 if (err)
5556 goto err_group_refresh;
5557
5558 return 0;
5559
5560 err_group_refresh:
5561 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5562 err_group_inc:
5563 i = nhgi->count;
5564 err_nexthop4_init:
5565 for (i--; i >= 0; i--) {
5566 nh = &nhgi->nexthops[i];
5567 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5568 }
5569 kfree(nhgi);
5570 return err;
5571 }
5572
5573 static void
5574 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5575 struct mlxsw_sp_nexthop_group *nh_grp)
5576 {
5577 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5578 int i;
5579
5580 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5581 for (i = nhgi->count - 1; i >= 0; i--) {
5582 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5583
5584 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5585 }
5586 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5587 WARN_ON_ONCE(nhgi->adj_index_valid);
5588 kfree(nhgi);
5589 }
5590
5591 static struct mlxsw_sp_nexthop_group *
5592 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5593 {
5594 struct mlxsw_sp_nexthop_group *nh_grp;
5595 int err;
5596
5597 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5598 if (!nh_grp)
5599 return ERR_PTR(-ENOMEM);
5600 INIT_LIST_HEAD(&nh_grp->vr_list);
5601 err = rhashtable_init(&nh_grp->vr_ht,
5602 &mlxsw_sp_nexthop_group_vr_ht_params);
5603 if (err)
5604 goto err_nexthop_group_vr_ht_init;
5605 INIT_LIST_HEAD(&nh_grp->fib_list);
5606 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5607 nh_grp->ipv4.fi = fi;
5608 fib_info_hold(fi);
5609
5610 err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5611 if (err)
5612 goto err_nexthop_group_info_init;
5613
5614 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5615 if (err)
5616 goto err_nexthop_group_insert;
5617
5618 nh_grp->can_destroy = true;
5619
5620 return nh_grp;
5621
5622 err_nexthop_group_insert:
5623 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5624 err_nexthop_group_info_init:
5625 fib_info_put(fi);
5626 rhashtable_destroy(&nh_grp->vr_ht);
5627 err_nexthop_group_vr_ht_init:
5628 kfree(nh_grp);
5629 return ERR_PTR(err);
5630 }
5631
5632 static void
5633 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5634 struct mlxsw_sp_nexthop_group *nh_grp)
5635 {
5636 if (!nh_grp->can_destroy)
5637 return;
5638 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5639 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5640 fib_info_put(nh_grp->ipv4.fi);
5641 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5642 rhashtable_destroy(&nh_grp->vr_ht);
5643 kfree(nh_grp);
5644 }
5645
5646 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5647 struct mlxsw_sp_fib_entry *fib_entry,
5648 struct fib_info *fi)
5649 {
5650 struct mlxsw_sp_nexthop_group *nh_grp;
5651
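/* Routes that use a nexthop object are bound to the group that was
 * created when the object itself was notified; legacy nexthops get a
 * group created on demand and shared between routes with the same
 * fib_info.
 */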
5652 if (fi->nh) {
5653 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5654 fi->nh->id);
5655 if (WARN_ON_ONCE(!nh_grp))
5656 return -EINVAL;
5657 goto out;
5658 }
5659
5660 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5661 if (!nh_grp) {
5662 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5663 if (IS_ERR(nh_grp))
5664 return PTR_ERR(nh_grp);
5665 }
5666 out:
5667 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5668 fib_entry->nh_group = nh_grp;
5669 return 0;
5670 }
5671
5672 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5673 struct mlxsw_sp_fib_entry *fib_entry)
5674 {
5675 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5676
5677 list_del(&fib_entry->nexthop_group_node);
5678 if (!list_empty(&nh_grp->fib_list))
5679 return;
5680
5681 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5682 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5683 return;
5684 }
5685
5686 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5687 }
5688
5689 static bool
5690 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5691 {
5692 struct mlxsw_sp_fib4_entry *fib4_entry;
5693
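/* Routes that match on a non-zero DSCP value are not offloaded. */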
5694 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5695 common);
5696 return !fib4_entry->dscp;
5697 }
5698
5699 static bool
5700 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5701 {
5702 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5703
5704 switch (fib_entry->fib_node->fib->proto) {
5705 case MLXSW_SP_L3_PROTO_IPV4:
5706 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5707 return false;
5708 break;
5709 case MLXSW_SP_L3_PROTO_IPV6:
5710 break;
5711 }
5712
5713 switch (fib_entry->type) {
5714 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5715 return !!nh_group->nhgi->adj_index_valid;
5716 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5717 return !!mlxsw_sp_nhgi_rif(nh_group->nhgi);
5718 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5719 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5720 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5721 return true;
5722 default:
5723 return false;
5724 }
5725 }
5726
5727 static struct mlxsw_sp_nexthop *
5728 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5729 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5730 {
5731 int i;
5732
5733 for (i = 0; i < nh_grp->nhgi->count; i++) {
5734 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5735 struct net_device *dev = mlxsw_sp_nexthop_dev(nh);
5736 struct fib6_info *rt = mlxsw_sp_rt6->rt;
5737
5738 if (dev && dev == rt->fib6_nh->fib_nh_dev &&
5739 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5740 &rt->fib6_nh->fib_nh_gw6))
5741 return nh;
5742 }
5743
5744 return NULL;
5745 }
5746
5747 static void
5748 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5749 struct fib_entry_notifier_info *fen_info)
5750 {
5751 u32 *p_dst = (u32 *) &fen_info->dst;
5752 struct fib_rt_info fri;
5753
5754 fri.fi = fen_info->fi;
5755 fri.tb_id = fen_info->tb_id;
5756 fri.dst = cpu_to_be32(*p_dst);
5757 fri.dst_len = fen_info->dst_len;
5758 fri.dscp = fen_info->dscp;
5759 fri.type = fen_info->type;
5760 fri.offload = false;
5761 fri.trap = false;
5762 fri.offload_failed = true;
5763 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5764 }
5765
5766 static void
5767 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5768 struct mlxsw_sp_fib_entry *fib_entry)
5769 {
5770 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5771 int dst_len = fib_entry->fib_node->key.prefix_len;
5772 struct mlxsw_sp_fib4_entry *fib4_entry;
5773 struct fib_rt_info fri;
5774 bool should_offload;
5775
5776 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5777 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5778 common);
5779 fri.fi = fib4_entry->fi;
5780 fri.tb_id = fib4_entry->tb_id;
5781 fri.dst = cpu_to_be32(*p_dst);
5782 fri.dst_len = dst_len;
5783 fri.dscp = fib4_entry->dscp;
5784 fri.type = fib4_entry->type;
5785 fri.offload = should_offload;
5786 fri.trap = !should_offload;
5787 fri.offload_failed = false;
5788 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5789 }
5790
5791 static void
5792 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5793 struct mlxsw_sp_fib_entry *fib_entry)
5794 {
5795 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5796 int dst_len = fib_entry->fib_node->key.prefix_len;
5797 struct mlxsw_sp_fib4_entry *fib4_entry;
5798 struct fib_rt_info fri;
5799
5800 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5801 common);
5802 fri.fi = fib4_entry->fi;
5803 fri.tb_id = fib4_entry->tb_id;
5804 fri.dst = cpu_to_be32(*p_dst);
5805 fri.dst_len = dst_len;
5806 fri.dscp = fib4_entry->dscp;
5807 fri.type = fib4_entry->type;
5808 fri.offload = false;
5809 fri.trap = false;
5810 fri.offload_failed = false;
5811 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5812 }
5813
5814 #if IS_ENABLED(CONFIG_IPV6)
5815 static void
5816 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5817 struct fib6_info **rt_arr,
5818 unsigned int nrt6)
5819 {
5820 int i;
5821
5822 /* In IPv6 a multipath route is represented using multiple routes, so
5823 * we need to set the flags on all of them.
5824 */
5825 for (i = 0; i < nrt6; i++)
5826 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5827 false, false, true);
5828 }
5829 #else
5830 static void
5831 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5832 struct fib6_info **rt_arr,
5833 unsigned int nrt6)
5834 {
5835 }
5836 #endif
5837
5838 #if IS_ENABLED(CONFIG_IPV6)
5839 static void
5840 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5841 struct mlxsw_sp_fib_entry *fib_entry)
5842 {
5843 struct mlxsw_sp_fib6_entry *fib6_entry;
5844 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5845 bool should_offload;
5846
5847 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5848
5849 /* In IPv6 a multipath route is represented using multiple routes, so
5850 * we need to set the flags on all of them.
5851 */
5852 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5853 common);
5854 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5855 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5856 should_offload, !should_offload, false);
5857 }
5858 #else
5859 static void
5860 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5861 struct mlxsw_sp_fib_entry *fib_entry)
5862 {
5863 }
5864 #endif
5865
5866 #if IS_ENABLED(CONFIG_IPV6)
5867 static void
5868 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5869 struct mlxsw_sp_fib_entry *fib_entry)
5870 {
5871 struct mlxsw_sp_fib6_entry *fib6_entry;
5872 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5873
5874 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5875 common);
5876 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5877 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5878 false, false, false);
5879 }
5880 #else
5881 static void
5882 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5883 struct mlxsw_sp_fib_entry *fib_entry)
5884 {
5885 }
5886 #endif
5887
5888 static void
5889 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5890 struct mlxsw_sp_fib_entry *fib_entry)
5891 {
5892 switch (fib_entry->fib_node->fib->proto) {
5893 case MLXSW_SP_L3_PROTO_IPV4:
5894 mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5895 break;
5896 case MLXSW_SP_L3_PROTO_IPV6:
5897 mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5898 break;
5899 }
5900 }
5901
5902 static void
5903 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5904 struct mlxsw_sp_fib_entry *fib_entry)
5905 {
5906 switch (fib_entry->fib_node->fib->proto) {
5907 case MLXSW_SP_L3_PROTO_IPV4:
5908 mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5909 break;
5910 case MLXSW_SP_L3_PROTO_IPV6:
5911 mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5912 break;
5913 }
5914 }
5915
5916 static void
5917 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5918 struct mlxsw_sp_fib_entry *fib_entry,
5919 enum mlxsw_reg_ralue_op op)
5920 {
5921 switch (op) {
5922 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
5923 mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5924 break;
5925 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
5926 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5927 break;
5928 default:
5929 break;
5930 }
5931 }
5932
5933 static void
5934 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
5935 const struct mlxsw_sp_fib_entry *fib_entry,
5936 enum mlxsw_reg_ralue_op op)
5937 {
5938 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5939 enum mlxsw_reg_ralxx_protocol proto;
5940 u32 *p_dip;
5941
5942 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
5943
5944 switch (fib->proto) {
5945 case MLXSW_SP_L3_PROTO_IPV4:
5946 p_dip = (u32 *) fib_entry->fib_node->key.addr;
5947 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
5948 fib_entry->fib_node->key.prefix_len,
5949 *p_dip);
5950 break;
5951 case MLXSW_SP_L3_PROTO_IPV6:
5952 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
5953 fib_entry->fib_node->key.prefix_len,
5954 fib_entry->fib_node->key.addr);
5955 break;
5956 }
5957 }
5958
5959 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5960 struct mlxsw_sp_fib_entry *fib_entry,
5961 enum mlxsw_reg_ralue_op op)
5962 {
5963 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5964 struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5965 char ralue_pl[MLXSW_REG_RALUE_LEN];
5966 enum mlxsw_reg_ralue_trap_action trap_action;
5967 u16 trap_id = 0;
5968 u32 adjacency_index = 0;
5969 u16 ecmp_size = 0;
5970
5971 /* In case the nexthop group adjacency index is valid, use it
5972 * with the provided ECMP size. Otherwise, set up a trap and pass
5973 * traffic to the kernel.
5974 */
5975 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5976 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5977 adjacency_index = nhgi->adj_index;
5978 ecmp_size = nhgi->ecmp_size;
5979 } else if (!nhgi->adj_index_valid && nhgi->count &&
5980 mlxsw_sp_nhgi_rif(nhgi)) {
5981 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5982 adjacency_index = mlxsw_sp->router->adj_trap_index;
5983 ecmp_size = 1;
5984 } else {
5985 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5986 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5987 }
5988
5989 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5990 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
5991 adjacency_index, ecmp_size);
5992 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5993 }
5994
5995 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5996 struct mlxsw_sp_fib_entry *fib_entry,
5997 enum mlxsw_reg_ralue_op op)
5998 {
5999 struct mlxsw_sp_rif *rif = mlxsw_sp_nhgi_rif(fib_entry->nh_group->nhgi);
6000 enum mlxsw_reg_ralue_trap_action trap_action;
6001 char ralue_pl[MLXSW_REG_RALUE_LEN];
6002 u16 trap_id = 0;
6003 u16 rif_index = 0;
6004
6005 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
6006 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
6007 rif_index = rif->rif_index;
6008 } else {
6009 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6010 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
6011 }
6012
6013 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6014 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
6015 rif_index);
6016 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6017 }
6018
6019 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
6020 struct mlxsw_sp_fib_entry *fib_entry,
6021 enum mlxsw_reg_ralue_op op)
6022 {
6023 char ralue_pl[MLXSW_REG_RALUE_LEN];
6024
6025 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6026 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
6027 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6028 }
6029
6030 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
6031 struct mlxsw_sp_fib_entry *fib_entry,
6032 enum mlxsw_reg_ralue_op op)
6033 {
6034 enum mlxsw_reg_ralue_trap_action trap_action;
6035 char ralue_pl[MLXSW_REG_RALUE_LEN];
6036
6037 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
6038 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6039 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
6040 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6041 }
6042
6043 static int
6044 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
6045 struct mlxsw_sp_fib_entry *fib_entry,
6046 enum mlxsw_reg_ralue_op op)
6047 {
6048 enum mlxsw_reg_ralue_trap_action trap_action;
6049 char ralue_pl[MLXSW_REG_RALUE_LEN];
6050 u16 trap_id;
6051
6052 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6053 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
6054
6055 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6056 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
6057 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6058 }
6059
6060 static int
6061 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
6062 struct mlxsw_sp_fib_entry *fib_entry,
6063 enum mlxsw_reg_ralue_op op)
6064 {
6065 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
6066 const struct mlxsw_sp_ipip_ops *ipip_ops;
6067 char ralue_pl[MLXSW_REG_RALUE_LEN];
6068 int err;
6069
6070 if (WARN_ON(!ipip_entry))
6071 return -EINVAL;
6072
6073 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
6074 err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
6075 fib_entry->decap.tunnel_index);
6076 if (err)
6077 return err;
6078
6079 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6080 mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6081 fib_entry->decap.tunnel_index);
6082 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6083 }
6084
6085 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
6086 struct mlxsw_sp_fib_entry *fib_entry,
6087 enum mlxsw_reg_ralue_op op)
6088 {
6089 char ralue_pl[MLXSW_REG_RALUE_LEN];
6090
6091 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6092 mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6093 fib_entry->decap.tunnel_index);
6094 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6095 }
6096
6097 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6098 struct mlxsw_sp_fib_entry *fib_entry,
6099 enum mlxsw_reg_ralue_op op)
6100 {
6101 switch (fib_entry->type) {
6102 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6103 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
6104 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6105 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
6106 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6107 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
6108 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6109 return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
6110 case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6111 return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
6112 op);
6113 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6114 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
6115 fib_entry, op);
6116 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6117 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
6118 }
6119 return -EINVAL;
6120 }
6121
6122 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6123 struct mlxsw_sp_fib_entry *fib_entry,
6124 enum mlxsw_reg_ralue_op op)
6125 {
6126 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
6127
6128 if (err)
6129 return err;
6130
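/* Reflect the result in the kernel's route flags (offload / trap). */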
6131 mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6132
6133 return err;
6134 }
6135
6136 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6137 struct mlxsw_sp_fib_entry *fib_entry)
6138 {
6139 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6140 MLXSW_REG_RALUE_OP_WRITE_WRITE);
6141 }
6142
6143 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6144 struct mlxsw_sp_fib_entry *fib_entry)
6145 {
6146 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6147 MLXSW_REG_RALUE_OP_WRITE_DELETE);
6148 }
6149
6150 static int
6151 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6152 const struct fib_entry_notifier_info *fen_info,
6153 struct mlxsw_sp_fib_entry *fib_entry)
6154 {
6155 struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6156 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6157 struct mlxsw_sp_router *router = mlxsw_sp->router;
6158 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6159 int ifindex = nhgi->nexthops[0].ifindex;
6160 struct mlxsw_sp_ipip_entry *ipip_entry;
6161
6162 switch (fen_info->type) {
6163 case RTN_LOCAL:
6164 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6165 MLXSW_SP_L3_PROTO_IPV4, dip);
6166 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6167 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6168 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6169 fib_entry,
6170 ipip_entry);
6171 }
6172 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6173 MLXSW_SP_L3_PROTO_IPV4,
6174 &dip)) {
6175 u32 tunnel_index;
6176
6177 tunnel_index = router->nve_decap_config.tunnel_index;
6178 fib_entry->decap.tunnel_index = tunnel_index;
6179 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6180 return 0;
6181 }
6182 fallthrough;
6183 case RTN_BROADCAST:
6184 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6185 return 0;
6186 case RTN_BLACKHOLE:
6187 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6188 return 0;
6189 case RTN_UNREACHABLE:
6190 case RTN_PROHIBIT:
6191 /* Packets hitting these routes need to be trapped, but
6192 * can be trapped at a lower priority than packets directed
6193 * at the host, so use action type local instead of trap.
6194 */
6195 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6196 return 0;
6197 case RTN_UNICAST:
6198 if (nhgi->gateway)
6199 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6200 else
6201 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6202 return 0;
6203 default:
6204 return -EINVAL;
6205 }
6206 }
6207
6208 static void
6209 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6210 struct mlxsw_sp_fib_entry *fib_entry)
6211 {
6212 switch (fib_entry->type) {
6213 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6214 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6215 break;
6216 default:
6217 break;
6218 }
6219 }
6220
6221 static void
6222 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6223 struct mlxsw_sp_fib4_entry *fib4_entry)
6224 {
6225 mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6226 }
6227
6228 static struct mlxsw_sp_fib4_entry *
6229 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6230 struct mlxsw_sp_fib_node *fib_node,
6231 const struct fib_entry_notifier_info *fen_info)
6232 {
6233 struct mlxsw_sp_fib4_entry *fib4_entry;
6234 struct mlxsw_sp_fib_entry *fib_entry;
6235 int err;
6236
6237 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6238 if (!fib4_entry)
6239 return ERR_PTR(-ENOMEM);
6240 fib_entry = &fib4_entry->common;
6241
6242 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6243 if (err)
6244 goto err_nexthop4_group_get;
6245
6246 err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6247 fib_node->fib);
6248 if (err)
6249 goto err_nexthop_group_vr_link;
6250
6251 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6252 if (err)
6253 goto err_fib4_entry_type_set;
6254
6255 fib4_entry->fi = fen_info->fi;
6256 fib_info_hold(fib4_entry->fi);
6257 fib4_entry->tb_id = fen_info->tb_id;
6258 fib4_entry->type = fen_info->type;
6259 fib4_entry->dscp = fen_info->dscp;
6260
6261 fib_entry->fib_node = fib_node;
6262
6263 return fib4_entry;
6264
6265 err_fib4_entry_type_set:
6266 mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6267 err_nexthop_group_vr_link:
6268 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6269 err_nexthop4_group_get:
6270 kfree(fib4_entry);
6271 return ERR_PTR(err);
6272 }
6273
6274 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6275 struct mlxsw_sp_fib4_entry *fib4_entry)
6276 {
6277 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6278
6279 fib_info_put(fib4_entry->fi);
6280 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6281 mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6282 fib_node->fib);
6283 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6284 kfree(fib4_entry);
6285 }
6286
6287 static struct mlxsw_sp_fib4_entry *
6288 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6289 const struct fib_entry_notifier_info *fen_info)
6290 {
6291 struct mlxsw_sp_fib4_entry *fib4_entry;
6292 struct mlxsw_sp_fib_node *fib_node;
6293 struct mlxsw_sp_fib *fib;
6294 struct mlxsw_sp_vr *vr;
6295
6296 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6297 if (!vr)
6298 return NULL;
6299 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6300
6301 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6302 sizeof(fen_info->dst),
6303 fen_info->dst_len);
6304 if (!fib_node)
6305 return NULL;
6306
6307 fib4_entry = container_of(fib_node->fib_entry,
6308 struct mlxsw_sp_fib4_entry, common);
6309 if (fib4_entry->tb_id == fen_info->tb_id &&
6310 fib4_entry->dscp == fen_info->dscp &&
6311 fib4_entry->type == fen_info->type &&
6312 fib4_entry->fi == fen_info->fi)
6313 return fib4_entry;
6314
6315 return NULL;
6316 }
6317
6318 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6319 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6320 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6321 .key_len = sizeof(struct mlxsw_sp_fib_key),
6322 .automatic_shrinking = true,
6323 };
6324
6325 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6326 struct mlxsw_sp_fib_node *fib_node)
6327 {
6328 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6329 mlxsw_sp_fib_ht_params);
6330 }
6331
6332 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6333 struct mlxsw_sp_fib_node *fib_node)
6334 {
6335 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6336 mlxsw_sp_fib_ht_params);
6337 }
6338
6339 static struct mlxsw_sp_fib_node *
6340 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6341 size_t addr_len, unsigned char prefix_len)
6342 {
6343 struct mlxsw_sp_fib_key key;
6344
6345 memset(&key, 0, sizeof(key));
6346 memcpy(key.addr, addr, addr_len);
6347 key.prefix_len = prefix_len;
6348 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6349 }
6350
6351 static struct mlxsw_sp_fib_node *
6352 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6353 size_t addr_len, unsigned char prefix_len)
6354 {
6355 struct mlxsw_sp_fib_node *fib_node;
6356
6357 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6358 if (!fib_node)
6359 return NULL;
6360
6361 list_add(&fib_node->list, &fib->node_list);
6362 memcpy(fib_node->key.addr, addr, addr_len);
6363 fib_node->key.prefix_len = prefix_len;
6364
6365 return fib_node;
6366 }
6367
6368 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6369 {
6370 list_del(&fib_node->list);
6371 kfree(fib_node);
6372 }
6373
6374 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6375 struct mlxsw_sp_fib_node *fib_node)
6376 {
6377 struct mlxsw_sp_prefix_usage req_prefix_usage;
6378 struct mlxsw_sp_fib *fib = fib_node->fib;
6379 struct mlxsw_sp_lpm_tree *lpm_tree;
6380 int err;
6381
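/* If the protocol's current LPM tree already covers this prefix
 * length, only the reference count needs to be bumped. Otherwise, get
 * a tree that also includes the new length and rebind the virtual
 * routers to it.
 */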
6382 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6383 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6384 goto out;
6385
6386 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6387 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6388 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6389 fib->proto);
6390 if (IS_ERR(lpm_tree))
6391 return PTR_ERR(lpm_tree);
6392
6393 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6394 if (err)
6395 goto err_lpm_tree_replace;
6396
6397 out:
6398 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6399 return 0;
6400
6401 err_lpm_tree_replace:
6402 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6403 return err;
6404 }
6405
6406 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6407 struct mlxsw_sp_fib_node *fib_node)
6408 {
6409 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6410 struct mlxsw_sp_prefix_usage req_prefix_usage;
6411 struct mlxsw_sp_fib *fib = fib_node->fib;
6412 int err;
6413
6414 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6415 return;
6416 /* Try to construct a new LPM tree from the current prefix usage
6417 * minus the now-unused prefix length. If we fail, continue using the old tree.
6418 */
6419 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6420 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6421 fib_node->key.prefix_len);
6422 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6423 fib->proto);
6424 if (IS_ERR(lpm_tree))
6425 return;
6426
6427 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6428 if (err)
6429 goto err_lpm_tree_replace;
6430
6431 return;
6432
6433 err_lpm_tree_replace:
6434 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6435 }
6436
6437 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6438 struct mlxsw_sp_fib_node *fib_node,
6439 struct mlxsw_sp_fib *fib)
6440 {
6441 int err;
6442
6443 err = mlxsw_sp_fib_node_insert(fib, fib_node);
6444 if (err)
6445 return err;
6446 fib_node->fib = fib;
6447
6448 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6449 if (err)
6450 goto err_fib_lpm_tree_link;
6451
6452 return 0;
6453
6454 err_fib_lpm_tree_link:
6455 fib_node->fib = NULL;
6456 mlxsw_sp_fib_node_remove(fib, fib_node);
6457 return err;
6458 }
6459
6460 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6461 struct mlxsw_sp_fib_node *fib_node)
6462 {
6463 struct mlxsw_sp_fib *fib = fib_node->fib;
6464
6465 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6466 fib_node->fib = NULL;
6467 mlxsw_sp_fib_node_remove(fib, fib_node);
6468 }
6469
6470 static struct mlxsw_sp_fib_node *
6471 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6472 size_t addr_len, unsigned char prefix_len,
6473 enum mlxsw_sp_l3proto proto)
6474 {
6475 struct mlxsw_sp_fib_node *fib_node;
6476 struct mlxsw_sp_fib *fib;
6477 struct mlxsw_sp_vr *vr;
6478 int err;
6479
6480 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6481 if (IS_ERR(vr))
6482 return ERR_CAST(vr);
6483 fib = mlxsw_sp_vr_fib(vr, proto);
6484
6485 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6486 if (fib_node)
6487 return fib_node;
6488
6489 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6490 if (!fib_node) {
6491 err = -ENOMEM;
6492 goto err_fib_node_create;
6493 }
6494
6495 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6496 if (err)
6497 goto err_fib_node_init;
6498
6499 return fib_node;
6500
6501 err_fib_node_init:
6502 mlxsw_sp_fib_node_destroy(fib_node);
6503 err_fib_node_create:
6504 mlxsw_sp_vr_put(mlxsw_sp, vr);
6505 return ERR_PTR(err);
6506 }
6507
6508 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6509 struct mlxsw_sp_fib_node *fib_node)
6510 {
6511 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6512
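/* The node and its virtual router are kept as long as a FIB entry is
 * using them.
 */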
6513 if (fib_node->fib_entry)
6514 return;
6515 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6516 mlxsw_sp_fib_node_destroy(fib_node);
6517 mlxsw_sp_vr_put(mlxsw_sp, vr);
6518 }
6519
6520 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6521 struct mlxsw_sp_fib_entry *fib_entry)
6522 {
6523 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6524 int err;
6525
6526 fib_node->fib_entry = fib_entry;
6527
6528 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
6529 if (err)
6530 goto err_fib_entry_update;
6531
6532 return 0;
6533
6534 err_fib_entry_update:
6535 fib_node->fib_entry = NULL;
6536 return err;
6537 }
6538
6539 static void
6540 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6541 struct mlxsw_sp_fib_entry *fib_entry)
6542 {
6543 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6544
6545 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
6546 fib_node->fib_entry = NULL;
6547 }
6548
6549 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6550 {
6551 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6552 struct mlxsw_sp_fib4_entry *fib4_replaced;
6553
6554 if (!fib_node->fib_entry)
6555 return true;
6556
6557 fib4_replaced = container_of(fib_node->fib_entry,
6558 struct mlxsw_sp_fib4_entry, common);
6559 if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6560 fib4_replaced->tb_id == RT_TABLE_LOCAL)
6561 return false;
6562
6563 return true;
6564 }
6565
6566 static int
6567 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6568 const struct fib_entry_notifier_info *fen_info)
6569 {
6570 struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6571 struct mlxsw_sp_fib_entry *replaced;
6572 struct mlxsw_sp_fib_node *fib_node;
6573 int err;
6574
6575 if (fen_info->fi->nh &&
6576 !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6577 return 0;
6578
6579 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6580 &fen_info->dst, sizeof(fen_info->dst),
6581 fen_info->dst_len,
6582 MLXSW_SP_L3_PROTO_IPV4);
6583 if (IS_ERR(fib_node)) {
6584 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6585 return PTR_ERR(fib_node);
6586 }
6587
6588 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6589 if (IS_ERR(fib4_entry)) {
6590 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6591 err = PTR_ERR(fib4_entry);
6592 goto err_fib4_entry_create;
6593 }
6594
6595 if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6596 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6597 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6598 return 0;
6599 }
6600
6601 replaced = fib_node->fib_entry;
6602 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
6603 if (err) {
6604 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6605 goto err_fib_node_entry_link;
6606 }
6607
6608 /* Nothing to replace */
6609 if (!replaced)
6610 return 0;
6611
6612 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6613 fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6614 common);
6615 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6616
6617 return 0;
6618
6619 err_fib_node_entry_link:
6620 fib_node->fib_entry = replaced;
6621 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6622 err_fib4_entry_create:
6623 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6624 return err;
6625 }
6626
6627 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6628 struct fib_entry_notifier_info *fen_info)
6629 {
6630 struct mlxsw_sp_fib4_entry *fib4_entry;
6631 struct mlxsw_sp_fib_node *fib_node;
6632
6633 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6634 if (!fib4_entry)
6635 return;
6636 fib_node = fib4_entry->common.fib_node;
6637
6638 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
6639 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6640 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6641 }
6642
6643 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6644 {
6645 /* Multicast routes aren't supported, so ignore them. Neighbour
6646 * Discovery packets are specifically trapped.
6647 */
6648 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6649 return true;
6650
6651 /* Cloned routes are irrelevant in the forwarding path. */
6652 if (rt->fib6_flags & RTF_CACHE)
6653 return true;
6654
6655 return false;
6656 }
6657
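/* Wrap a fib6_info in a driver-private mlxsw_sp_rt6 node so that several
 * routes can be chained on one FIB entry, one per sibling of a multipath
 * route.
 */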
6658 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6659 {
6660 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6661
6662 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6663 if (!mlxsw_sp_rt6)
6664 return ERR_PTR(-ENOMEM);
6665
6666 /* In case of route replace, replaced route is deleted with
6667 * no notification. Take reference to prevent accessing freed
6668 * memory.
6669 */
6670 mlxsw_sp_rt6->rt = rt;
6671 fib6_info_hold(rt);
6672
6673 return mlxsw_sp_rt6;
6674 }
6675
6676 #if IS_ENABLED(CONFIG_IPV6)
6677 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6678 {
6679 fib6_info_release(rt);
6680 }
6681 #else
6682 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6683 {
6684 }
6685 #endif
6686
6687 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6688 {
6689 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6690
6691 if (!mlxsw_sp_rt6->rt->nh)
6692 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6693 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6694 kfree(mlxsw_sp_rt6);
6695 }
6696
6697 static struct fib6_info *
6698 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6699 {
6700 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6701 list)->rt;
6702 }
6703
6704 static struct mlxsw_sp_rt6 *
6705 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6706 const struct fib6_info *rt)
6707 {
6708 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6709
6710 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6711 if (mlxsw_sp_rt6->rt == rt)
6712 return mlxsw_sp_rt6;
6713 }
6714
6715 return NULL;
6716 }
6717
6718 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6719 const struct fib6_info *rt,
6720 enum mlxsw_sp_ipip_type *ret)
6721 {
6722 return rt->fib6_nh->fib_nh_dev &&
6723 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6724 }
6725
6726 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6727 struct mlxsw_sp_nexthop_group *nh_grp,
6728 struct mlxsw_sp_nexthop *nh,
6729 const struct fib6_info *rt)
6730 {
6731 struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6732 int err;
6733
6734 nh->nhgi = nh_grp->nhgi;
6735 nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6736 memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6737 #if IS_ENABLED(CONFIG_IPV6)
6738 nh->neigh_tbl = &nd_tbl;
6739 #endif
6740 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
6741
6742 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6743
6744 if (!dev)
6745 return 0;
6746 nh->ifindex = dev->ifindex;
6747
6748 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6749 if (err)
6750 goto err_nexthop_type_init;
6751
6752 return 0;
6753
6754 err_nexthop_type_init:
6755 list_del(&nh->router_list_node);
6756 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6757 return err;
6758 }
6759
6760 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
6761 struct mlxsw_sp_nexthop *nh)
6762 {
6763 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
6764 list_del(&nh->router_list_node);
6765 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
6766 }
6767
6768 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6769 const struct fib6_info *rt)
6770 {
6771 return rt->fib6_nh->fib_nh_gw_family ||
6772 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6773 }
6774
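/* Build the nexthop group info from the entry's rt6 list: one nexthop per
 * sibling route, sized with struct_size() for the trailing array. The error
 * path unwinds only the nexthops initialized so far.
 */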
6775 static int
6776 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6777 struct mlxsw_sp_nexthop_group *nh_grp,
6778 struct mlxsw_sp_fib6_entry *fib6_entry)
6779 {
6780 struct mlxsw_sp_nexthop_group_info *nhgi;
6781 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6782 struct mlxsw_sp_nexthop *nh;
6783 int err, i;
6784
6785 nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6786 GFP_KERNEL);
6787 if (!nhgi)
6788 return -ENOMEM;
6789 nh_grp->nhgi = nhgi;
6790 nhgi->nh_grp = nh_grp;
6791 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6792 struct mlxsw_sp_rt6, list);
6793 nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6794 nhgi->count = fib6_entry->nrt6;
6795 for (i = 0; i < nhgi->count; i++) {
6796 struct fib6_info *rt = mlxsw_sp_rt6->rt;
6797
6798 nh = &nhgi->nexthops[i];
6799 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6800 if (err)
6801 goto err_nexthop6_init;
6802 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6803 }
6804 nh_grp->nhgi = nhgi;
6805 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
6806 if (err)
6807 goto err_group_inc;
6808 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6809 if (err)
6810 goto err_group_refresh;
6811
6812 return 0;
6813
6814 err_group_refresh:
6815 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6816 err_group_inc:
6817 i = nhgi->count;
6818 err_nexthop6_init:
6819 for (i--; i >= 0; i--) {
6820 nh = &nhgi->nexthops[i];
6821 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6822 }
6823 kfree(nhgi);
6824 return err;
6825 }
6826
6827 static void
6828 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
6829 struct mlxsw_sp_nexthop_group *nh_grp)
6830 {
6831 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
6832 int i;
6833
6834 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6835 for (i = nhgi->count - 1; i >= 0; i--) {
6836 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
6837
6838 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6839 }
6840 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6841 WARN_ON_ONCE(nhgi->adj_index_valid);
6842 kfree(nhgi);
6843 }
6844
6845 static struct mlxsw_sp_nexthop_group *
6846 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
6847 struct mlxsw_sp_fib6_entry *fib6_entry)
6848 {
6849 struct mlxsw_sp_nexthop_group *nh_grp;
6850 int err;
6851
6852 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
6853 if (!nh_grp)
6854 return ERR_PTR(-ENOMEM);
6855 INIT_LIST_HEAD(&nh_grp->vr_list);
6856 err = rhashtable_init(&nh_grp->vr_ht,
6857 &mlxsw_sp_nexthop_group_vr_ht_params);
6858 if (err)
6859 goto err_nexthop_group_vr_ht_init;
6860 INIT_LIST_HEAD(&nh_grp->fib_list);
6861 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
6862
6863 err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
6864 if (err)
6865 goto err_nexthop_group_info_init;
6866
6867 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
6868 if (err)
6869 goto err_nexthop_group_insert;
6870
6871 nh_grp->can_destroy = true;
6872
6873 return nh_grp;
6874
6875 err_nexthop_group_insert:
6876 mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6877 err_nexthop_group_info_init:
6878 rhashtable_destroy(&nh_grp->vr_ht);
6879 err_nexthop_group_vr_ht_init:
6880 kfree(nh_grp);
6881 return ERR_PTR(err);
6882 }
6883
6884 static void
6885 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
6886 struct mlxsw_sp_nexthop_group *nh_grp)
6887 {
6888 if (!nh_grp->can_destroy)
6889 return;
6890 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
6891 mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
6892 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
6893 rhashtable_destroy(&nh_grp->vr_ht);
6894 kfree(nh_grp);
6895 }
6896
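/* Resolve the nexthop group for an IPv6 entry: reuse the group backing a
 * nexthop object (rt->nh), share an existing compatible group, or create a
 * new one. The entry is then chained on the group's fib_list.
 */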
6897 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
6898 struct mlxsw_sp_fib6_entry *fib6_entry)
6899 {
6900 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
6901 struct mlxsw_sp_nexthop_group *nh_grp;
6902
6903 if (rt->nh) {
6904 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
6905 rt->nh->id);
6906 if (WARN_ON_ONCE(!nh_grp))
6907 return -EINVAL;
6908 goto out;
6909 }
6910
6911 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
6912 if (!nh_grp) {
6913 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
6914 if (IS_ERR(nh_grp))
6915 return PTR_ERR(nh_grp);
6916 }
6917
6918 /* The route and the nexthop are described by the same struct, so we
6919 	 * need to update the nexthop offload indication for the new route.
6920 */
6921 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
6922
6923 out:
6924 list_add_tail(&fib6_entry->common.nexthop_group_node,
6925 &nh_grp->fib_list);
6926 fib6_entry->common.nh_group = nh_grp;
6927
6928 return 0;
6929 }
6930
6931 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
6932 struct mlxsw_sp_fib_entry *fib_entry)
6933 {
6934 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
6935
6936 list_del(&fib_entry->nexthop_group_node);
6937 if (!list_empty(&nh_grp->fib_list))
6938 return;
6939
6940 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
6941 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
6942 return;
6943 }
6944
6945 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
6946 }
6947
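/* Rebind the entry to a (possibly new) nexthop group after its rt6 list
 * changed. On failure every step is rolled back so the entry ends up
 * attached to the old group again.
 */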
6948 static int
6949 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
6950 struct mlxsw_sp_fib6_entry *fib6_entry)
6951 {
6952 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
6953 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6954 int err;
6955
6956 mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
6957 fib6_entry->common.nh_group = NULL;
6958 list_del(&fib6_entry->common.nexthop_group_node);
6959
6960 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6961 if (err)
6962 goto err_nexthop6_group_get;
6963
6964 err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
6965 fib_node->fib);
6966 if (err)
6967 goto err_nexthop_group_vr_link;
6968
6969 /* In case this entry is offloaded, then the adjacency index
6970 * currently associated with it in the device's table is that
6971 * of the old group. Start using the new one instead.
6972 */
6973 err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
6974 if (err)
6975 goto err_fib_entry_update;
6976
6977 if (list_empty(&old_nh_grp->fib_list))
6978 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
6979
6980 return 0;
6981
6982 err_fib_entry_update:
6983 mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6984 fib_node->fib);
6985 err_nexthop_group_vr_link:
6986 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6987 err_nexthop6_group_get:
6988 list_add_tail(&fib6_entry->common.nexthop_group_node,
6989 &old_nh_grp->fib_list);
6990 fib6_entry->common.nh_group = old_nh_grp;
6991 mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
6992 return err;
6993 }
6994
6995 static int
6996 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6997 struct mlxsw_sp_fib6_entry *fib6_entry,
6998 struct fib6_info **rt_arr, unsigned int nrt6)
6999 {
7000 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7001 int err, i;
7002
7003 for (i = 0; i < nrt6; i++) {
7004 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7005 if (IS_ERR(mlxsw_sp_rt6)) {
7006 err = PTR_ERR(mlxsw_sp_rt6);
7007 goto err_rt6_unwind;
7008 }
7009
7010 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7011 fib6_entry->nrt6++;
7012 }
7013
7014 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
7015 if (err)
7016 goto err_rt6_unwind;
7017
7018 return 0;
7019
7020 err_rt6_unwind:
7021 for (; i > 0; i--) {
7022 fib6_entry->nrt6--;
7023 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7024 struct mlxsw_sp_rt6, list);
7025 list_del(&mlxsw_sp_rt6->list);
7026 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7027 }
7028 return err;
7029 }
7030
7031 static void
7032 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
7033 struct mlxsw_sp_fib6_entry *fib6_entry,
7034 struct fib6_info **rt_arr, unsigned int nrt6)
7035 {
7036 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7037 int i;
7038
7039 for (i = 0; i < nrt6; i++) {
7040 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
7041 rt_arr[i]);
7042 if (WARN_ON_ONCE(!mlxsw_sp_rt6))
7043 continue;
7044
7045 fib6_entry->nrt6--;
7046 list_del(&mlxsw_sp_rt6->list);
7047 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7048 }
7049
7050 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
7051 }
7052
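/* RTF_LOCAL routes are trapped to the CPU by default, unless the address is
 * the decap address of an IP-in-IP tunnel whose overlay device is up, or an
 * NVE decap address, in which case decapsulation is offloaded instead.
 */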
7053 static int
7054 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
7055 struct mlxsw_sp_fib_entry *fib_entry,
7056 const struct fib6_info *rt)
7057 {
7058 struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
7059 union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
7060 u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
7061 struct mlxsw_sp_router *router = mlxsw_sp->router;
7062 int ifindex = nhgi->nexthops[0].ifindex;
7063 struct mlxsw_sp_ipip_entry *ipip_entry;
7064
7065 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7066 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
7067 MLXSW_SP_L3_PROTO_IPV6,
7068 dip);
7069
7070 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
7071 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
7072 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
7073 ipip_entry);
7074 }
7075 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
7076 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
7077 u32 tunnel_index;
7078
7079 tunnel_index = router->nve_decap_config.tunnel_index;
7080 fib_entry->decap.tunnel_index = tunnel_index;
7081 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
7082 }
7083
7084 return 0;
7085 }
7086
7087 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
7088 struct mlxsw_sp_fib_entry *fib_entry,
7089 const struct fib6_info *rt)
7090 {
7091 if (rt->fib6_flags & RTF_LOCAL)
7092 return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
7093 rt);
7094 if (rt->fib6_flags & RTF_ANYCAST)
7095 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7096 else if (rt->fib6_type == RTN_BLACKHOLE)
7097 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
7098 else if (rt->fib6_flags & RTF_REJECT)
7099 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
7100 else if (fib_entry->nh_group->nhgi->gateway)
7101 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
7102 else
7103 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
7104
7105 return 0;
7106 }
7107
7108 static void
7109 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
7110 {
7111 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
7112
7113 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
7114 list) {
7115 fib6_entry->nrt6--;
7116 list_del(&mlxsw_sp_rt6->list);
7117 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7118 }
7119 }
7120
7121 static struct mlxsw_sp_fib6_entry *
7122 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
7123 struct mlxsw_sp_fib_node *fib_node,
7124 struct fib6_info **rt_arr, unsigned int nrt6)
7125 {
7126 struct mlxsw_sp_fib6_entry *fib6_entry;
7127 struct mlxsw_sp_fib_entry *fib_entry;
7128 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7129 int err, i;
7130
7131 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
7132 if (!fib6_entry)
7133 return ERR_PTR(-ENOMEM);
7134 fib_entry = &fib6_entry->common;
7135
7136 INIT_LIST_HEAD(&fib6_entry->rt6_list);
7137
7138 for (i = 0; i < nrt6; i++) {
7139 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7140 if (IS_ERR(mlxsw_sp_rt6)) {
7141 err = PTR_ERR(mlxsw_sp_rt6);
7142 goto err_rt6_unwind;
7143 }
7144 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7145 fib6_entry->nrt6++;
7146 }
7147
7148 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7149 if (err)
7150 goto err_rt6_unwind;
7151
7152 err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
7153 fib_node->fib);
7154 if (err)
7155 goto err_nexthop_group_vr_link;
7156
7157 err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
7158 if (err)
7159 goto err_fib6_entry_type_set;
7160
7161 fib_entry->fib_node = fib_node;
7162
7163 return fib6_entry;
7164
7165 err_fib6_entry_type_set:
7166 mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
7167 err_nexthop_group_vr_link:
7168 mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
7169 err_rt6_unwind:
7170 for (; i > 0; i--) {
7171 fib6_entry->nrt6--;
7172 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7173 struct mlxsw_sp_rt6, list);
7174 list_del(&mlxsw_sp_rt6->list);
7175 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7176 }
7177 kfree(fib6_entry);
7178 return ERR_PTR(err);
7179 }
7180
7181 static void
7182 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
7183 struct mlxsw_sp_fib6_entry *fib6_entry)
7184 {
7185 mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
7186 }
7187
7188 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
7189 struct mlxsw_sp_fib6_entry *fib6_entry)
7190 {
7191 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7192
7193 mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
7194 mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7195 fib_node->fib);
7196 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7197 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7198 WARN_ON(fib6_entry->nrt6);
7199 kfree(fib6_entry);
7200 }
7201
7202 static struct mlxsw_sp_fib6_entry *
7203 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7204 const struct fib6_info *rt)
7205 {
7206 struct mlxsw_sp_fib6_entry *fib6_entry;
7207 struct mlxsw_sp_fib_node *fib_node;
7208 struct mlxsw_sp_fib *fib;
7209 struct fib6_info *cmp_rt;
7210 struct mlxsw_sp_vr *vr;
7211
7212 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7213 if (!vr)
7214 return NULL;
7215 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7216
7217 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7218 sizeof(rt->fib6_dst.addr),
7219 rt->fib6_dst.plen);
7220 if (!fib_node)
7221 return NULL;
7222
7223 fib6_entry = container_of(fib_node->fib_entry,
7224 struct mlxsw_sp_fib6_entry, common);
7225 cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7226 if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7227 rt->fib6_metric == cmp_rt->fib6_metric &&
7228 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7229 return fib6_entry;
7230
7231 return NULL;
7232 }
7233
7234 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7235 {
7236 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7237 struct mlxsw_sp_fib6_entry *fib6_replaced;
7238 struct fib6_info *rt, *rt_replaced;
7239
7240 if (!fib_node->fib_entry)
7241 return true;
7242
7243 fib6_replaced = container_of(fib_node->fib_entry,
7244 struct mlxsw_sp_fib6_entry,
7245 common);
7246 rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7247 rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7248 if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7249 rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7250 return false;
7251
7252 return true;
7253 }
7254
7255 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7256 struct fib6_info **rt_arr,
7257 unsigned int nrt6)
7258 {
7259 struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7260 struct mlxsw_sp_fib_entry *replaced;
7261 struct mlxsw_sp_fib_node *fib_node;
7262 struct fib6_info *rt = rt_arr[0];
7263 int err;
7264
7265 if (rt->fib6_src.plen)
7266 return -EINVAL;
7267
7268 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7269 return 0;
7270
7271 if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7272 return 0;
7273
7274 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7275 &rt->fib6_dst.addr,
7276 sizeof(rt->fib6_dst.addr),
7277 rt->fib6_dst.plen,
7278 MLXSW_SP_L3_PROTO_IPV6);
7279 if (IS_ERR(fib_node))
7280 return PTR_ERR(fib_node);
7281
7282 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7283 nrt6);
7284 if (IS_ERR(fib6_entry)) {
7285 err = PTR_ERR(fib6_entry);
7286 goto err_fib6_entry_create;
7287 }
7288
7289 if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7290 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7291 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7292 return 0;
7293 }
7294
7295 replaced = fib_node->fib_entry;
7296 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
7297 if (err)
7298 goto err_fib_node_entry_link;
7299
7300 /* Nothing to replace */
7301 if (!replaced)
7302 return 0;
7303
7304 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7305 fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7306 common);
7307 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7308
7309 return 0;
7310
7311 err_fib_node_entry_link:
7312 fib_node->fib_entry = replaced;
7313 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7314 err_fib6_entry_create:
7315 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7316 return err;
7317 }
7318
7319 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7320 struct fib6_info **rt_arr,
7321 unsigned int nrt6)
7322 {
7323 struct mlxsw_sp_fib6_entry *fib6_entry;
7324 struct mlxsw_sp_fib_node *fib_node;
7325 struct fib6_info *rt = rt_arr[0];
7326 int err;
7327
7328 if (rt->fib6_src.plen)
7329 return -EINVAL;
7330
7331 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7332 return 0;
7333
7334 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7335 &rt->fib6_dst.addr,
7336 sizeof(rt->fib6_dst.addr),
7337 rt->fib6_dst.plen,
7338 MLXSW_SP_L3_PROTO_IPV6);
7339 if (IS_ERR(fib_node))
7340 return PTR_ERR(fib_node);
7341
7342 if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7343 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7344 return -EINVAL;
7345 }
7346
7347 fib6_entry = container_of(fib_node->fib_entry,
7348 struct mlxsw_sp_fib6_entry, common);
7349 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
7350 nrt6);
7351 if (err)
7352 goto err_fib6_entry_nexthop_add;
7353
7354 return 0;
7355
7356 err_fib6_entry_nexthop_add:
7357 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7358 return err;
7359 }
7360
7361 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7362 struct fib6_info **rt_arr,
7363 unsigned int nrt6)
7364 {
7365 struct mlxsw_sp_fib6_entry *fib6_entry;
7366 struct mlxsw_sp_fib_node *fib_node;
7367 struct fib6_info *rt = rt_arr[0];
7368
7369 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7370 return;
7371
7372 /* Multipath routes are first added to the FIB trie and only then
7373 * notified. If we vetoed the addition, we will get a delete
7374 * notification for a route we do not have. Therefore, do not warn if
7375 * route was not found.
7376 */
7377 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7378 if (!fib6_entry)
7379 return;
7380
7381 /* If not all the nexthops are deleted, then only reduce the nexthop
7382 * group.
7383 */
7384 if (nrt6 != fib6_entry->nrt6) {
7385 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
7386 nrt6);
7387 return;
7388 }
7389
7390 fib_node = fib6_entry->common.fib_node;
7391
7392 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
7393 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7394 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7395 }
7396
7397 static struct mlxsw_sp_mr_table *
7398 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7399 {
7400 if (family == RTNL_FAMILY_IPMR)
7401 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7402 else
7403 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7404 }
7405
7406 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7407 struct mfc_entry_notifier_info *men_info,
7408 bool replace)
7409 {
7410 struct mlxsw_sp_mr_table *mrt;
7411 struct mlxsw_sp_vr *vr;
7412
7413 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7414 if (IS_ERR(vr))
7415 return PTR_ERR(vr);
7416
7417 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7418 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7419 }
7420
7421 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7422 struct mfc_entry_notifier_info *men_info)
7423 {
7424 struct mlxsw_sp_mr_table *mrt;
7425 struct mlxsw_sp_vr *vr;
7426
7427 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7428 if (WARN_ON(!vr))
7429 return;
7430
7431 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7432 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7433 mlxsw_sp_vr_put(mlxsw_sp, vr);
7434 }
7435
7436 static int
7437 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7438 struct vif_entry_notifier_info *ven_info)
7439 {
7440 struct mlxsw_sp_mr_table *mrt;
7441 struct mlxsw_sp_rif *rif;
7442 struct mlxsw_sp_vr *vr;
7443
7444 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7445 if (IS_ERR(vr))
7446 return PTR_ERR(vr);
7447
7448 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7449 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7450 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7451 ven_info->vif_index,
7452 ven_info->vif_flags, rif);
7453 }
7454
7455 static void
7456 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7457 struct vif_entry_notifier_info *ven_info)
7458 {
7459 struct mlxsw_sp_mr_table *mrt;
7460 struct mlxsw_sp_vr *vr;
7461
7462 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7463 if (WARN_ON(!vr))
7464 return;
7465
7466 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7467 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7468 mlxsw_sp_vr_put(mlxsw_sp, vr);
7469 }
7470
7471 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7472 struct mlxsw_sp_fib_node *fib_node)
7473 {
7474 struct mlxsw_sp_fib4_entry *fib4_entry;
7475
7476 fib4_entry = container_of(fib_node->fib_entry,
7477 struct mlxsw_sp_fib4_entry, common);
7478 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7479 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7480 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7481 }
7482
7483 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7484 struct mlxsw_sp_fib_node *fib_node)
7485 {
7486 struct mlxsw_sp_fib6_entry *fib6_entry;
7487
7488 fib6_entry = container_of(fib_node->fib_entry,
7489 struct mlxsw_sp_fib6_entry, common);
7490 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7491 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7492 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7493 }
7494
7495 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7496 struct mlxsw_sp_fib_node *fib_node)
7497 {
7498 switch (fib_node->fib->proto) {
7499 case MLXSW_SP_L3_PROTO_IPV4:
7500 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7501 break;
7502 case MLXSW_SP_L3_PROTO_IPV6:
7503 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7504 break;
7505 }
7506 }
7507
7508 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7509 struct mlxsw_sp_vr *vr,
7510 enum mlxsw_sp_l3proto proto)
7511 {
7512 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7513 struct mlxsw_sp_fib_node *fib_node, *tmp;
7514
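	/* Flushing the last node can release the FIB itself through the VR
	 * reference it holds, so detect list exhaustion before the flush
	 * rather than letting list_for_each_entry_safe() touch a freed head.
	 */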
7515 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7516 bool do_break = &tmp->list == &fib->node_list;
7517
7518 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7519 if (do_break)
7520 break;
7521 }
7522 }
7523
7524 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7525 {
7526 int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
7527 int i, j;
7528
7529 for (i = 0; i < max_vrs; i++) {
7530 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7531
7532 if (!mlxsw_sp_vr_is_used(vr))
7533 continue;
7534
7535 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7536 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7537 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7538
7539 		/* If the virtual router was only used for IPv4, then it's no
7540 * longer used.
7541 */
7542 if (!mlxsw_sp_vr_is_used(vr))
7543 continue;
7544 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7545 }
7546 }
7547
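/* FIB notifications arrive in atomic context, so the notifier only copies
 * the notification info (taking references where needed) and defers the
 * actual processing to a work item executed under the router lock.
 */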
7548 struct mlxsw_sp_fib6_event_work {
7549 struct fib6_info **rt_arr;
7550 unsigned int nrt6;
7551 };
7552
7553 struct mlxsw_sp_fib_event_work {
7554 struct work_struct work;
7555 netdevice_tracker dev_tracker;
7556 union {
7557 struct mlxsw_sp_fib6_event_work fib6_work;
7558 struct fib_entry_notifier_info fen_info;
7559 struct fib_rule_notifier_info fr_info;
7560 struct fib_nh_notifier_info fnh_info;
7561 struct mfc_entry_notifier_info men_info;
7562 struct vif_entry_notifier_info ven_info;
7563 };
7564 struct mlxsw_sp *mlxsw_sp;
7565 unsigned long event;
7566 };
7567
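/* Snapshot the route and its siblings into an array, holding a reference on
 * each fib6_info so they outlive the atomic notifier context until the work
 * item has run.
 */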
7568 static int
7569 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
7570 struct fib6_entry_notifier_info *fen6_info)
7571 {
7572 struct fib6_info *rt = fen6_info->rt;
7573 struct fib6_info **rt_arr;
7574 struct fib6_info *iter;
7575 unsigned int nrt6;
7576 int i = 0;
7577
7578 nrt6 = fen6_info->nsiblings + 1;
7579
7580 rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7581 if (!rt_arr)
7582 return -ENOMEM;
7583
7584 fib6_work->rt_arr = rt_arr;
7585 fib6_work->nrt6 = nrt6;
7586
7587 rt_arr[0] = rt;
7588 fib6_info_hold(rt);
7589
7590 if (!fen6_info->nsiblings)
7591 return 0;
7592
7593 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7594 if (i == fen6_info->nsiblings)
7595 break;
7596
7597 rt_arr[i + 1] = iter;
7598 fib6_info_hold(iter);
7599 i++;
7600 }
7601 WARN_ON_ONCE(i != fen6_info->nsiblings);
7602
7603 return 0;
7604 }
7605
7606 static void
7607 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
7608 {
7609 int i;
7610
7611 for (i = 0; i < fib6_work->nrt6; i++)
7612 mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
7613 kfree(fib6_work->rt_arr);
7614 }
7615
7616 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
7617 {
7618 struct mlxsw_sp_fib_event_work *fib_work =
7619 container_of(work, struct mlxsw_sp_fib_event_work, work);
7620 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7621 int err;
7622
7623 mutex_lock(&mlxsw_sp->router->lock);
7624 mlxsw_sp_span_respin(mlxsw_sp);
7625
7626 switch (fib_work->event) {
7627 case FIB_EVENT_ENTRY_REPLACE:
7628 err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
7629 &fib_work->fen_info);
7630 if (err) {
7631 dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7632 mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7633 &fib_work->fen_info);
7634 }
7635 fib_info_put(fib_work->fen_info.fi);
7636 break;
7637 case FIB_EVENT_ENTRY_DEL:
7638 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
7639 fib_info_put(fib_work->fen_info.fi);
7640 break;
7641 case FIB_EVENT_NH_ADD:
7642 case FIB_EVENT_NH_DEL:
7643 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
7644 fib_work->fnh_info.fib_nh);
7645 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
7646 break;
7647 }
7648 mutex_unlock(&mlxsw_sp->router->lock);
7649 kfree(fib_work);
7650 }
7651
7652 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
7653 {
7654 struct mlxsw_sp_fib_event_work *fib_work =
7655 container_of(work, struct mlxsw_sp_fib_event_work, work);
7656 struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
7657 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7658 int err;
7659
7660 mutex_lock(&mlxsw_sp->router->lock);
7661 mlxsw_sp_span_respin(mlxsw_sp);
7662
7663 switch (fib_work->event) {
7664 case FIB_EVENT_ENTRY_REPLACE:
7665 err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
7666 fib6_work->rt_arr,
7667 fib6_work->nrt6);
7668 if (err) {
7669 dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7670 mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7671 fib6_work->rt_arr,
7672 fib6_work->nrt6);
7673 }
7674 mlxsw_sp_router_fib6_work_fini(fib6_work);
7675 break;
7676 case FIB_EVENT_ENTRY_APPEND:
7677 err = mlxsw_sp_router_fib6_append(mlxsw_sp,
7678 fib6_work->rt_arr,
7679 fib6_work->nrt6);
7680 if (err) {
7681 dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7682 mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7683 fib6_work->rt_arr,
7684 fib6_work->nrt6);
7685 }
7686 mlxsw_sp_router_fib6_work_fini(fib6_work);
7687 break;
7688 case FIB_EVENT_ENTRY_DEL:
7689 mlxsw_sp_router_fib6_del(mlxsw_sp,
7690 fib6_work->rt_arr,
7691 fib6_work->nrt6);
7692 mlxsw_sp_router_fib6_work_fini(fib6_work);
7693 break;
7694 }
7695 mutex_unlock(&mlxsw_sp->router->lock);
7696 kfree(fib_work);
7697 }
7698
7699 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
7700 {
7701 struct mlxsw_sp_fib_event_work *fib_work =
7702 container_of(work, struct mlxsw_sp_fib_event_work, work);
7703 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7704 bool replace;
7705 int err;
7706
7707 rtnl_lock();
7708 mutex_lock(&mlxsw_sp->router->lock);
7709 switch (fib_work->event) {
7710 case FIB_EVENT_ENTRY_REPLACE:
7711 case FIB_EVENT_ENTRY_ADD:
7712 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
7713
7714 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
7715 replace);
7716 if (err)
7717 dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7718 mr_cache_put(fib_work->men_info.mfc);
7719 break;
7720 case FIB_EVENT_ENTRY_DEL:
7721 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
7722 mr_cache_put(fib_work->men_info.mfc);
7723 break;
7724 case FIB_EVENT_VIF_ADD:
7725 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7726 &fib_work->ven_info);
7727 if (err)
7728 dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7729 netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
7730 break;
7731 case FIB_EVENT_VIF_DEL:
7732 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
7733 &fib_work->ven_info);
7734 netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
7735 break;
7736 }
7737 mutex_unlock(&mlxsw_sp->router->lock);
7738 rtnl_unlock();
7739 kfree(fib_work);
7740 }
7741
7742 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
7743 struct fib_notifier_info *info)
7744 {
7745 struct fib_entry_notifier_info *fen_info;
7746 struct fib_nh_notifier_info *fnh_info;
7747
7748 switch (fib_work->event) {
7749 case FIB_EVENT_ENTRY_REPLACE:
7750 case FIB_EVENT_ENTRY_DEL:
7751 fen_info = container_of(info, struct fib_entry_notifier_info,
7752 info);
7753 fib_work->fen_info = *fen_info;
7754 /* Take reference on fib_info to prevent it from being
7755 * freed while work is queued. Release it afterwards.
7756 */
7757 fib_info_hold(fib_work->fen_info.fi);
7758 break;
7759 case FIB_EVENT_NH_ADD:
7760 case FIB_EVENT_NH_DEL:
7761 fnh_info = container_of(info, struct fib_nh_notifier_info,
7762 info);
7763 fib_work->fnh_info = *fnh_info;
7764 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
7765 break;
7766 }
7767 }
7768
7769 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
7770 struct fib_notifier_info *info)
7771 {
7772 struct fib6_entry_notifier_info *fen6_info;
7773 int err;
7774
7775 switch (fib_work->event) {
7776 case FIB_EVENT_ENTRY_REPLACE:
7777 case FIB_EVENT_ENTRY_APPEND:
7778 case FIB_EVENT_ENTRY_DEL:
7779 fen6_info = container_of(info, struct fib6_entry_notifier_info,
7780 info);
7781 err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
7782 fen6_info);
7783 if (err)
7784 return err;
7785 break;
7786 }
7787
7788 return 0;
7789 }
7790
7791 static void
7792 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
7793 struct fib_notifier_info *info)
7794 {
7795 switch (fib_work->event) {
7796 case FIB_EVENT_ENTRY_REPLACE:
7797 case FIB_EVENT_ENTRY_ADD:
7798 case FIB_EVENT_ENTRY_DEL:
7799 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
7800 mr_cache_hold(fib_work->men_info.mfc);
7801 break;
7802 case FIB_EVENT_VIF_ADD:
7803 case FIB_EVENT_VIF_DEL:
7804 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
7805 netdev_hold(fib_work->ven_info.dev, &fib_work->dev_tracker,
7806 GFP_ATOMIC);
7807 break;
7808 }
7809 }
7810
7811 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7812 struct fib_notifier_info *info,
7813 struct mlxsw_sp *mlxsw_sp)
7814 {
7815 struct netlink_ext_ack *extack = info->extack;
7816 struct fib_rule_notifier_info *fr_info;
7817 struct fib_rule *rule;
7818 int err = 0;
7819
7820 /* nothing to do at the moment */
7821 if (event == FIB_EVENT_RULE_DEL)
7822 return 0;
7823
7824 fr_info = container_of(info, struct fib_rule_notifier_info, info);
7825 rule = fr_info->rule;
7826
7827 /* Rule only affects locally generated traffic */
7828 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7829 return 0;
7830
7831 switch (info->family) {
7832 case AF_INET:
7833 if (!fib4_rule_default(rule) && !rule->l3mdev)
7834 err = -EOPNOTSUPP;
7835 break;
7836 case AF_INET6:
7837 if (!fib6_rule_default(rule) && !rule->l3mdev)
7838 err = -EOPNOTSUPP;
7839 break;
7840 case RTNL_FAMILY_IPMR:
7841 if (!ipmr_rule_default(rule) && !rule->l3mdev)
7842 err = -EOPNOTSUPP;
7843 break;
7844 case RTNL_FAMILY_IP6MR:
7845 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7846 err = -EOPNOTSUPP;
7847 break;
7848 }
7849
7850 if (err < 0)
7851 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7852
7853 return err;
7854 }
7855
7856 /* Called with rcu_read_lock() */
7857 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7858 unsigned long event, void *ptr)
7859 {
7860 struct mlxsw_sp_fib_event_work *fib_work;
7861 struct fib_notifier_info *info = ptr;
7862 struct mlxsw_sp_router *router;
7863 int err;
7864
7865 if ((info->family != AF_INET && info->family != AF_INET6 &&
7866 info->family != RTNL_FAMILY_IPMR &&
7867 info->family != RTNL_FAMILY_IP6MR))
7868 return NOTIFY_DONE;
7869
7870 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7871
7872 switch (event) {
7873 case FIB_EVENT_RULE_ADD:
7874 case FIB_EVENT_RULE_DEL:
7875 err = mlxsw_sp_router_fib_rule_event(event, info,
7876 router->mlxsw_sp);
7877 return notifier_from_errno(err);
7878 case FIB_EVENT_ENTRY_ADD:
7879 case FIB_EVENT_ENTRY_REPLACE:
7880 case FIB_EVENT_ENTRY_APPEND:
7881 if (info->family == AF_INET) {
7882 struct fib_entry_notifier_info *fen_info = ptr;
7883
7884 if (fen_info->fi->fib_nh_is_v6) {
7885 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7886 return notifier_from_errno(-EINVAL);
7887 }
7888 }
7889 break;
7890 }
7891
7892 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
7893 if (!fib_work)
7894 return NOTIFY_BAD;
7895
7896 fib_work->mlxsw_sp = router->mlxsw_sp;
7897 fib_work->event = event;
7898
7899 switch (info->family) {
7900 case AF_INET:
7901 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
7902 mlxsw_sp_router_fib4_event(fib_work, info);
7903 break;
7904 case AF_INET6:
7905 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
7906 err = mlxsw_sp_router_fib6_event(fib_work, info);
7907 if (err)
7908 goto err_fib_event;
7909 break;
7910 case RTNL_FAMILY_IP6MR:
7911 case RTNL_FAMILY_IPMR:
7912 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
7913 mlxsw_sp_router_fibmr_event(fib_work, info);
7914 break;
7915 }
7916
7917 mlxsw_core_schedule_work(&fib_work->work);
7918
7919 return NOTIFY_DONE;
7920
7921 err_fib_event:
7922 kfree(fib_work);
7923 return NOTIFY_BAD;
7924 }
7925
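/* Linear scan of the RIF array by backing netdevice; callers hold the
 * router lock or are otherwise serialized against RIF changes.
 */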
7926 static struct mlxsw_sp_rif *
7927 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7928 const struct net_device *dev)
7929 {
7930 int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7931 int i;
7932
7933 for (i = 0; i < max_rifs; i++)
7934 if (mlxsw_sp->router->rifs[i] &&
7935 mlxsw_sp_rif_dev_is(mlxsw_sp->router->rifs[i], dev))
7936 return mlxsw_sp->router->rifs[i];
7937
7938 return NULL;
7939 }
7940
7941 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7942 const struct net_device *dev)
7943 {
7944 struct mlxsw_sp_rif *rif;
7945
7946 mutex_lock(&mlxsw_sp->router->lock);
7947 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7948 mutex_unlock(&mlxsw_sp->router->lock);
7949
7950 return rif;
7951 }
7952
7953 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7954 {
7955 struct mlxsw_sp_rif *rif;
7956 u16 vid = 0;
7957
7958 mutex_lock(&mlxsw_sp->router->lock);
7959 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7960 if (!rif)
7961 goto out;
7962
7963 /* We only return the VID for VLAN RIFs. Otherwise we return an
7964 * invalid value (0).
7965 */
7966 if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7967 goto out;
7968
7969 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7970
7971 out:
7972 mutex_unlock(&mlxsw_sp->router->lock);
7973 return vid;
7974 }
7975
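/* Disable a RIF in hardware with a read-modify-write of the RITR register,
 * preserving the rest of its configuration.
 */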
7976 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7977 {
7978 char ritr_pl[MLXSW_REG_RITR_LEN];
7979 int err;
7980
7981 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
7982 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7983 if (err)
7984 return err;
7985
7986 mlxsw_reg_ritr_enable_set(ritr_pl, false);
7987 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7988 }
7989
7990 static int mlxsw_sp_router_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
7991 struct mlxsw_sp_rif *rif)
7992 {
7993 int err;
7994
7995 err = mlxsw_sp_neigh_rif_made_sync(mlxsw_sp, rif);
7996 if (err)
7997 return err;
7998
7999 err = mlxsw_sp_nexthop_rif_made_sync(mlxsw_sp, rif);
8000 if (err)
8001 goto err_nexthop;
8002
8003 return 0;
8004
8005 err_nexthop:
8006 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8007 return err;
8008 }
8009
8010 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
8011 struct mlxsw_sp_rif *rif)
8012 {
8013 /* Signal to nexthop cleanup that the RIF is going away. */
8014 rif->crif->rif = NULL;
8015
8016 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
8017 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
8018 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8019 }
8020
8021 static bool __mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
8022 {
8023 struct inet6_dev *inet6_dev;
8024 struct in_device *idev;
8025
8026 idev = __in_dev_get_rcu(dev);
8027 if (idev && idev->ifa_list)
8028 return false;
8029
8030 inet6_dev = __in6_dev_get(dev);
8031 if (inet6_dev && !list_empty(&inet6_dev->addr_list))
8032 return false;
8033
8034 return true;
8035 }
8036
8037 static bool mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
8038 {
8039 bool addr_list_empty;
8040
8041 rcu_read_lock();
8042 addr_list_empty = __mlxsw_sp_dev_addr_list_empty(dev);
8043 rcu_read_unlock();
8044
8045 return addr_list_empty;
8046 }
8047
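/* Decide whether an address event should change RIF configuration: create a
 * RIF on NETDEV_UP when none exists; on NETDEV_DOWN, tear it down once both
 * the IPv4 and IPv6 address lists are empty (macvlans and L3 slaves get
 * special treatment, see below).
 */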
8048 static bool
8049 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
8050 unsigned long event)
8051 {
8052 bool addr_list_empty;
8053
8054 switch (event) {
8055 case NETDEV_UP:
8056 return rif == NULL;
8057 case NETDEV_DOWN:
8058 addr_list_empty = mlxsw_sp_dev_addr_list_empty(dev);
8059
8060 		/* macvlans do not have a RIF, but rather piggyback on the
8061 * RIF of their lower device.
8062 */
8063 if (netif_is_macvlan(dev) && addr_list_empty)
8064 return true;
8065
8066 if (rif && addr_list_empty &&
8067 !netif_is_l3_slave(mlxsw_sp_rif_dev(rif)))
8068 return true;
8069 /* It is possible we already removed the RIF ourselves
8070 * if it was assigned to a netdev that is now a bridge
8071 * or LAG slave.
8072 */
8073 return false;
8074 }
8075
8076 return false;
8077 }
8078
8079 static enum mlxsw_sp_rif_type
8080 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8081 const struct net_device *dev)
8082 {
8083 enum mlxsw_sp_fid_type type;
8084
8085 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8086 return MLXSW_SP_RIF_TYPE_IPIP_LB;
8087
8088 /* Otherwise RIF type is derived from the type of the underlying FID. */
8089 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8090 type = MLXSW_SP_FID_TYPE_8021Q;
8091 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8092 type = MLXSW_SP_FID_TYPE_8021Q;
8093 else if (netif_is_bridge_master(dev))
8094 type = MLXSW_SP_FID_TYPE_8021D;
8095 else
8096 type = MLXSW_SP_FID_TYPE_RFID;
8097
8098 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8099 }
8100
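/* RIF indexes come from a genpool. gen_pool_alloc() returns 0 on failure,
 * so the pool hands out addresses offset by MLXSW_SP_ROUTER_GENALLOC_OFFSET;
 * subtracting the offset here means a real allocation is never confused
 * with failure and RIF index 0 stays usable.
 */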
8101 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
8102 u8 rif_entries)
8103 {
8104 *p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
8105 rif_entries);
8106 if (*p_rif_index == 0)
8107 return -ENOBUFS;
8108 *p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;
8109
8110 /* RIF indexes must be aligned to the allocation size. */
8111 WARN_ON_ONCE(*p_rif_index % rif_entries);
8112
8113 return 0;
8114 }
8115
8116 static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8117 u8 rif_entries)
8118 {
8119 gen_pool_free(mlxsw_sp->router->rifs_table,
8120 MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
8121 }
8122
8123 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8124 u16 vr_id,
8125 struct mlxsw_sp_crif *crif)
8126 {
8127 struct net_device *l3_dev = crif ? crif->key.dev : NULL;
8128 struct mlxsw_sp_rif *rif;
8129
8130 rif = kzalloc(rif_size, GFP_KERNEL);
8131 if (!rif)
8132 return NULL;
8133
8134 INIT_LIST_HEAD(&rif->neigh_list);
8135 if (l3_dev) {
8136 ether_addr_copy(rif->addr, l3_dev->dev_addr);
8137 rif->mtu = l3_dev->mtu;
8138 }
8139 rif->vr_id = vr_id;
8140 rif->rif_index = rif_index;
8141 if (crif) {
8142 rif->crif = crif;
8143 crif->rif = rif;
8144 }
8145
8146 return rif;
8147 }
8148
8149 static void mlxsw_sp_rif_free(struct mlxsw_sp_rif *rif)
8150 {
8151 WARN_ON(!list_empty(&rif->neigh_list));
8152
8153 if (rif->crif)
8154 rif->crif->rif = NULL;
8155 kfree(rif);
8156 }
8157
8158 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8159 u16 rif_index)
8160 {
8161 return mlxsw_sp->router->rifs[rif_index];
8162 }
8163
8164 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8165 {
8166 return rif->rif_index;
8167 }
8168
8169 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8170 {
8171 return lb_rif->common.rif_index;
8172 }
8173
8174 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8175 {
8176 struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
8177 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
8178 struct mlxsw_sp_vr *ul_vr;
8179
8180 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8181 if (WARN_ON(IS_ERR(ul_vr)))
8182 return 0;
8183
8184 return ul_vr->id;
8185 }
8186
8187 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8188 {
8189 return lb_rif->ul_rif_id;
8190 }
8191
8192 static bool
8193 mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
8194 {
8195 return mlxsw_sp_rif_counter_valid_get(rif,
8196 MLXSW_SP_RIF_COUNTER_EGRESS) &&
8197 mlxsw_sp_rif_counter_valid_get(rif,
8198 MLXSW_SP_RIF_COUNTER_INGRESS);
8199 }
8200
8201 static int
8202 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
8203 {
8204 int err;
8205
8206 err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8207 if (err)
8208 return err;
8209
8210 /* Clear stale data. */
8211 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8212 MLXSW_SP_RIF_COUNTER_INGRESS,
8213 NULL);
8214 if (err)
8215 goto err_clear_ingress;
8216
8217 err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8218 if (err)
8219 goto err_alloc_egress;
8220
8221 /* Clear stale data. */
8222 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8223 MLXSW_SP_RIF_COUNTER_EGRESS,
8224 NULL);
8225 if (err)
8226 goto err_clear_egress;
8227
8228 return 0;
8229
8230 err_clear_egress:
8231 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8232 err_alloc_egress:
8233 err_clear_ingress:
8234 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8235 return err;
8236 }
8237
8238 static void
8239 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
8240 {
8241 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8242 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8243 }
8244
8245 static void
mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif * rif,struct netdev_notifier_offload_xstats_info * info)8246 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
8247 struct netdev_notifier_offload_xstats_info *info)
8248 {
8249 if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8250 return;
8251 netdev_offload_xstats_report_used(info->report_used);
8252 }
8253
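/* Fetch-and-clear both counters and translate the basic counter set into
 * struct rtnl_hw_stats64. Only "good" unicast, multicast and broadcast
 * packets count as traffic; note that the multicast field below also
 * includes broadcast packets.
 */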
static int
mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
				    struct rtnl_hw_stats64 *p_stats)
{
	struct mlxsw_sp_rif_counter_set_basic ingress;
	struct mlxsw_sp_rif_counter_set_basic egress;
	int err;

	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_INGRESS,
					       &ingress);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_EGRESS,
					       &egress);
	if (err)
		return err;

#define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX)		\
		((SET.good_unicast_ ## SFX) +		\
		 (SET.good_multicast_ ## SFX) +		\
		 (SET.good_broadcast_ ## SFX))

	p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
	p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
	p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
	p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
	p_stats->rx_errors = ingress.error_packets;
	p_stats->tx_errors = egress.error_packets;
	p_stats->rx_dropped = ingress.discard_packets;
	p_stats->tx_dropped = egress.discard_packets;
	p_stats->multicast = ingress.good_multicast_packets +
			     ingress.good_broadcast_packets;

#undef MLXSW_SP_ROUTER_ALL_GOOD

	return 0;
}

static int
mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
					   struct netdev_notifier_offload_xstats_info *info)
{
	struct rtnl_hw_stats64 stats = {};
	int err;

	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
		return 0;

	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
	if (err)
		return err;

	netdev_offload_xstats_report_delta(info->report_delta, &stats);
	return 0;
}

struct mlxsw_sp_router_hwstats_notify_work {
	struct work_struct work;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
};

static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_router_hwstats_notify_work *hws_work =
		container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
			     work);

	rtnl_lock();
	rtnl_offload_xstats_notify(hws_work->dev);
	rtnl_unlock();
	netdev_put(hws_work->dev, &hws_work->dev_tracker);
	kfree(hws_work);
}

static void
mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
{
	struct mlxsw_sp_router_hwstats_notify_work *hws_work;

	/* To collect notification payload, the core ends up sending another
	 * notifier block message, which would deadlock on the attempt to
	 * acquire the router lock again. Just postpone the notification until
	 * later.
	 */

	hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
	if (!hws_work)
		return;

	INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
	netdev_hold(dev, &hws_work->dev_tracker, GFP_KERNEL);
	hws_work->dev = dev;
	mlxsw_core_schedule_work(&hws_work->work);
}

int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_rif_dev(rif)->ifindex;
}

bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif)
{
	return !!mlxsw_sp_rif_dev(rif);
}

bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
			 const struct net_device *dev)
{
	return mlxsw_sp_rif_dev(rif) == dev;
}

static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
{
	struct rtnl_hw_stats64 stats = {};

	if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
		netdev_offload_xstats_push_delta(mlxsw_sp_rif_dev(rif),
						 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
						 &stats);
}

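/* Create a RIF for a netdevice: bind it to a virtual router (the one of the
 * netdevice's L3 master, or the main table), allocate a RIF index, look up
 * the CRIF that tracks the netdevice, and let the type-specific ops allocate
 * a FID and program the hardware. Multicast router tables and RIF counters
 * are hooked up last; every step is unwound in reverse on failure.
 */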
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack)
{
	u8 rif_entries = params->double_entry ? 2 : 1;
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp_fid *fid = NULL;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_crif *crif;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int i, err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->router->rif_ops_arr[type];

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	vr->rif_count++;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		goto err_rif_index_alloc;
	}

	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, params->dev);
	if (WARN_ON(!crif)) {
		err = -ENOENT;
		goto err_crif_lookup;
	}

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, crif);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	netdev_hold(params->dev, &rif->dev_tracker, GFP_KERNEL);
	mlxsw_sp->router->rifs[rif_index] = rif;
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;
	rif->rif_entries = rif_entries;

	if (ops->fid_get) {
		fid = ops->fid_get(rif, params, extack);
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			goto err_fid_get;
		}
		rif->fid = fid;
	}

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif, extack);
	if (err)
		goto err_configure;

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
		if (err)
			goto err_mr_rif_add;
	}

	err = mlxsw_sp_router_rif_made_sync(mlxsw_sp, rif);
	if (err)
		goto err_rif_made_sync;

	if (netdev_offload_xstats_enabled(params->dev,
					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
		err = mlxsw_sp_router_port_l3_stats_enable(rif);
		if (err)
			goto err_stats_enable;
		mlxsw_sp_router_hwstats_notify_schedule(params->dev);
	} else {
		mlxsw_sp_rif_counters_alloc(rif);
	}

	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
	return rif;

err_stats_enable:
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
err_rif_made_sync:
err_mr_rif_add:
	for (i--; i >= 0; i--)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
err_configure:
	if (fid)
		mlxsw_sp_fid_put(fid);
err_fid_get:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	netdev_put(params->dev, &rif->dev_tracker);
	mlxsw_sp_rif_free(rif);
err_rif_alloc:
err_crif_lookup:
	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
err_rif_index_alloc:
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

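/* Tear a RIF down in the reverse order of mlxsw_sp_rif_create(). If L3 HW
 * stats are enabled on the netdevice, the last counter readout is pushed
 * into the core before the counters are released. The backing CRIF is only
 * freed here if it was previously marked for destruction, i.e. its
 * netdevice is already gone.
 */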
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_crif *crif = rif->crif;
	struct mlxsw_sp_fid *fid = rif->fid;
	u8 rif_entries = rif->rif_entries;
	u16 rif_index = rif->rif_index;
	struct mlxsw_sp_vr *vr;
	int i;

	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	if (netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
		mlxsw_sp_rif_push_l3_stats(rif);
		mlxsw_sp_router_port_l3_stats_disable(rif);
		mlxsw_sp_router_hwstats_notify_schedule(dev);
	} else {
		mlxsw_sp_rif_counters_free(rif);
	}

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
	if (fid)
		/* Loopback RIFs are not associated with a FID. */
		mlxsw_sp_fid_put(fid);
	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	netdev_put(dev, &rif->dev_tracker);
	mlxsw_sp_rif_free(rif);
	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);

	if (crif->can_destroy)
		mlxsw_sp_crif_free(crif);
}

void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;
	mlxsw_sp_rif_destroy(rif);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static void mlxsw_sp_rif_destroy_vlan_upper(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *br_dev,
					    u16 vid)
{
	struct net_device *upper_dev;
	struct mlxsw_sp_crif *crif;

	rcu_read_lock();
	upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), vid);
	rcu_read_unlock();

	if (!upper_dev)
		return;

	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, upper_dev);
	if (!crif || !crif->rif)
		return;

	mlxsw_sp_rif_destroy(crif->rif);
}

static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *l3_dev,
					  int lower_pvid,
					  unsigned long event,
					  struct netlink_ext_ack *extack);

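/* Handle a PVID change on a VLAN-aware bridge that may have a VLAN RIF.
 * Moving the PVID effectively moves the bridge's IP configuration to a
 * different VLAN, so the RIF keyed on the old PVID is migrated to (or
 * replaced by) a RIF for the new PVID, and a VLAN upper configured with
 * the old PVID may now deserve a RIF of its own.
 */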
int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *br_dev,
				    u16 new_vid, bool is_pvid,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *old_rif;
	struct mlxsw_sp_rif *new_rif;
	struct net_device *upper_dev;
	u16 old_pvid = 0;
	u16 new_pvid;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	old_rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
	if (old_rif) {
		/* If the RIF on the bridge is not a VLAN RIF, we shouldn't have
		 * gotten a PVID notification.
		 */
		if (WARN_ON(old_rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN))
			old_rif = NULL;
		else
			old_pvid = mlxsw_sp_fid_8021q_vid(old_rif->fid);
	}

	if (is_pvid)
		new_pvid = new_vid;
	else if (old_pvid == new_vid)
		new_pvid = 0;
	else
		goto out;

	if (old_pvid == new_pvid)
		goto out;

	if (new_pvid) {
		struct mlxsw_sp_rif_params params = {
			.dev = br_dev,
			.vid = new_pvid,
		};

		/* If there is a VLAN upper with the same VID as the new PVID,
		 * kill its RIF, if there is one.
		 */
		mlxsw_sp_rif_destroy_vlan_upper(mlxsw_sp, br_dev, new_pvid);

		if (mlxsw_sp_dev_addr_list_empty(br_dev))
			goto out;
		new_rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(new_rif)) {
			err = PTR_ERR(new_rif);
			goto out;
		}

		if (old_pvid)
			mlxsw_sp_rif_migrate_destroy(mlxsw_sp, old_rif, new_rif,
						     true);
	} else {
		mlxsw_sp_rif_destroy(old_rif);
	}

	if (old_pvid) {
		rcu_read_lock();
		upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q),
						     old_pvid);
		rcu_read_unlock();
		if (upper_dev)
			err = mlxsw_sp_inetaddr_bridge_event(mlxsw_sp,
							     upper_dev,
							     new_pvid,
							     NETDEV_UP, extack);
	}

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;

	params->vid = mlxsw_sp_port_vlan->vid;
	params->lag = mlxsw_sp_port->lagged;
	if (params->lag)
		params->lag_id = mlxsw_sp_port->lag_id;
	else
		params->system_port = mlxsw_sp_port->local_port;
}

static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}

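/* Subport RIFs are reference-counted: several users may map to the same
 * RIF (e.g. the member ports of a LAG device), so a lookup hit only bumps
 * the reference count, and the RIF is created on first use and destroyed
 * when the last reference is put.
 */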
static struct mlxsw_sp_rif *
mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_rif_params *params,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_subport *rif_subport;
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
	if (!rif)
		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_inc(&rif_subport->ref_count);
	return rif;
}

static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	if (!refcount_dec_and_test(&rif_subport->ref_count))
		return;

	mlxsw_sp_rif_destroy(rif);
}

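/* RIF MAC profiles are a limited hardware resource: all RIF MAC addresses
 * that share a profile must agree in the bits covered by mlxsw_sp->mac_mask.
 * The IDR allocation below is bounded by the per-ASIC maximum, so running
 * out of profiles is reported to the user via extack rather than treated
 * as an internal error.
 */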
static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_rif_mac_profile *profile,
						struct netlink_ext_ack *extack)
{
	u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	int id;

	id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
		       max_rif_mac_profiles, GFP_KERNEL);

	if (id >= 0) {
		profile->id = id;
		return 0;
	}

	if (id == -ENOSPC)
		NL_SET_ERR_MSG_MOD(extack,
				   "Exceeded number of supported router interface MAC profiles");

	return id;
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
			     mac_profile);
	WARN_ON(!profile);
	return profile;
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_alloc(const char *mac)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
	if (!profile)
		return NULL;

	ether_addr_copy(profile->mac_prefix, mac);
	refcount_set(&profile->ref_count, 1);
	return profile;
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_rif_mac_profile *profile;
	int id;

	idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
		if (ether_addr_equal_masked(profile->mac_prefix, mac,
					    mlxsw_sp->mac_mask))
			return profile;
	}

	return NULL;
}

static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}

static u64 mlxsw_sp_rifs_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->router->rifs_count);
}

static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_mac_profile *profile;
	int err;

	profile = mlxsw_sp_rif_mac_profile_alloc(mac);
	if (!profile)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
	if (err)
		goto profile_index_alloc_err;

	atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
	return profile;

profile_index_alloc_err:
	kfree(profile);
	return ERR_PTR(err);
}

static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
					     u8 mac_profile)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
	kfree(profile);
}

static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
					const char *mac, u8 *p_mac_profile,
					struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
	if (profile) {
		refcount_inc(&profile->ref_count);
		goto out;
	}

	profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
	if (IS_ERR(profile))
		return PTR_ERR(profile);

out:
	*p_mac_profile = profile->id;
	return 0;
}

static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
					 u8 mac_profile)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
			   mac_profile);
	if (WARN_ON(!profile))
		return;

	if (!refcount_dec_and_test(&profile->ref_count))
		return;

	mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
}

static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
			   rif->mac_profile_id);
	if (WARN_ON(!profile))
		return false;

	return refcount_read(&profile->ref_count) > 1;
}

static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
					 const char *new_mac)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_mac_profile *profile;

	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
			   rif->mac_profile_id);
	if (WARN_ON(!profile))
		return -EINVAL;

	ether_addr_copy(profile->mac_prefix, new_mac);
	return 0;
}

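/* Point a RIF at the MAC profile matching new_mac. If the RIF is the only
 * user of its current profile and no existing profile matches the new
 * address, the profile can simply be edited in place. Otherwise a matching
 * profile is found or created, and the reference to the old one is dropped.
 */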
static int
mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif,
				 const char *new_mac,
				 struct netlink_ext_ack *extack)
{
	u8 mac_profile;
	int err;

	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
					   &mac_profile, extack);
	if (err)
		return err;

	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
	rif->mac_profile_id = mac_profile;
	return 0;
}

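/* Make a {port, VLAN} pair a router interface: take a reference on the
 * subport RIF (creating it if need be) and on its FID, map the port-VID
 * pair to that FID, disable learning on the VID and force it into the
 * forwarding STP state, since from this point the traffic is routed.
 */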
static int
__mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
				 struct net_device *l3_dev,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_rif_params params;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	params = (struct mlxsw_sp_rif_params) {
		.dev = l3_dev,
		.vid = vid,
	};

	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
	if (IS_ERR(rif))
		return PTR_ERR(rif);

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif, &params, extack);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
	return err;
}

static void
__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
	u16 vid = mlxsw_sp_port_vlan->vid;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
}

static int
mlxsw_sp_port_vlan_router_join_existing(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
					struct net_device *l3_dev,
					struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;

	lockdep_assert_held(&mlxsw_sp->router->lock);

	if (!mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev))
		return 0;

	return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
						extack);
}

void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;

	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					     struct net_device *port_dev,
					     unsigned long event, u16 vid,
					     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
							l3_dev, extack);
	case NETDEV_DOWN:
		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event, bool nomaster,
					struct netlink_ext_ack *extack)
{
	if (!nomaster && (netif_is_any_bridge_port(port_dev) ||
			  netif_is_lag_port(port_dev)))
		return 0;

	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
						 MLXSW_SP_DEFAULT_VID, extack);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid,
					 struct netlink_ext_ack *extack)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
								port_dev,
								event, vid,
								extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event, bool nomaster,
				       struct netlink_ext_ack *extack)
{
	if (!nomaster && netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
					     MLXSW_SP_DEFAULT_VID, extack);
}

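/* Handle an address being added to or removed from a bridge or a VLAN
 * upper of a bridge. For a VLAN-aware bridge, the RIF is keyed on the
 * bridge PVID; 802.1ad bridges are rejected as unsupported. A VLAN upper
 * whose VID matches the PVID of the bridge below does not get a RIF of
 * its own, because the bridge already owns the RIF for that VLAN.
 */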
static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *l3_dev,
					  int lower_pvid,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;
	int err;

	switch (event) {
	case NETDEV_UP:
		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
			u16 proto;

			br_vlan_get_proto(l3_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
			err = br_vlan_get_pvid(l3_dev, &params.vid);
			if (err)
				return err;
			if (!params.vid)
				return 0;
		} else if (is_vlan_dev(l3_dev)) {
			params.vid = vlan_dev_vlan_id(l3_dev);

			/* If the VID matches PVID of the bridge below, the
			 * bridge owns the RIF for this VLAN. Don't do anything.
			 */
			if ((int)params.vid == lower_pvid)
				return 0;
		}

		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *vlan_dev,
					unsigned long event, bool nomaster,
					struct netlink_ext_ack *extack)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);
	u16 lower_pvid;
	int err;

	if (!nomaster && netif_is_bridge_port(vlan_dev))
		return 0;

	if (mlxsw_sp_port_dev_check(real_dev)) {
		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
							 event, vid, extack);
	} else if (netif_is_lag_master(real_dev)) {
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid, extack);
	} else if (netif_is_bridge_master(real_dev) &&
		   br_vlan_enabled(real_dev)) {
		err = br_vlan_get_pvid(real_dev, &lower_pvid);
		if (err)
			return err;
		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev,
						      lower_pvid, event,
						      extack);
	}

	return 0;
}

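/* VRRP virtual routers use the well-known MAC prefixes 00:00:5e:00:01:xx
 * (IPv4) and 00:00:5e:00:02:xx (IPv6), where the last octet carries the
 * virtual router ID. The helpers below match a macvlan address against
 * these prefixes so that the VRID can be programmed into the RIF.
 */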
static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
{
	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return ether_addr_equal_masked(mac, vrrp4, mask);
}

static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
{
	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return ether_addr_equal_masked(mac, vrrp6, mask);
}

static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				const u8 *mac, bool adding)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u8 vrrp_id = adding ? mac[5] : 0;
	int err;

	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
		return 0;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
	else
		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
				    const struct net_device *macvlan_dev,
				    struct netlink_ext_ack *extack)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	if (!rif)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		return err;

	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
				   macvlan_dev->dev_addr, true);
	if (err)
		goto err_rif_vrrp_add;

	/* Make sure the bridge driver does not have this MAC pointing at
	 * some other port.
	 */
	if (rif->ops->fdb_del)
		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);

	return 0;

err_rif_vrrp_add:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
	return err;
}

static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *macvlan_dev)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	/* If we do not have a RIF, then we already took care of
	 * removing the macvlan's MAC during RIF deletion.
	 */
	if (!rif)
		return;
	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
			     false);
	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
}

void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *macvlan_dev)
{
	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *macvlan_dev,
					   unsigned long event,
					   struct netlink_ext_ack *extack)
{
	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
	case NETDEV_DOWN:
		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
		break;
	}

	return 0;
}

static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *dev,
				     unsigned long event, bool nomaster,
				     struct netlink_ext_ack *extack)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event, nomaster,
						    extack);
	else if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event, nomaster,
						   extack);
	else if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, -1, event,
						      extack);
	else if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
						    nomaster, extack);
	else if (netif_is_macvlan(dev))
		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
						       extack);
	else
		return 0;
}

static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp_router *router;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
	mutex_lock(&router->lock);
	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, false,
					NULL);
out:
	mutex_unlock(&router->lock);
	return notifier_from_errno(err);
}

static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
					 unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
					ivi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

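/* Unlike the IPv4 notifiers above, inet6addr notifications arrive in atomic
 * context (under rcu_read_lock()), so the event cannot take the router mutex
 * directly. It is instead recorded in a work item and handled in process
 * context, with a reference held on the netdevice in the meantime.
 */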
struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	unsigned long event;
};

static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, NULL);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
	netdev_put(dev, &inet6addr_work->dev_tracker);
	kfree(inet6addr_work);
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;
	struct mlxsw_sp_router *router;

	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	netdev_hold(dev, &inet6addr_work->dev_tracker, GFP_ATOMIC);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}

static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
					  unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
					i6vi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

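/* Update the MAC address, MTU and MAC profile of an existing RIF via the
 * RITR register, using a read-modify-write so that the remaining RIF
 * attributes are preserved.
 */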
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu, u8 mac_profile)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

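/* React to a MAC address or MTU change on a netdevice that has a RIF: the
 * FDB entry for the old address is removed, the MAC profile is replaced,
 * the RIF is edited with the new address and MTU, and an FDB entry for the
 * new address is installed. Each step is rolled back if a later one fails.
 */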
static int
mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_rif *rif,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u8 old_mac_profile;
	u16 fid_index;
	int err;

	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	old_mac_profile = rif->mac_profile_id;
	err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
					       extack);
	if (err)
		goto err_rif_mac_profile_replace;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu, rif->mac_profile_id);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;
		int i;

		/* The RIF is relevant only to its mr_table instance, as unlike
		 * unicast routing, in multicast routing a RIF cannot be shared
		 * between several multicast routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
						   rif, dev->mtu);
	}

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
			  old_mac_profile);
err_rif_edit:
	mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
err_rif_mac_profile_replace:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}

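/* Veto a MAC address change ahead of time if it would need a new MAC
 * profile while all profiles are occupied and the RIF's current profile
 * is shared with other RIFs, and therefore cannot be edited in place.
 */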
static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
						     struct netdev_notifier_pre_changeaddr_info *info)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_mac_profile *profile;
	struct netlink_ext_ack *extack;
	u8 max_rif_mac_profiles;
	u64 occ;

	extack = netdev_notifier_info_to_extack(&info->info);

	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
	if (profile)
		return 0;

	max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
	occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
	if (occ < max_rif_mac_profiles)
		return 0;

	if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
		return 0;

	NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
	return -ENOBUFS;
}

static bool mlxsw_sp_router_netdevice_interesting(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *dev)
{
	struct vlan_dev_priv *vlan;

	if (netif_is_lag_master(dev) ||
	    netif_is_bridge_master(dev) ||
	    mlxsw_sp_port_dev_check(dev) ||
	    mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev) ||
	    netif_is_l3_master(dev))
		return true;

	if (!is_vlan_dev(dev))
		return false;

	vlan = vlan_dev_priv(dev);
	return netif_is_lag_master(vlan->real_dev) ||
	       netif_is_bridge_master(vlan->real_dev) ||
	       mlxsw_sp_port_dev_check(vlan->real_dev);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_register(struct mlxsw_sp_router *router, struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;
	int err;

	if (WARN_ON(mlxsw_sp_crif_lookup(router, dev)))
		return NULL;

	crif = mlxsw_sp_crif_alloc(dev);
	if (!crif)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_crif_insert(router, crif);
	if (err)
		goto err_netdev_insert;

	return crif;

err_netdev_insert:
	mlxsw_sp_crif_free(crif);
	return ERR_PTR(err);
}

static void mlxsw_sp_crif_unregister(struct mlxsw_sp_router *router,
				     struct mlxsw_sp_crif *crif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	mlxsw_sp_crif_remove(router, crif);

	list_for_each_entry_safe(nh, tmp, &crif->nexthop_list, crif_list_node)
		mlxsw_sp_nexthop_type_fini(router->mlxsw_sp, nh);

	if (crif->rif)
		crif->can_destroy = true;
	else
		mlxsw_sp_crif_free(crif);
}

static int mlxsw_sp_netdevice_register(struct mlxsw_sp_router *router,
				       struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;

	if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
		return 0;

	crif = mlxsw_sp_crif_register(router, dev);
	return PTR_ERR_OR_ZERO(crif);
}

static void mlxsw_sp_netdevice_unregister(struct mlxsw_sp_router *router,
					  struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;

	if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
		return;

	/* netdev_run_todo(), by way of netdev_wait_allrefs_any(), rebroadcasts
	 * the NETDEV_UNREGISTER message, so we can get here twice. If that's
	 * what happened, the netdevice state is NETREG_UNREGISTERED. In that
	 * case, we expect to have collected the CRIF already, and warn if it
	 * still exists. Otherwise we expect the CRIF to exist.
	 */
	crif = mlxsw_sp_crif_lookup(router, dev);
	if (dev->reg_state == NETREG_UNREGISTERED) {
		if (!WARN_ON(crif))
			return;
	}
	if (WARN_ON(!crif))
		return;

	mlxsw_sp_crif_unregister(router, crif);
}

static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
{
	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_ENABLE:
	case NETDEV_OFFLOAD_XSTATS_DISABLE:
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		return true;
	}

	return false;
}

static int
mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
					unsigned long event,
					struct netdev_notifier_offload_xstats_info *info)
{
	switch (info->type) {
	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
		break;
	default:
		return 0;
	}

	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_ENABLE:
		return mlxsw_sp_router_port_l3_stats_enable(rif);
	case NETDEV_OFFLOAD_XSTATS_DISABLE:
		mlxsw_sp_router_port_l3_stats_disable(rif);
		return 0;
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
		mlxsw_sp_router_port_l3_stats_report_used(rif, info);
		return 0;
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
	}

	WARN_ON_ONCE(1);
	return 0;
}

static int
mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *dev,
				      unsigned long event,
				      struct netdev_notifier_offload_xstats_info *info)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
}

static bool mlxsw_sp_is_router_event(unsigned long event)
{
	switch (event) {
	case NETDEV_PRE_CHANGEADDR:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGEMTU:
		return true;
	default:
		return false;
	}
}

static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
						unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	switch (event) {
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
		return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
	case NETDEV_PRE_CHANGEADDR:
		return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false,
					  extack);

	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, false,
					 extack);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, NULL);
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

static int
mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
			     struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	/* We do not create a RIF for a macvlan, but only use it to
	 * direct more MAC addresses to the router.
	 */
	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		break;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}

	return err;
}

struct mlxsw_sp_router_replay_inetaddr_up {
	struct mlxsw_sp *mlxsw_sp;
	struct netlink_ext_ack *extack;
	unsigned int done;
	bool deslavement;
};

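/* When a netdevice is enslaved to (or released from) a master, the RIFs
 * that its uppers should have had are replayed as if NETDEV_UP had just
 * been delivered for each addressed device. The context above counts how
 * many replays succeeded, so that a failed walk can be unwound precisely.
 */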
static int mlxsw_sp_router_replay_inetaddr_up(struct net_device *dev,
					      struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
	bool nomaster = ctx->deslavement;
	struct mlxsw_sp_crif *crif;
	int err;

	if (mlxsw_sp_dev_addr_list_empty(dev))
		return 0;

	crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
	if (!crif || crif->rif)
		return 0;

	if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
		return 0;

	err = __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_UP,
					nomaster, ctx->extack);
	if (err)
		return err;

	ctx->done++;
	return 0;
}

static int mlxsw_sp_router_unreplay_inetaddr_up(struct net_device *dev,
						struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
	bool nomaster = ctx->deslavement;
	struct mlxsw_sp_crif *crif;

	if (!ctx->done)
		return 0;

	if (mlxsw_sp_dev_addr_list_empty(dev))
		return 0;

	crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
	if (!crif || !crif->rif)
		return 0;

	/* We are rolling back NETDEV_UP, so ask for that. */
	if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
		return 0;

	__mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_DOWN, nomaster,
				  NULL);

	ctx->done--;
	return 0;
}

int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *upper_dev,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router_replay_inetaddr_up ctx = {
		.mlxsw_sp = mlxsw_sp,
		.extack = extack,
		.deslavement = false,
	};
	struct netdev_nested_priv priv = {
		.data = &ctx,
	};
	int err;

	err = mlxsw_sp_router_replay_inetaddr_up(upper_dev, &priv);
	if (err)
		return err;

	err = netdev_walk_all_upper_dev_rcu(upper_dev,
					    mlxsw_sp_router_replay_inetaddr_up,
					    &priv);
	if (err)
		goto err_replay_up;

	return 0;

err_replay_up:
	netdev_walk_all_upper_dev_rcu(upper_dev,
				      mlxsw_sp_router_unreplay_inetaddr_up,
				      &priv);
	mlxsw_sp_router_unreplay_inetaddr_up(upper_dev, &priv);
	return err;
}

void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *dev)
{
	struct mlxsw_sp_router_replay_inetaddr_up ctx = {
		.mlxsw_sp = mlxsw_sp,
		.deslavement = true,
	};
	struct netdev_nested_priv priv = {
		.data = &ctx,
	};

	mlxsw_sp_router_replay_inetaddr_up(dev, &priv);
}

9903 static int
mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port * mlxsw_sp_port,u16 vid,struct net_device * dev,struct netlink_ext_ack * extack)9904 mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
9905 u16 vid, struct net_device *dev,
9906 struct netlink_ext_ack *extack)
9907 {
9908 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9909
9910 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
9911 vid);
9912 if (WARN_ON(!mlxsw_sp_port_vlan))
9913 return -EINVAL;
9914
9915 return mlxsw_sp_port_vlan_router_join_existing(mlxsw_sp_port_vlan,
9916 dev, extack);
9917 }
9918
9919 static void
mlxsw_sp_port_vid_router_leave(struct mlxsw_sp_port * mlxsw_sp_port,u16 vid,struct net_device * dev)9920 mlxsw_sp_port_vid_router_leave(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
9921 struct net_device *dev)
9922 {
9923 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9924
9925 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
9926 vid);
9927 if (WARN_ON(!mlxsw_sp_port_vlan))
9928 return;
9929
9930 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9931 }
9932
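/* Join the router on the default VID of @lag_dev and on every VLAN upper
 * of the LAG, unwinding the VIDs already joined on error.
 */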
9933 static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9934 struct net_device *lag_dev,
9935 struct netlink_ext_ack *extack)
9936 {
9937 u16 default_vid = MLXSW_SP_DEFAULT_VID;
9938 struct net_device *upper_dev;
9939 struct list_head *iter;
9940 int done = 0;
9941 u16 vid;
9942 int err;
9943
9944 err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, default_vid,
9945 lag_dev, extack);
9946 if (err)
9947 return err;
9948
9949 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9950 if (!is_vlan_dev(upper_dev))
9951 continue;
9952
9953 vid = vlan_dev_vlan_id(upper_dev);
9954 err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, vid,
9955 upper_dev, extack);
9956 if (err)
9957 goto err_router_join_dev;
9958
9959 ++done;
9960 }
9961
9962 return 0;
9963
9964 err_router_join_dev:
9965 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9966 if (!is_vlan_dev(upper_dev))
9967 continue;
9968 if (!done--)
9969 break;
9970
9971 vid = vlan_dev_vlan_id(upper_dev);
9972 mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
9973 }
9974
9975 mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
9976 return err;
9977 }
9978
9979 static void
9980 __mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9981 struct net_device *lag_dev)
9982 {
9983 u16 default_vid = MLXSW_SP_DEFAULT_VID;
9984 struct net_device *upper_dev;
9985 struct list_head *iter;
9986 u16 vid;
9987
9988 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9989 if (!is_vlan_dev(upper_dev))
9990 continue;
9991
9992 vid = vlan_dev_vlan_id(upper_dev);
9993 mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
9994 }
9995
9996 mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
9997 }
9998
9999 int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10000 struct net_device *lag_dev,
10001 struct netlink_ext_ack *extack)
10002 {
10003 int err;
10004
10005 mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10006 err = __mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, extack);
10007 mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10008
10009 return err;
10010 }
10011
10012 void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10013 struct net_device *lag_dev)
10014 {
10015 mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10016 __mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
10017 mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10018 }
10019
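/* Router-side netdevice notifier. Under the router lock, dispatches
 * netdevice registration/unregistration, offload-xstats commands,
 * IP-in-IP overlay/underlay events, router port events and VRF events.
 */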
10020 static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
10021 unsigned long event, void *ptr)
10022 {
10023 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
10024 struct mlxsw_sp_router *router;
10025 struct mlxsw_sp *mlxsw_sp;
10026 int err = 0;
10027
10028 router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
10029 mlxsw_sp = router->mlxsw_sp;
10030
10031 mutex_lock(&mlxsw_sp->router->lock);
10032
10033 if (event == NETDEV_REGISTER) {
10034 err = mlxsw_sp_netdevice_register(router, dev);
10035 if (err)
10036 /* No need to roll this back, UNREGISTER will collect it
10037 * anyhow.
10038 */
10039 goto out;
10040 }
10041
10042 if (mlxsw_sp_is_offload_xstats_event(event))
10043 err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
10044 event, ptr);
10045 else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
10046 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
10047 event, ptr);
10048 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
10049 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
10050 event, ptr);
10051 else if (mlxsw_sp_is_router_event(event))
10052 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
10053 else if (mlxsw_sp_is_vrf_event(event, ptr))
10054 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
10055
10056 if (event == NETDEV_UNREGISTER)
10057 mlxsw_sp_netdevice_unregister(router, dev);
10058
10059 out:
10060 mutex_unlock(&mlxsw_sp->router->lock);
10061
10062 return notifier_from_errno(err);
10063 }
10064
10065 struct mlxsw_sp_macvlan_replay {
10066 struct mlxsw_sp *mlxsw_sp;
10067 struct netlink_ext_ack *extack;
10068 };
10069
10070 static int mlxsw_sp_macvlan_replay_upper(struct net_device *dev,
10071 struct netdev_nested_priv *priv)
10072 {
10073 const struct mlxsw_sp_macvlan_replay *rms = priv->data;
10074 struct netlink_ext_ack *extack = rms->extack;
10075 struct mlxsw_sp *mlxsw_sp = rms->mlxsw_sp;
10076
10077 if (!netif_is_macvlan(dev))
10078 return 0;
10079
10080 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, dev, extack);
10081 }
10082
10083 static int mlxsw_sp_macvlan_replay(struct mlxsw_sp_rif *rif,
10084 struct netlink_ext_ack *extack)
10085 {
10086 struct mlxsw_sp_macvlan_replay rms = {
10087 .mlxsw_sp = rif->mlxsw_sp,
10088 .extack = extack,
10089 };
10090 struct netdev_nested_priv priv = {
10091 .data = &rms,
10092 };
10093
10094 return netdev_walk_all_upper_dev_rcu(mlxsw_sp_rif_dev(rif),
10095 mlxsw_sp_macvlan_replay_upper,
10096 &priv);
10097 }
10098
10099 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
10100 struct netdev_nested_priv *priv)
10101 {
10102 struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
10103
10104 if (!netif_is_macvlan(dev))
10105 return 0;
10106
10107 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10108 mlxsw_sp_fid_index(rif->fid), false);
10109 }
10110
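/* Withdraw the FDB entries of all macvlan uppers of the RIF's netdevice
 * from the RIF's FID.
 */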
10111 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
10112 {
10113 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10114 struct netdev_nested_priv priv = {
10115 .data = (void *)rif,
10116 };
10117
10118 if (!netif_is_macvlan_port(dev))
10119 return 0;
10120
10121 return netdev_walk_all_upper_dev_rcu(dev,
10122 __mlxsw_sp_rif_macvlan_flush, &priv);
10123 }
10124
10125 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
10126 const struct mlxsw_sp_rif_params *params)
10127 {
10128 struct mlxsw_sp_rif_subport *rif_subport;
10129
10130 rif_subport = mlxsw_sp_rif_subport_rif(rif);
10131 refcount_set(&rif_subport->ref_count, 1);
10132 rif_subport->vid = params->vid;
10133 rif_subport->lag = params->lag;
10134 if (params->lag)
10135 rif_subport->lag_id = params->lag_id;
10136 else
10137 rif_subport->system_port = params->system_port;
10138 }
10139
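/* Write the RITR register for a sub-port RIF, keyed by either the LAG ID
 * or the system port, together with the egress FID.
 */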
10140 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
10141 {
10142 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10143 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10144 struct mlxsw_sp_rif_subport *rif_subport;
10145 char ritr_pl[MLXSW_REG_RITR_LEN];
10146 u16 efid;
10147
10148 rif_subport = mlxsw_sp_rif_subport_rif(rif);
10149 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
10150 rif->rif_index, rif->vr_id, dev->mtu);
10151 mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
10152 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
10153 efid = mlxsw_sp_fid_index(rif->fid);
10154 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
10155 rif_subport->lag ? rif_subport->lag_id :
10156 rif_subport->system_port,
10157 efid, 0);
10158 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10159 }
10160
10161 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
10162 struct netlink_ext_ack *extack)
10163 {
10164 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10165 u8 mac_profile;
10166 int err;
10167
10168 err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
10169 &mac_profile, extack);
10170 if (err)
10171 return err;
10172 rif->mac_profile_id = mac_profile;
10173
10174 err = mlxsw_sp_rif_subport_op(rif, true);
10175 if (err)
10176 goto err_rif_subport_op;
10177
10178 err = mlxsw_sp_macvlan_replay(rif, extack);
10179 if (err)
10180 goto err_macvlan_replay;
10181
10182 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10183 mlxsw_sp_fid_index(rif->fid), true);
10184 if (err)
10185 goto err_rif_fdb_op;
10186
10187 err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10188 if (err)
10189 goto err_fid_rif_set;
10190
10191 return 0;
10192
10193 err_fid_rif_set:
10194 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10195 mlxsw_sp_fid_index(rif->fid), false);
10196 err_rif_fdb_op:
10197 mlxsw_sp_rif_macvlan_flush(rif);
10198 err_macvlan_replay:
10199 mlxsw_sp_rif_subport_op(rif, false);
10200 err_rif_subport_op:
10201 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
10202 return err;
10203 }
10204
10205 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
10206 {
10207 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10208 struct mlxsw_sp_fid *fid = rif->fid;
10209
10210 mlxsw_sp_fid_rif_unset(fid);
10211 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10212 mlxsw_sp_fid_index(fid), false);
10213 mlxsw_sp_rif_macvlan_flush(rif);
10214 mlxsw_sp_rif_subport_op(rif, false);
10215 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10216 }
10217
10218 static struct mlxsw_sp_fid *
10219 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
10220 const struct mlxsw_sp_rif_params *params,
10221 struct netlink_ext_ack *extack)
10222 {
10223 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
10224 }
10225
10226 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
10227 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
10228 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
10229 .setup = mlxsw_sp_rif_subport_setup,
10230 .configure = mlxsw_sp_rif_subport_configure,
10231 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
10232 .fid_get = mlxsw_sp_rif_subport_fid_get,
10233 };
10234
10235 static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
10236 {
10237 enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
10238 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10239 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10240 char ritr_pl[MLXSW_REG_RITR_LEN];
10241
10242 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
10243 dev->mtu);
10244 mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
10245 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
10246 mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
10247
10248 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10249 }
10250
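/* The "router port" is a virtual port one past the highest physical port.
 * It is used as the flood destination when FID traffic needs to reach the
 * router.
 */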
10251 u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
10252 {
10253 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
10254 }
10255
10256 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
10257 struct netlink_ext_ack *extack)
10258 {
10259 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10260 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10261 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
10262 u8 mac_profile;
10263 int err;
10264
10265 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
10266 &mac_profile, extack);
10267 if (err)
10268 return err;
10269 rif->mac_profile_id = mac_profile;
10270
10271 err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
10272 if (err)
10273 goto err_rif_fid_op;
10274
10275 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10276 mlxsw_sp_router_port(mlxsw_sp), true);
10277 if (err)
10278 goto err_fid_mc_flood_set;
10279
10280 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10281 mlxsw_sp_router_port(mlxsw_sp), true);
10282 if (err)
10283 goto err_fid_bc_flood_set;
10284
10285 err = mlxsw_sp_macvlan_replay(rif, extack);
10286 if (err)
10287 goto err_macvlan_replay;
10288
10289 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10290 mlxsw_sp_fid_index(rif->fid), true);
10291 if (err)
10292 goto err_rif_fdb_op;
10293
10294 err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10295 if (err)
10296 goto err_fid_rif_set;
10297
10298 return 0;
10299
10300 err_fid_rif_set:
10301 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10302 mlxsw_sp_fid_index(rif->fid), false);
10303 err_rif_fdb_op:
10304 mlxsw_sp_rif_macvlan_flush(rif);
10305 err_macvlan_replay:
10306 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10307 mlxsw_sp_router_port(mlxsw_sp), false);
10308 err_fid_bc_flood_set:
10309 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10310 mlxsw_sp_router_port(mlxsw_sp), false);
10311 err_fid_mc_flood_set:
10312 mlxsw_sp_rif_fid_op(rif, fid_index, false);
10313 err_rif_fid_op:
10314 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
10315 return err;
10316 }
10317
10318 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
10319 {
10320 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10321 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
10322 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10323 struct mlxsw_sp_fid *fid = rif->fid;
10324
10325 mlxsw_sp_fid_rif_unset(fid);
10326 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10327 mlxsw_sp_fid_index(fid), false);
10328 mlxsw_sp_rif_macvlan_flush(rif);
10329 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10330 mlxsw_sp_router_port(mlxsw_sp), false);
10331 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10332 mlxsw_sp_router_port(mlxsw_sp), false);
10333 mlxsw_sp_rif_fid_op(rif, fid_index, false);
10334 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10335 }
10336
10337 static struct mlxsw_sp_fid *
10338 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
10339 const struct mlxsw_sp_rif_params *params,
10340 struct netlink_ext_ack *extack)
10341 {
10342 int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);
10343
10344 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif_ifindex);
10345 }
10346
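/* An FDB entry pointing at the router was removed from the device. Notify
 * the bridge driver so that the corresponding software entry is deleted as
 * well.
 */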
10347 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
10348 {
10349 struct switchdev_notifier_fdb_info info = {};
10350 struct net_device *dev;
10351
10352 dev = br_fdb_find_port(mlxsw_sp_rif_dev(rif), mac, 0);
10353 if (!dev)
10354 return;
10355
10356 info.addr = mac;
10357 info.vid = 0;
10358 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
10359 NULL);
10360 }
10361
10362 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
10363 .type = MLXSW_SP_RIF_TYPE_FID,
10364 .rif_size = sizeof(struct mlxsw_sp_rif),
10365 .configure = mlxsw_sp_rif_fid_configure,
10366 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
10367 .fid_get = mlxsw_sp_rif_fid_fid_get,
10368 .fdb_del = mlxsw_sp_rif_fid_fdb_del,
10369 };
10370
10371 static struct mlxsw_sp_fid *
10372 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
10373 const struct mlxsw_sp_rif_params *params,
10374 struct netlink_ext_ack *extack)
10375 {
10376 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10377 struct net_device *br_dev;
10378
10379 if (WARN_ON(!params->vid))
10380 return ERR_PTR(-EINVAL);
10381
10382 if (is_vlan_dev(dev)) {
10383 br_dev = vlan_dev_real_dev(dev);
10384 if (WARN_ON(!netif_is_bridge_master(br_dev)))
10385 return ERR_PTR(-EINVAL);
10386 }
10387
10388 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, params->vid);
10389 }
10390
10391 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
10392 {
10393 struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
10394 struct switchdev_notifier_fdb_info info = {};
10395 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10396 struct net_device *br_dev;
10397 struct net_device *dev;
10398
10399 br_dev = is_vlan_dev(rif_dev) ? vlan_dev_real_dev(rif_dev) : rif_dev;
10400 dev = br_fdb_find_port(br_dev, mac, vid);
10401 if (!dev)
10402 return;
10403
10404 info.addr = mac;
10405 info.vid = vid;
10406 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
10407 NULL);
10408 }
10409
10410 static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
10411 bool enable)
10412 {
10413 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10414 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10415 char ritr_pl[MLXSW_REG_RITR_LEN];
10416
10417 mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
10418 dev->mtu, dev->dev_addr,
10419 rif->mac_profile_id, vid, efid);
10420
10421 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10422 }
10423
10424 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
10425 struct netlink_ext_ack *extack)
10426 {
10427 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10428 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10429 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10430 u8 mac_profile;
10431 int err;
10432
10433 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
10434 &mac_profile, extack);
10435 if (err)
10436 return err;
10437 rif->mac_profile_id = mac_profile;
10438
10439 err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
10440 if (err)
10441 goto err_rif_vlan_fid_op;
10442
10443 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10444 mlxsw_sp_router_port(mlxsw_sp), true);
10445 if (err)
10446 goto err_fid_mc_flood_set;
10447
10448 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10449 mlxsw_sp_router_port(mlxsw_sp), true);
10450 if (err)
10451 goto err_fid_bc_flood_set;
10452
10453 err = mlxsw_sp_macvlan_replay(rif, extack);
10454 if (err)
10455 goto err_macvlan_replay;
10456
10457 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10458 mlxsw_sp_fid_index(rif->fid), true);
10459 if (err)
10460 goto err_rif_fdb_op;
10461
10462 err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10463 if (err)
10464 goto err_fid_rif_set;
10465
10466 return 0;
10467
10468 err_fid_rif_set:
10469 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10470 mlxsw_sp_fid_index(rif->fid), false);
10471 err_rif_fdb_op:
10472 mlxsw_sp_rif_macvlan_flush(rif);
10473 err_macvlan_replay:
10474 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10475 mlxsw_sp_router_port(mlxsw_sp), false);
10476 err_fid_bc_flood_set:
10477 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10478 mlxsw_sp_router_port(mlxsw_sp), false);
10479 err_fid_mc_flood_set:
10480 mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
10481 err_rif_vlan_fid_op:
10482 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
10483 return err;
10484 }
10485
10486 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
10487 {
10488 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10489 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10490 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10491
10492 mlxsw_sp_fid_rif_unset(rif->fid);
10493 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10494 mlxsw_sp_fid_index(rif->fid), false);
10495 mlxsw_sp_rif_macvlan_flush(rif);
10496 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10497 mlxsw_sp_router_port(mlxsw_sp), false);
10498 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10499 mlxsw_sp_router_port(mlxsw_sp), false);
10500 mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
10501 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10502 }
10503
10504 static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10505 struct netlink_ext_ack *extack)
10506 {
10507 return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
10508 }
10509
10510 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
10511 .type = MLXSW_SP_RIF_TYPE_VLAN,
10512 .rif_size = sizeof(struct mlxsw_sp_rif),
10513 .configure = mlxsw_sp1_rif_vlan_configure,
10514 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
10515 .fid_get = mlxsw_sp_rif_vlan_fid_get,
10516 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
10517 };
10518
10519 static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10520 struct netlink_ext_ack *extack)
10521 {
10522 u16 efid = mlxsw_sp_fid_index(rif->fid);
10523
10524 return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
10525 }
10526
10527 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
10528 .type = MLXSW_SP_RIF_TYPE_VLAN,
10529 .rif_size = sizeof(struct mlxsw_sp_rif),
10530 .configure = mlxsw_sp2_rif_vlan_configure,
10531 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
10532 .fid_get = mlxsw_sp_rif_vlan_fid_get,
10533 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
10534 };
10535
10536 static struct mlxsw_sp_rif_ipip_lb *
10537 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
10538 {
10539 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
10540 }
10541
10542 static void
10543 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
10544 const struct mlxsw_sp_rif_params *params)
10545 {
10546 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
10547 struct mlxsw_sp_rif_ipip_lb *rif_lb;
10548
10549 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
10550 common);
10551 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
10552 rif_lb->lb_config = params_lb->lb_config;
10553 }
10554
10555 static int
10556 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10557 struct netlink_ext_ack *extack)
10558 {
10559 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10560 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10561 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10562 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10563 struct mlxsw_sp_vr *ul_vr;
10564 int err;
10565
10566 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, extack);
10567 if (IS_ERR(ul_vr))
10568 return PTR_ERR(ul_vr);
10569
10570 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
10571 if (err)
10572 goto err_loopback_op;
10573
10574 lb_rif->ul_vr_id = ul_vr->id;
10575 lb_rif->ul_rif_id = 0;
10576 ++ul_vr->rif_count;
10577 return 0;
10578
10579 err_loopback_op:
10580 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10581 return err;
10582 }
10583
10584 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10585 {
10586 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10587 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10588 struct mlxsw_sp_vr *ul_vr;
10589
10590 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
10591 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
10592
10593 --ul_vr->rif_count;
10594 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10595 }
10596
10597 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
10598 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
10599 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
10600 .setup = mlxsw_sp_rif_ipip_lb_setup,
10601 .configure = mlxsw_sp1_rif_ipip_lb_configure,
10602 .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
10603 };
10604
10605 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
10606 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
10607 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
10608 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
10609 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
10610 };
10611
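/* Enable or disable the underlay RIF, modeled as a generic loopback
 * interface with the maximum MTU.
 */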
10612 static int
10613 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
10614 {
10615 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10616 char ritr_pl[MLXSW_REG_RITR_LEN];
10617
10618 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
10619 ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
10620 mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
10621 MLXSW_REG_RITR_LOOPBACK_GENERIC);
10622
10623 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10624 }
10625
10626 static struct mlxsw_sp_rif *
10627 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
10628 struct mlxsw_sp_crif *ul_crif,
10629 struct netlink_ext_ack *extack)
10630 {
10631 struct mlxsw_sp_rif *ul_rif;
10632 u8 rif_entries = 1;
10633 u16 rif_index;
10634 int err;
10635
10636 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
10637 if (err) {
10638 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
10639 return ERR_PTR(err);
10640 }
10641
10642 ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id,
10643 ul_crif);
10644 if (!ul_rif) {
10645 err = -ENOMEM;
10646 goto err_rif_alloc;
10647 }
10648
10649 mlxsw_sp->router->rifs[rif_index] = ul_rif;
10650 ul_rif->mlxsw_sp = mlxsw_sp;
10651 ul_rif->rif_entries = rif_entries;
10652 err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
10653 if (err)
10654 goto ul_rif_op_err;
10655
10656 atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
10657 return ul_rif;
10658
10659 ul_rif_op_err:
10660 mlxsw_sp->router->rifs[rif_index] = NULL;
10661 mlxsw_sp_rif_free(ul_rif);
10662 err_rif_alloc:
10663 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10664 return ERR_PTR(err);
10665 }
10666
10667 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
10668 {
10669 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10670 u8 rif_entries = ul_rif->rif_entries;
10671 u16 rif_index = ul_rif->rif_index;
10672
10673 atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
10674 mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
10675 mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
10676 mlxsw_sp_rif_free(ul_rif);
10677 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10678 }
10679
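/* Get a reference on the underlay RIF of the VR bound to @tb_id, creating
 * the RIF on first use.
 */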
10680 static struct mlxsw_sp_rif *
10681 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
10682 struct mlxsw_sp_crif *ul_crif,
10683 struct netlink_ext_ack *extack)
10684 {
10685 struct mlxsw_sp_vr *vr;
10686 int err;
10687
10688 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
10689 if (IS_ERR(vr))
10690 return ERR_CAST(vr);
10691
10692 if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
10693 return vr->ul_rif;
10694
10695 vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, ul_crif, extack);
10696 if (IS_ERR(vr->ul_rif)) {
10697 err = PTR_ERR(vr->ul_rif);
10698 goto err_ul_rif_create;
10699 }
10700
10701 vr->rif_count++;
10702 refcount_set(&vr->ul_rif_refcnt, 1);
10703
10704 return vr->ul_rif;
10705
10706 err_ul_rif_create:
10707 mlxsw_sp_vr_put(mlxsw_sp, vr);
10708 return ERR_PTR(err);
10709 }
10710
10711 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
10712 {
10713 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10714 struct mlxsw_sp_vr *vr;
10715
10716 vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
10717
10718 if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
10719 return;
10720
10721 vr->rif_count--;
10722 mlxsw_sp_ul_rif_destroy(ul_rif);
10723 mlxsw_sp_vr_put(mlxsw_sp, vr);
10724 }
10725
10726 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
10727 u16 *ul_rif_index)
10728 {
10729 struct mlxsw_sp_rif *ul_rif;
10730 int err = 0;
10731
10732 mutex_lock(&mlxsw_sp->router->lock);
10733 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, NULL);
10734 if (IS_ERR(ul_rif)) {
10735 err = PTR_ERR(ul_rif);
10736 goto out;
10737 }
10738 *ul_rif_index = ul_rif->rif_index;
10739 out:
10740 mutex_unlock(&mlxsw_sp->router->lock);
10741 return err;
10742 }
10743
10744 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
10745 {
10746 struct mlxsw_sp_rif *ul_rif;
10747
10748 mutex_lock(&mlxsw_sp->router->lock);
10749 ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
10750 if (WARN_ON(!ul_rif))
10751 goto out;
10752
10753 mlxsw_sp_ul_rif_put(ul_rif);
10754 out:
10755 mutex_unlock(&mlxsw_sp->router->lock);
10756 }
10757
10758 static int
10759 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10760 struct netlink_ext_ack *extack)
10761 {
10762 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10763 struct net_device *dev = mlxsw_sp_rif_dev(rif);
10764 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10765 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10766 struct mlxsw_sp_rif *ul_rif;
10767 int err;
10768
10769 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, extack);
10770 if (IS_ERR(ul_rif))
10771 return PTR_ERR(ul_rif);
10772
10773 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
10774 if (err)
10775 goto err_loopback_op;
10776
10777 lb_rif->ul_vr_id = 0;
10778 lb_rif->ul_rif_id = ul_rif->rif_index;
10779
10780 return 0;
10781
10782 err_loopback_op:
10783 mlxsw_sp_ul_rif_put(ul_rif);
10784 return err;
10785 }
10786
10787 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10788 {
10789 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10790 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10791 struct mlxsw_sp_rif *ul_rif;
10792
10793 ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
10794 mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
10795 mlxsw_sp_ul_rif_put(ul_rif);
10796 }
10797
10798 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
10799 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
10800 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
10801 .setup = mlxsw_sp_rif_ipip_lb_setup,
10802 .configure = mlxsw_sp2_rif_ipip_lb_configure,
10803 .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
10804 };
10805
10806 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
10807 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
10808 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
10809 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
10810 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
10811 };
10812
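/* RIF indexes are allocated from a genpool, so that RIFs spanning several
 * index entries can be handed out order-aligned.
 */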
10813 static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
10814 {
10815 struct gen_pool *rifs_table;
10816 int err;
10817
10818 rifs_table = gen_pool_create(0, -1);
10819 if (!rifs_table)
10820 return -ENOMEM;
10821
10822 gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align,
10823 NULL);
10824
10825 err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
10826 MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
10827 if (err)
10828 goto err_gen_pool_add;
10829
10830 mlxsw_sp->router->rifs_table = rifs_table;
10831
10832 return 0;
10833
10834 err_gen_pool_add:
10835 gen_pool_destroy(rifs_table);
10836 return err;
10837 }
10838
10839 static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
10840 {
10841 gen_pool_destroy(mlxsw_sp->router->rifs_table);
10842 }
10843
10844 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
10845 {
10846 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10847 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10848 struct mlxsw_core *core = mlxsw_sp->core;
10849 int err;
10850
10851 if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
10852 return -EIO;
10853 mlxsw_sp->router->max_rif_mac_profile =
10854 MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
10855
10856 mlxsw_sp->router->rifs = kcalloc(max_rifs,
10857 sizeof(struct mlxsw_sp_rif *),
10858 GFP_KERNEL);
10859 if (!mlxsw_sp->router->rifs)
10860 return -ENOMEM;
10861
10862 err = mlxsw_sp_rifs_table_init(mlxsw_sp);
10863 if (err)
10864 goto err_rifs_table_init;
10865
10866 idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
10867 atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
10868 atomic_set(&mlxsw_sp->router->rifs_count, 0);
10869 devl_resource_occ_get_register(devlink,
10870 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
10871 mlxsw_sp_rif_mac_profiles_occ_get,
10872 mlxsw_sp);
10873 devl_resource_occ_get_register(devlink,
10874 MLXSW_SP_RESOURCE_RIFS,
10875 mlxsw_sp_rifs_occ_get,
10876 mlxsw_sp);
10877
10878 return 0;
10879
10880 err_rifs_table_init:
10881 kfree(mlxsw_sp->router->rifs);
10882 return err;
10883 }
10884
10885 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
10886 {
10887 int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10888 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10889 int i;
10890
10891 WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
10892 for (i = 0; i < max_rifs; i++)
10893 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
10894
10895 devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
10896 devl_resource_occ_get_unregister(devlink,
10897 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
10898 WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
10899 idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
10900 mlxsw_sp_rifs_table_fini(mlxsw_sp);
10901 kfree(mlxsw_sp->router->rifs);
10902 }
10903
10904 static int
10905 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
10906 {
10907 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
10908
10909 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
10910 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
10911 }
10912
10913 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
10914 {
10915 int err;
10916
10917 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
10918
10919 err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
10920 if (err)
10921 return err;
10922 err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
10923 if (err)
10924 return err;
10925
10926 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
10927 }
10928
10929 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
10930 {
10931 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
10932 return mlxsw_sp_ipips_init(mlxsw_sp);
10933 }
10934
10935 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
10936 {
10937 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
10938 return mlxsw_sp_ipips_init(mlxsw_sp);
10939 }
10940
10941 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
10942 {
10943 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
10944 }
10945
10946 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
10947 {
10948 struct mlxsw_sp_router *router;
10949
10950 /* Flush pending FIB notifications and then flush the device's
10951 * table before requesting another dump. The FIB notification
10952 * block is unregistered, so no need to take RTNL.
10953 */
10954 mlxsw_core_flush_owq();
10955 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
10956 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
10957 }
10958
10959 #ifdef CONFIG_IP_ROUTE_MULTIPATH
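/* Which outer and inner packet headers and fields should feed the ECMP
 * hash, expressed in terms of the RECR2 register.
 */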
10960 struct mlxsw_sp_mp_hash_config {
10961 DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
10962 DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
10963 DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
10964 DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
10965 bool inc_parsing_depth;
10966 };
10967
10968 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
10969 bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
10970
10971 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
10972 bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
10973
10974 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
10975 bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
10976
10977 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
10978 {
10979 unsigned long *inner_headers = config->inner_headers;
10980 unsigned long *inner_fields = config->inner_fields;
10981
10982 /* IPv4 inner */
10983 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10984 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10985 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10986 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10987 /* IPv6 inner */
10988 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10989 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10990 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10991 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10992 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10993 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10994 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10995 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10996 }
10997
10998 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10999 {
11000 unsigned long *headers = config->headers;
11001 unsigned long *fields = config->fields;
11002
11003 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
11004 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
11005 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
11006 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
11007 }
11008
11009 static void
11010 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
11011 u32 hash_fields)
11012 {
11013 unsigned long *inner_headers = config->inner_headers;
11014 unsigned long *inner_fields = config->inner_fields;
11015
11016 /* IPv4 Inner */
11017 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
11018 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
11019 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
11020 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
11021 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
11022 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
11023 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
11024 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
11025 /* IPv6 inner */
11026 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
11027 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
11028 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
11029 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
11030 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
11031 }
11032 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
11033 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
11034 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
11035 }
11036 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
11037 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
11038 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
11039 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
11040 /* L4 inner */
11041 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
11042 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
11043 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
11044 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
11045 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
11046 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
11047 }
11048
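/* Translate the net.ipv4.fib_multipath_hash_policy sysctl into RECR2
 * terms: 0 - L3, 1 - L3 + L4, 2 - L3 + inner L3, 3 - custom field set.
 */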
11049 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
11050 struct mlxsw_sp_mp_hash_config *config)
11051 {
11052 struct net *net = mlxsw_sp_net(mlxsw_sp);
11053 unsigned long *headers = config->headers;
11054 unsigned long *fields = config->fields;
11055 u32 hash_fields;
11056
11057 switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
11058 case 0:
11059 mlxsw_sp_mp4_hash_outer_addr(config);
11060 break;
11061 case 1:
11062 mlxsw_sp_mp4_hash_outer_addr(config);
11063 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
11064 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
11065 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11066 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11067 break;
11068 case 2:
11069 /* Outer */
11070 mlxsw_sp_mp4_hash_outer_addr(config);
11071 /* Inner */
11072 mlxsw_sp_mp_hash_inner_l3(config);
11073 break;
11074 case 3:
11075 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
11076 /* Outer */
11077 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
11078 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
11079 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
11080 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
11081 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
11082 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
11083 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
11084 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
11085 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
11086 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
11087 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11088 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
11089 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11090 /* Inner */
11091 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
11092 break;
11093 }
11094 }
11095
11096 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
11097 {
11098 unsigned long *headers = config->headers;
11099 unsigned long *fields = config->fields;
11100
11101 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
11102 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
11103 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
11104 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
11105 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
11106 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
11107 }
11108
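/* Translate the IPv6 multipath hash policy into RECR2 terms: 0 - L3,
 * 1 - L3 + L4, 2 - L3 + inner L3, 3 - custom field set. Hashing on inner
 * headers requires increased parsing depth.
 */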
11109 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
11110 struct mlxsw_sp_mp_hash_config *config)
11111 {
11112 u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
11113 unsigned long *headers = config->headers;
11114 unsigned long *fields = config->fields;
11115
11116 switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
11117 case 0:
11118 mlxsw_sp_mp6_hash_outer_addr(config);
11119 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11120 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11121 break;
11122 case 1:
11123 mlxsw_sp_mp6_hash_outer_addr(config);
11124 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
11125 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11126 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11127 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11128 break;
11129 case 2:
11130 /* Outer */
11131 mlxsw_sp_mp6_hash_outer_addr(config);
11132 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11133 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11134 /* Inner */
11135 mlxsw_sp_mp_hash_inner_l3(config);
11136 config->inc_parsing_depth = true;
11137 break;
11138 case 3:
11139 /* Outer */
11140 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
11141 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
11142 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
11143 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
11144 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
11145 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
11146 }
11147 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
11148 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
11149 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
11150 }
11151 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
11152 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11153 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
11154 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11155 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
11156 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11157 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
11158 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11159 /* Inner */
11160 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
11161 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
11162 config->inc_parsing_depth = true;
11163 break;
11164 }
11165 }
11166
11167 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
11168 bool old_inc_parsing_depth,
11169 bool new_inc_parsing_depth)
11170 {
11171 int err;
11172
11173 if (!old_inc_parsing_depth && new_inc_parsing_depth) {
11174 err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
11175 if (err)
11176 return err;
11177 mlxsw_sp->router->inc_parsing_depth = true;
11178 } else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
11179 mlxsw_sp_parsing_depth_dec(mlxsw_sp);
11180 mlxsw_sp->router->inc_parsing_depth = false;
11181 }
11182
11183 return 0;
11184 }
11185
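/* Seed the ECMP hash from the base MAC, so that different switches compute
 * different hashes, and program the chosen header/field sets through the
 * RECR2 register, adjusting the parsing depth if inner headers are hashed
 * on.
 */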
11186 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
11187 {
11188 bool old_inc_parsing_depth, new_inc_parsing_depth;
11189 struct mlxsw_sp_mp_hash_config config = {};
11190 char recr2_pl[MLXSW_REG_RECR2_LEN];
11191 unsigned long bit;
11192 u32 seed;
11193 int err;
11194
11195 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
11196 mlxsw_reg_recr2_pack(recr2_pl, seed);
11197 mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
11198 mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
11199
11200 old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
11201 new_inc_parsing_depth = config.inc_parsing_depth;
11202 err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
11203 old_inc_parsing_depth,
11204 new_inc_parsing_depth);
11205 if (err)
11206 return err;
11207
11208 for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
11209 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
11210 for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
11211 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
11212 for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
11213 mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
11214 for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
11215 mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
11216
11217 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
11218 if (err)
11219 goto err_reg_write;
11220
11221 return 0;
11222
11223 err_reg_write:
11224 mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
11225 old_inc_parsing_depth);
11226 return err;
11227 }
11228
11229 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
11230 {
11231 bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
11232
11233 mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
11234 false);
11235 }
11236 #else
11237 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
11238 {
11239 return 0;
11240 }
11241
11242 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
11243 {
11244 }
11245 #endif
11246
11247 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
11248 {
11249 char rdpm_pl[MLXSW_REG_RDPM_LEN];
11250 unsigned int i;
11251
11252 MLXSW_REG_ZERO(rdpm, rdpm_pl);
11253
11254 /* HW determines the switch priority based on DSCP bits, but the
11255 * kernel still does so based on the full ToS field. Because of this
11256 * bit mismatch, translate each DSCP value to the ToS value the
11257 * kernel would observe, skipping the 2 least-significant ECN bits.
11258 */
11259 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
11260 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
11261
11262 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
11263 }
11264
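/* Enable the router via RGCR, setting the maximum number of RIFs and
 * whether packet priority should be updated based on the
 * net.ipv4.ip_forward_update_priority sysctl.
 */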
11265 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
11266 {
11267 struct net *net = mlxsw_sp_net(mlxsw_sp);
11268 char rgcr_pl[MLXSW_REG_RGCR_LEN];
11269 u64 max_rifs;
11270 bool usp;
11271
11272 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
11273 return -EIO;
11274 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
11275 usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
11276
11277 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
11278 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
11279 mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
11280 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
11281 }
11282
11283 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
11284 {
11285 char rgcr_pl[MLXSW_REG_RGCR_LEN];
11286
11287 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
11288 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
11289 }
11290
11291 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp,
11292 struct netlink_ext_ack *extack)
11293 {
11294 struct mlxsw_sp_router *router = mlxsw_sp->router;
11295 struct mlxsw_sp_rif *lb_rif;
11296 int err;
11297
11298 router->lb_crif = mlxsw_sp_crif_alloc(NULL);
11299 if (!router->lb_crif)
11300 return -ENOMEM;
11301
11302 /* Create a generic loopback RIF associated with the main table
11303 * (default VRF). Any table can be used, but the main table exists
11304 * anyway, so we do not waste resources. Loopback RIFs are usually
11305 * created with a NULL CRIF, but this RIF is used as a fallback RIF
11306 * for blackhole nexthops, and nexthops expect to have a valid CRIF.
11307 */
11308 lb_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN, router->lb_crif,
11309 extack);
11310 if (IS_ERR(lb_rif)) {
11311 err = PTR_ERR(lb_rif);
11312 goto err_ul_rif_get;
11313 }
11314
11315 return 0;
11316
11317 err_ul_rif_get:
11318 mlxsw_sp_crif_free(router->lb_crif);
11319 return err;
11320 }
11321
11322 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
11323 {
11324 mlxsw_sp_ul_rif_put(mlxsw_sp->router->lb_crif->rif);
11325 mlxsw_sp_crif_free(mlxsw_sp->router->lb_crif);
11326 }
11327
11328 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
11329 {
11330 size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
11331
11332 mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
11333 mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
11334 mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
11335
11336 return 0;
11337 }
11338
11339 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
11340 .init = mlxsw_sp1_router_init,
11341 .ipips_init = mlxsw_sp1_ipips_init,
11342 };
11343
11344 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
11345 {
11346 size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
11347
11348 mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
11349 mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
11350 mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
11351
11352 return 0;
11353 }
11354
11355 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
11356 .init = mlxsw_sp2_router_init,
11357 .ipips_init = mlxsw_sp2_ipips_init,
11358 };
11359
11360 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
11361 struct netlink_ext_ack *extack)
11362 {
11363 struct mlxsw_sp_router *router;
11364 struct notifier_block *nb;
11365 int err;
11366
11367 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
11368 if (!router)
11369 return -ENOMEM;
11370 mutex_init(&router->lock);
11371 mlxsw_sp->router = router;
11372 router->mlxsw_sp = mlxsw_sp;
11373
11374 err = mlxsw_sp->router_ops->init(mlxsw_sp);
11375 if (err)
11376 goto err_router_ops_init;
11377
11378 INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
11379 INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
11380 mlxsw_sp_nh_grp_activity_work);
11381 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
11382 err = __mlxsw_sp_router_init(mlxsw_sp);
11383 if (err)
11384 goto err_router_init;
11385
11386 err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
11387 if (err)
11388 goto err_ipips_init;
11389
11390 err = rhashtable_init(&mlxsw_sp->router->crif_ht,
11391 &mlxsw_sp_crif_ht_params);
11392 if (err)
11393 goto err_crif_ht_init;
11394
11395 err = mlxsw_sp_rifs_init(mlxsw_sp);
11396 if (err)
11397 goto err_rifs_init;
11398
11399 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
11400 &mlxsw_sp_nexthop_ht_params);
11401 if (err)
11402 goto err_nexthop_ht_init;
11403
11404 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
11405 &mlxsw_sp_nexthop_group_ht_params);
11406 if (err)
11407 goto err_nexthop_group_ht_init;
11408
11409 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
11410 err = mlxsw_sp_lpm_init(mlxsw_sp);
11411 if (err)
11412 goto err_lpm_init;
11413
11414 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
11415 if (err)
11416 goto err_mr_init;
11417
11418 err = mlxsw_sp_vrs_init(mlxsw_sp);
11419 if (err)
11420 goto err_vrs_init;
11421
11422 err = mlxsw_sp_lb_rif_init(mlxsw_sp, extack);
11423 if (err)
11424 goto err_lb_rif_init;
11425
11426 err = mlxsw_sp_neigh_init(mlxsw_sp);
11427 if (err)
11428 goto err_neigh_init;
11429
11430 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
11431 if (err)
11432 goto err_mp_hash_init;
11433
11434 err = mlxsw_sp_dscp_init(mlxsw_sp);
11435 if (err)
11436 goto err_dscp_init;
11437
	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	router->inetaddr_valid_nb.notifier_call = mlxsw_sp_inetaddr_valid_event;
	err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
	if (err)
		goto err_register_inetaddr_valid_notifier;

	nb = &router->inet6addr_valid_nb;
	nb->notifier_call = mlxsw_sp_inet6addr_valid_event;
	err = register_inet6addr_validator_notifier(nb);
	if (err)
		goto err_register_inet6addr_valid_notifier;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	mlxsw_sp->router->netdevice_nb.notifier_call =
		mlxsw_sp_router_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->router->netdevice_nb);
	if (err)
		goto err_register_netdev_notifier;

	mlxsw_sp->router->nexthop_nb.notifier_call =
		mlxsw_sp_nexthop_obj_event;
	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
					&mlxsw_sp->router->nexthop_nb,
					extack);
	if (err)
		goto err_register_nexthop_notifier;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	return 0;

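	/* Error path: undo only the steps that already succeeded, in
	 * reverse order of initialization. Each label falls through to the
	 * teardown of all earlier steps.
	 */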
err_register_fib_notifier:
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &router->netdevice_nb);
err_register_netdev_notifier:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
err_register_inet6addr_valid_notifier:
	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_valid_notifier:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	mlxsw_core_flush_owq();
err_dscp_init:
	mlxsw_sp_mp_hash_fini(mlxsw_sp);
err_mp_hash_init:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
err_lb_rif_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	rhashtable_destroy(&mlxsw_sp->router->crif_ht);
err_crif_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
err_router_ops_init:
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
	return err;
}

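/* Tear the router down in the exact reverse order of
 * mlxsw_sp_router_init(): notifiers first, data structures last.
 */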
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;

	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &router->nexthop_nb);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &router->netdevice_nb);
	unregister_netevent_notifier(&router->netevent_nb);
	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
	unregister_inet6addr_notifier(&router->inet6addr_nb);
	unregister_inetaddr_notifier(&router->inetaddr_nb);
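	/* Flush the ordered workqueue so that any events queued by the
	 * notifiers just unregistered are done before teardown continues.
	 */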
	mlxsw_core_flush_owq();
	mlxsw_sp_mp_hash_fini(mlxsw_sp);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&router->nexthop_group_ht);
	rhashtable_destroy(&router->nexthop_ht);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	rhashtable_destroy(&router->crif_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	cancel_delayed_work_sync(&router->nh_grp_activity_dw);
	mutex_destroy(&router->lock);
	kfree(router);
}
