// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <linux/genalloc.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

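/* A CRIF ("candidate RIF") is the bookkeeping object kept per netdevice:
 * it tracks the nexthops that resolve through the device and, if a RIF is
 * currently configured for the device, points at it.
 */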
struct mlxsw_sp_crif_key {
	struct net_device *dev;
};

struct mlxsw_sp_crif {
	struct mlxsw_sp_crif_key key;
	struct rhash_head ht_node;
	bool can_destroy;
	struct list_head nexthop_list;
	struct mlxsw_sp_rif *rif;
};

static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_crif, key),
	.key_len = sizeof_field(struct mlxsw_sp_crif, key),
	.head_offset = offsetof(struct mlxsw_sp_crif, ht_node),
};

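/* A RIF (router interface) is the hardware counterpart of an L3 netdevice.
 * rif->crif is NULL only for the underlay RIF used for IP-in-IP on
 * Spectrum-2 and later (see ul_rif below).
 */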
struct mlxsw_sp_rif {
	struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */
	netdevice_tracker dev_tracker;
	struct list_head neigh_list;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u8 mac_profile_id;
	u8 rif_entries;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

static struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	if (!rif->crif)
		return NULL;
	return rif->crif->key.dev;
}

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
	bool double_entry;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id;	/* Spectrum-1. */
	u16 ul_rif_id;	/* Spectrum-2+. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 const struct mlxsw_sp_rif_params *params,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

struct mlxsw_sp_rif_mac_profile {
	unsigned char mac_prefix[ETH_ALEN];
	refcount_t ref_count;
	u8 id;
};

struct mlxsw_sp_router_ops {
	int (*init)(struct mlxsw_sp *mlxsw_sp);
	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

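/* Bind or unbind a flow counter to a RIF. RITR is read-modify-write:
 * query the current RIF configuration first, then write it back with only
 * the counter enablement changed, preserving the rest of the RIF state.
 */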
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

struct mlxsw_sp_rif_counter_set_basic {
	u64 good_unicast_packets;
	u64 good_multicast_packets;
	u64 good_broadcast_packets;
	u64 good_unicast_bytes;
	u64 good_multicast_bytes;
	u64 good_broadcast_bytes;
	u64 error_packets;
	u64 discard_packets;
	u64 error_bytes;
	u64 discard_bytes;
};

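/* Read the full basic counter set of a RIF and clear it in hardware in one
 * RICNT query using the CLEAR opcode. Passing a NULL set just clears the
 * counters.
 */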
static int
mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
				 enum mlxsw_sp_rif_counter_dir dir,
				 struct mlxsw_sp_rif_counter_set_basic *set)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	int err;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;

	if (!set)
		return 0;

#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))

	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);

#undef MLXSW_SP_RIF_COUNTER_EXTRACT

	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

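/* Allocate a counter from the RIF counter sub-pool, clear any stale value
 * it may hold, and bind it to the RIF in the given direction. The call is
 * idempotent: an already-valid counter is left as-is.
 */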
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;
	int err;

	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
		return 0;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

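/* One bit per possible prefix length: IPv6 allows lengths 0..128, hence
 * sizeof(struct in6_addr) * BITS_PER_BYTE + 1 = 129 bits. The same bitmap
 * also covers IPv4 (lengths 0..32).
 */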
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	struct fib_info *fi;
	u32 tb_id;
	dscp_t dscp;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

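/* An LPM tree is a hardware structure shared by all virtual routers whose
 * FIBs use the same set of prefix lengths for a given protocol. Trees are
 * reference-counted; prefix_ref_count tracks how many routes use each
 * prefix length.
 */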
struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	refcount_t ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (refcount_read(&lpm_tree->ref_count) == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

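/* Program the tree layout (RALST): the longest used prefix length becomes
 * the root bin, and each used prefix length is chained to the next shorter
 * one as its left child, with no right children.
 */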
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	refcount_set(&lpm_tree->ref_count, 1);
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (refcount_read(&lpm_tree->ref_count) &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	refcount_inc(&lpm_tree->ref_count);
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (!refcount_dec_and_test(&lpm_tree->ref_count))
		return;
	mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 }};
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0, which is the default. */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

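/* A virtual router is instantiated with unicast FIBs for both protocols
 * and a multicast routing table per protocol; it only counts as used (see
 * mlxsw_sp_vr_is_used()) once all four are assigned at the end.
 */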
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

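/* VRs are reclaimed lazily: a VR is only destroyed once it has no RIFs and
 * all of its unicast and multicast tables are empty.
 */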
static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

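/* Rebind every virtual router that currently uses the protocol's default
 * LPM tree to new_tree. On failure, VRs that were already migrated are
 * rolled back to the old tree.
 */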
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static void
mlxsw_sp_crif_init(struct mlxsw_sp_crif *crif, struct net_device *dev)
{
	crif->key.dev = dev;
	INIT_LIST_HEAD(&crif->nexthop_list);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_alloc(struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;

	crif = kzalloc(sizeof(*crif), GFP_KERNEL);
	if (!crif)
		return NULL;

	mlxsw_sp_crif_init(crif, dev);
	return crif;
}

static void mlxsw_sp_crif_free(struct mlxsw_sp_crif *crif)
{
	if (WARN_ON(crif->rif))
		return;

	WARN_ON(!list_empty(&crif->nexthop_list));
	kfree(crif);
}

static int mlxsw_sp_crif_insert(struct mlxsw_sp_router *router,
				struct mlxsw_sp_crif *crif)
{
	return rhashtable_insert_fast(&router->crif_ht, &crif->ht_node,
				      mlxsw_sp_crif_ht_params);
}

static void mlxsw_sp_crif_remove(struct mlxsw_sp_router *router,
				 struct mlxsw_sp_crif *crif)
{
	rhashtable_remove_fast(&router->crif_ht, &crif->ht_node,
			       mlxsw_sp_crif_ht_params);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_lookup(struct mlxsw_sp_router *router,
		     const struct net_device *dev)
{
	struct mlxsw_sp_crif_key key = {
		.dev = (struct net_device *)dev,
	};

	return rhashtable_lookup_fast(&router->crif_ht, &key,
				      mlxsw_sp_crif_ht_params);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.common.double_entry = ipip_ops->double_rif_entry,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = ipip_ops->parms_init(ol_dev);

	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

err_rem_ip_addr_set:
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
						 enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* Not all tunnel types require increasing the default parsing depth
	 * (96 bytes).
	 */
	if (ipip_ops->inc_parsing_depth)
		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);

	return 0;
}

static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
						  enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipipt];

	if (ipip_ops->inc_parsing_depth)
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
}

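/* Wire a FIB entry up for tunnel decapsulation: allocate the KVDL
 * adjacency entry the decap route will point at, and bump the parsing
 * depth if the tunnel type requires it.
 */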
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
						    ipip_entry->ipipt);
	if (err)
		goto err_parsing_depth_inc;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;

	return 0;

err_parsing_depth_inc:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   tunnel_index);
	return err;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;

	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

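/* Demotion turns an IPIP_DECAP entry back into a plain TRAP entry;
 * promotion does the opposite for a newly matched decap route. Both update
 * the hardware entry in place.
 */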
static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		addrp = &addr->addr6;
		addr_len = 16;
		addr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		saddrp = &saddr.addr6;
		saddr_len = 16;
		saddr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	struct net_device *ul_dev;

	rcu_read_lock();

	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
	if (!ul_dev)
		goto out_unlock;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			goto out_unlock;

	rcu_read_unlock();

	return NULL;

out_unlock:
	rcu_read_unlock();
	return ipip_entry;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	return ops->can_offload(mlxsw_sp, ol_dev);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

1617
mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp * mlxsw_sp,struct net_device * ol_dev)1618 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1619 struct net_device *ol_dev)
1620 {
1621 struct mlxsw_sp_ipip_entry *ipip_entry;
1622
1623 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1624 if (ipip_entry)
1625 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1626 }
1627
1628 static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ipip_entry * ipip_entry)1629 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1630 struct mlxsw_sp_ipip_entry *ipip_entry)
1631 {
1632 struct mlxsw_sp_fib_entry *decap_fib_entry;
1633
1634 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1635 if (decap_fib_entry)
1636 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1637 decap_fib_entry);
1638 }
1639
1640 static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb * lb_rif,u16 ul_vr_id,u16 ul_rif_id,bool enable)1641 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1642 u16 ul_rif_id, bool enable)
1643 {
1644 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1645 struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
1646 enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
1647 struct mlxsw_sp_rif *rif = &lb_rif->common;
1648 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1649 char ritr_pl[MLXSW_REG_RITR_LEN];
1650 struct in6_addr *saddr6;
1651 u32 saddr4;
1652
1653 ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
1654 switch (lb_cf.ul_protocol) {
1655 case MLXSW_SP_L3_PROTO_IPV4:
1656 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1657 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1658 rif->rif_index, rif->vr_id, dev->mtu);
1659 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1660 ipip_options, ul_vr_id,
1661 ul_rif_id, saddr4,
1662 lb_cf.okey);
1663 break;
1664
1665 case MLXSW_SP_L3_PROTO_IPV6:
1666 saddr6 = &lb_cf.saddr.addr6;
1667 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1668 rif->rif_index, rif->vr_id, dev->mtu);
1669 mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
1670 ipip_options, ul_vr_id,
1671 ul_rif_id, saddr6,
1672 lb_cf.okey);
1673 break;
1674 }
1675
1676 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1677 }
1678
1679 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1680 struct net_device *ol_dev)
1681 {
1682 struct mlxsw_sp_ipip_entry *ipip_entry;
1683 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1684 int err = 0;
1685
1686 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1687 if (ipip_entry) {
1688 lb_rif = ipip_entry->ol_lb;
1689 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1690 lb_rif->ul_rif_id, true);
1691 if (err)
1692 goto out;
1693 lb_rif->common.mtu = ol_dev->mtu;
1694 }
1695
1696 out:
1697 return err;
1698 }
1699
1700 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1701 struct net_device *ol_dev)
1702 {
1703 struct mlxsw_sp_ipip_entry *ipip_entry;
1704
1705 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1706 if (ipip_entry)
1707 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1708 }
1709
1710 static void
1711 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1712 struct mlxsw_sp_ipip_entry *ipip_entry)
1713 {
1714 if (ipip_entry->decap_fib_entry)
1715 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1716 }
1717
1718 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1719 struct net_device *ol_dev)
1720 {
1721 struct mlxsw_sp_ipip_entry *ipip_entry;
1722
1723 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1724 if (ipip_entry)
1725 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1726 }
1727
1728 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1729 struct mlxsw_sp_rif *rif);
1730
1731 static void mlxsw_sp_rif_migrate_destroy(struct mlxsw_sp *mlxsw_sp,
1732 struct mlxsw_sp_rif *old_rif,
1733 struct mlxsw_sp_rif *new_rif,
1734 bool migrate_nhs)
1735 {
1736 struct mlxsw_sp_crif *crif = old_rif->crif;
1737 struct mlxsw_sp_crif mock_crif = {};
1738
1739 if (migrate_nhs)
1740 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
1741
1742 /* Plant a mock CRIF so that destroying the old RIF doesn't unoffload
1743 * our nexthops and IPIP tunnels, and doesn't sever the crif->rif link.
1744 */
1745 mlxsw_sp_crif_init(&mock_crif, crif->key.dev);
1746 old_rif->crif = &mock_crif;
1747 mock_crif.rif = old_rif;
1748 mlxsw_sp_rif_destroy(old_rif);
1749 }
1750
1751 static int
1752 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1753 struct mlxsw_sp_ipip_entry *ipip_entry,
1754 bool keep_encap,
1755 struct netlink_ext_ack *extack)
1756 {
1757 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1758 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1759
1760 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1761 ipip_entry->ipipt,
1762 ipip_entry->ol_dev,
1763 extack);
1764 if (IS_ERR(new_lb_rif))
1765 return PTR_ERR(new_lb_rif);
1766 ipip_entry->ol_lb = new_lb_rif;
1767
1768 mlxsw_sp_rif_migrate_destroy(mlxsw_sp, &old_lb_rif->common,
1769 &new_lb_rif->common, keep_encap);
1770 return 0;
1771 }
1772
1773 /**
1774 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1775 * @mlxsw_sp: mlxsw_sp.
1776 * @ipip_entry: IPIP entry.
1777 * @recreate_loopback: Recreates the associated loopback RIF.
1778 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1779 * relevant when recreate_loopback is true.
1780 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1781 * is only relevant when recreate_loopback is false.
1782 * @extack: extack.
1783 *
1784 * Return: Non-zero value on failure.
1785 */
1786 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1787 struct mlxsw_sp_ipip_entry *ipip_entry,
1788 bool recreate_loopback,
1789 bool keep_encap,
1790 bool update_nexthops,
1791 struct netlink_ext_ack *extack)
1792 {
1793 int err;
1794
1795 /* RIFs can't be edited, so to update loopback, we need to destroy and
1796 * recreate it. That creates a window of opportunity where RALUE and
1797 * RATR registers end up referencing a RIF that's already gone. RATRs
1798 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1799 * of RALUE, demote the decap route back.
1800 */
1801 if (ipip_entry->decap_fib_entry)
1802 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1803
1804 if (recreate_loopback) {
1805 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1806 keep_encap, extack);
1807 if (err)
1808 return err;
1809 } else if (update_nexthops) {
1810 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1811 &ipip_entry->ol_lb->common);
1812 }
1813
1814 if (ipip_entry->ol_dev->flags & IFF_UP)
1815 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1816
1817 return 0;
1818 }
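
/* For reference, the flag combinations used by the callers below are:
 * overlay device moved to a VRF - recreate_loopback without keep_encap;
 * underlay device moved to a VRF - recreate_loopback with keep_encap;
 * underlay device went up or down - update_nexthops only.
 */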
1819
1820 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1821 struct net_device *ol_dev,
1822 struct netlink_ext_ack *extack)
1823 {
1824 struct mlxsw_sp_ipip_entry *ipip_entry =
1825 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1826
1827 if (!ipip_entry)
1828 return 0;
1829
1830 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1831 true, false, false, extack);
1832 }
1833
1834 static int
1835 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1836 struct mlxsw_sp_ipip_entry *ipip_entry,
1837 struct net_device *ul_dev,
1838 bool *demote_this,
1839 struct netlink_ext_ack *extack)
1840 {
1841 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1842 enum mlxsw_sp_l3proto ul_proto;
1843 union mlxsw_sp_l3addr saddr;
1844
1845 /* Moving the underlay to a different VRF might cause a local address
1846 * conflict, in which case the conflicting tunnels need to be demoted.
1847 */
1848 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1849 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1850 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1851 saddr, ul_tb_id,
1852 ipip_entry)) {
1853 *demote_this = true;
1854 return 0;
1855 }
1856
1857 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1858 true, true, false, extack);
1859 }
1860
1861 static int
1862 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1863 struct mlxsw_sp_ipip_entry *ipip_entry,
1864 struct net_device *ul_dev)
1865 {
1866 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1867 false, false, true, NULL);
1868 }
1869
1870 static int
1871 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1872 struct mlxsw_sp_ipip_entry *ipip_entry,
1873 struct net_device *ul_dev)
1874 {
1875 /* When the underlay device is down, encapsulated packets are not
1876 * forwarded, but decap still works. So refresh the nexthops without
1877 * touching anything else.
1878 */
1879 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1880 false, false, true, NULL);
1881 }
1882
1883 static int
1884 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1885 struct net_device *ol_dev,
1886 struct netlink_ext_ack *extack)
1887 {
1888 const struct mlxsw_sp_ipip_ops *ipip_ops;
1889 struct mlxsw_sp_ipip_entry *ipip_entry;
1890 int err;
1891
1892 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1893 if (!ipip_entry)
1894 /* A change might make a tunnel eligible for offloading, but
1895 * that is currently not implemented. What falls to the slow path
1896 * stays there.
1897 */
1898 return 0;
1899
1900 /* A change might make a tunnel not eligible for offloading. */
1901 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1902 ipip_entry->ipipt)) {
1903 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1904 return 0;
1905 }
1906
1907 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1908 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1909 return err;
1910 }
1911
1912 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1913 struct mlxsw_sp_ipip_entry *ipip_entry)
1914 {
1915 struct net_device *ol_dev = ipip_entry->ol_dev;
1916
1917 if (ol_dev->flags & IFF_UP)
1918 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1919 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1920 }
1921
1922 /* The configuration where several tunnels have the same local address in the
1923 * same underlay table needs special treatment in the HW. That is currently not
1924 * implemented in the driver. This function finds and demotes the first tunnel
1925 * with a given source address, except the one passed in the argument
1926 * `except'.
1927 */
1928 bool
1929 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1930 enum mlxsw_sp_l3proto ul_proto,
1931 union mlxsw_sp_l3addr saddr,
1932 u32 ul_tb_id,
1933 const struct mlxsw_sp_ipip_entry *except)
1934 {
1935 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1936
1937 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1938 ipip_list_node) {
1939 if (ipip_entry != except &&
1940 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1941 ul_tb_id, ipip_entry)) {
1942 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1943 return true;
1944 }
1945 }
1946
1947 return false;
1948 }
1949
1950 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1951 struct net_device *ul_dev)
1952 {
1953 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1954
1955 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1956 ipip_list_node) {
1957 struct net_device *ol_dev = ipip_entry->ol_dev;
1958 struct net_device *ipip_ul_dev;
1959
1960 rcu_read_lock();
1961 ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1962 rcu_read_unlock();
1963 if (ipip_ul_dev == ul_dev)
1964 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1965 }
1966 }
1967
1968 static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1969 struct net_device *ol_dev,
1970 unsigned long event,
1971 struct netdev_notifier_info *info)
1972 {
1973 struct netdev_notifier_changeupper_info *chup;
1974 struct netlink_ext_ack *extack;
1975 int err = 0;
1976
1977 switch (event) {
1978 case NETDEV_REGISTER:
1979 err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1980 break;
1981 case NETDEV_UNREGISTER:
1982 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1983 break;
1984 case NETDEV_UP:
1985 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1986 break;
1987 case NETDEV_DOWN:
1988 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1989 break;
1990 case NETDEV_CHANGEUPPER:
1991 chup = container_of(info, typeof(*chup), info);
1992 extack = info->extack;
1993 if (netif_is_l3_master(chup->upper_dev))
1994 err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1995 ol_dev,
1996 extack);
1997 break;
1998 case NETDEV_CHANGE:
1999 extack = info->extack;
2000 err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
2001 ol_dev, extack);
2002 break;
2003 case NETDEV_CHANGEMTU:
2004 err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
2005 break;
2006 }
2007 return err;
2008 }
2009
2010 static int
2011 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2012 struct mlxsw_sp_ipip_entry *ipip_entry,
2013 struct net_device *ul_dev,
2014 bool *demote_this,
2015 unsigned long event,
2016 struct netdev_notifier_info *info)
2017 {
2018 struct netdev_notifier_changeupper_info *chup;
2019 struct netlink_ext_ack *extack;
2020
2021 switch (event) {
2022 case NETDEV_CHANGEUPPER:
2023 chup = container_of(info, typeof(*chup), info);
2024 extack = info->extack;
2025 if (netif_is_l3_master(chup->upper_dev))
2026 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
2027 ipip_entry,
2028 ul_dev,
2029 demote_this,
2030 extack);
2031 break;
2032
2033 case NETDEV_UP:
2034 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
2035 ul_dev);
2036 case NETDEV_DOWN:
2037 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
2038 ipip_entry,
2039 ul_dev);
2040 }
2041 return 0;
2042 }
2043
2044 static int
2045 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2046 struct net_device *ul_dev,
2047 unsigned long event,
2048 struct netdev_notifier_info *info)
2049 {
2050 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
2051 int err;
2052
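/* Walk the tunnels using find-next rather than plain list iteration, so
 * that the walk survives demotion (removal) of the current entry.
 */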
2053 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
2054 ul_dev,
2055 ipip_entry))) {
2056 struct mlxsw_sp_ipip_entry *prev;
2057 bool demote_this = false;
2058
2059 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
2060 ul_dev, &demote_this,
2061 event, info);
2062 if (err) {
2063 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
2064 ul_dev);
2065 return err;
2066 }
2067
2068 if (demote_this) {
2069 if (list_is_first(&ipip_entry->ipip_list_node,
2070 &mlxsw_sp->router->ipip_list))
2071 prev = NULL;
2072 else
2073 /* This can't be cached from previous iteration,
2074 * because that entry could be gone now.
2075 */
2076 prev = list_prev_entry(ipip_entry,
2077 ipip_list_node);
2078 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
2079 ipip_entry = prev;
2080 }
2081 }
2082
2083 return 0;
2084 }
2085
2086 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2087 enum mlxsw_sp_l3proto ul_proto,
2088 const union mlxsw_sp_l3addr *ul_sip,
2089 u32 tunnel_index)
2090 {
2091 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2092 struct mlxsw_sp_router *router = mlxsw_sp->router;
2093 struct mlxsw_sp_fib_entry *fib_entry;
2094 int err = 0;
2095
2096 mutex_lock(&mlxsw_sp->router->lock);
2097
2098 if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
2099 err = -EINVAL;
2100 goto out;
2101 }
2102
2103 router->nve_decap_config.ul_tb_id = ul_tb_id;
2104 router->nve_decap_config.tunnel_index = tunnel_index;
2105 router->nve_decap_config.ul_proto = ul_proto;
2106 router->nve_decap_config.ul_sip = *ul_sip;
2107 router->nve_decap_config.valid = true;
2108
2109 /* It is valid to create a tunnel with a local IP and only later
2110 * assign this IP address to a local interface.
2111 */
2112 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2113 ul_proto, ul_sip,
2114 type);
2115 if (!fib_entry)
2116 goto out;
2117
2118 fib_entry->decap.tunnel_index = tunnel_index;
2119 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2120
2121 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2122 if (err)
2123 goto err_fib_entry_update;
2124
2125 goto out;
2126
2127 err_fib_entry_update:
2128 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2129 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2130 out:
2131 mutex_unlock(&mlxsw_sp->router->lock);
2132 return err;
2133 }
2134
2135 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2136 enum mlxsw_sp_l3proto ul_proto,
2137 const union mlxsw_sp_l3addr *ul_sip)
2138 {
2139 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2140 struct mlxsw_sp_router *router = mlxsw_sp->router;
2141 struct mlxsw_sp_fib_entry *fib_entry;
2142
2143 mutex_lock(&mlxsw_sp->router->lock);
2144
2145 if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2146 goto out;
2147
2148 router->nve_decap_config.valid = false;
2149
2150 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2151 ul_proto, ul_sip,
2152 type);
2153 if (!fib_entry)
2154 goto out;
2155
2156 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2157 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2158 out:
2159 mutex_unlock(&mlxsw_sp->router->lock);
2160 }
2161
2162 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2163 u32 ul_tb_id,
2164 enum mlxsw_sp_l3proto ul_proto,
2165 const union mlxsw_sp_l3addr *ul_sip)
2166 {
2167 struct mlxsw_sp_router *router = mlxsw_sp->router;
2168
2169 return router->nve_decap_config.valid &&
2170 router->nve_decap_config.ul_tb_id == ul_tb_id &&
2171 router->nve_decap_config.ul_proto == ul_proto &&
2172 !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2173 sizeof(*ul_sip));
2174 }
2175
2176 struct mlxsw_sp_neigh_key {
2177 struct neighbour *n;
2178 };
2179
2180 struct mlxsw_sp_neigh_entry {
2181 struct list_head rif_list_node;
2182 struct rhash_head ht_node;
2183 struct mlxsw_sp_neigh_key key;
2184 u16 rif;
2185 bool connected;
2186 unsigned char ha[ETH_ALEN];
2187 struct list_head nexthop_list; /* list of nexthops using
2188 * this neigh entry
2189 */
2190 struct list_head nexthop_neighs_list_node;
2191 unsigned int counter_index;
2192 bool counter_valid;
2193 };
2194
2195 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2196 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2197 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2198 .key_len = sizeof(struct mlxsw_sp_neigh_key),
2199 };
2200
2201 struct mlxsw_sp_neigh_entry *
2202 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2203 struct mlxsw_sp_neigh_entry *neigh_entry)
2204 {
2205 if (!neigh_entry) {
2206 if (list_empty(&rif->neigh_list))
2207 return NULL;
2208 else
2209 return list_first_entry(&rif->neigh_list,
2210 typeof(*neigh_entry),
2211 rif_list_node);
2212 }
2213 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2214 return NULL;
2215 return list_next_entry(neigh_entry, rif_list_node);
2216 }
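
/* Illustrative sketch (not part of the driver): mlxsw_sp_rif_neigh_next()
 * is a cursor-style iterator - pass NULL to start, the previous return
 * value to advance. A hypothetical walker over a RIF's neighbour list;
 * the _example name is made up:
 */
static void __maybe_unused
mlxsw_sp_rif_neigh_example_walk(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = NULL;

	while ((neigh_entry = mlxsw_sp_rif_neigh_next(rif, neigh_entry)))
		pr_debug("neigh entry on RIF %u\n", neigh_entry->rif);
}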
2217
2218 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2219 {
2220 return neigh_entry->key.n->tbl->family;
2221 }
2222
2223 unsigned char *
2224 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2225 {
2226 return neigh_entry->ha;
2227 }
2228
2229 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2230 {
2231 struct neighbour *n;
2232
2233 n = neigh_entry->key.n;
2234 return ntohl(*((__be32 *) n->primary_key));
2235 }
2236
2237 struct in6_addr *
2238 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2239 {
2240 struct neighbour *n;
2241
2242 n = neigh_entry->key.n;
2243 return (struct in6_addr *) &n->primary_key;
2244 }
2245
2246 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2247 struct mlxsw_sp_neigh_entry *neigh_entry,
2248 u64 *p_counter)
2249 {
2250 if (!neigh_entry->counter_valid)
2251 return -EINVAL;
2252
2253 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2254 p_counter, NULL);
2255 }
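
/* Illustrative sketch (not part of the driver): reading a neighbour
 * activity counter via the accessor above; it fails with -EINVAL when no
 * counter was allocated. The _example name is made up.
 */
static void __maybe_unused
mlxsw_sp_neigh_counter_example_read(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	u64 packets;

	if (mlxsw_sp_neigh_counter_get(mlxsw_sp, neigh_entry, &packets))
		return; /* no counter allocated for this entry */

	pr_debug("neigh counter: %llu packets\n", packets);
}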
2256
2257 static struct mlxsw_sp_neigh_entry *
2258 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2259 u16 rif)
2260 {
2261 struct mlxsw_sp_neigh_entry *neigh_entry;
2262
2263 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2264 if (!neigh_entry)
2265 return NULL;
2266
2267 neigh_entry->key.n = n;
2268 neigh_entry->rif = rif;
2269 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2270
2271 return neigh_entry;
2272 }
2273
2274 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2275 {
2276 kfree(neigh_entry);
2277 }
2278
2279 static int
2280 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2281 struct mlxsw_sp_neigh_entry *neigh_entry)
2282 {
2283 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2284 &neigh_entry->ht_node,
2285 mlxsw_sp_neigh_ht_params);
2286 }
2287
2288 static void
2289 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2290 struct mlxsw_sp_neigh_entry *neigh_entry)
2291 {
2292 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2293 &neigh_entry->ht_node,
2294 mlxsw_sp_neigh_ht_params);
2295 }
2296
2297 static bool
2298 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2299 struct mlxsw_sp_neigh_entry *neigh_entry)
2300 {
2301 struct devlink *devlink;
2302 const char *table_name;
2303
2304 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2305 case AF_INET:
2306 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2307 break;
2308 case AF_INET6:
2309 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2310 break;
2311 default:
2312 WARN_ON(1);
2313 return false;
2314 }
2315
2316 devlink = priv_to_devlink(mlxsw_sp->core);
2317 return devlink_dpipe_table_counter_enabled(devlink, table_name);
2318 }
2319
2320 static void
2321 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2322 struct mlxsw_sp_neigh_entry *neigh_entry)
2323 {
2324 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2325 return;
2326
2327 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2328 return;
2329
2330 neigh_entry->counter_valid = true;
2331 }
2332
2333 static void
2334 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2335 struct mlxsw_sp_neigh_entry *neigh_entry)
2336 {
2337 if (!neigh_entry->counter_valid)
2338 return;
2339 mlxsw_sp_flow_counter_free(mlxsw_sp,
2340 neigh_entry->counter_index);
2341 neigh_entry->counter_valid = false;
2342 }
2343
2344 static struct mlxsw_sp_neigh_entry *
2345 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2346 {
2347 struct mlxsw_sp_neigh_entry *neigh_entry;
2348 struct mlxsw_sp_rif *rif;
2349 int err;
2350
2351 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2352 if (!rif)
2353 return ERR_PTR(-EINVAL);
2354
2355 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2356 if (!neigh_entry)
2357 return ERR_PTR(-ENOMEM);
2358
2359 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2360 if (err)
2361 goto err_neigh_entry_insert;
2362
2363 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2364 atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
2365 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2366
2367 return neigh_entry;
2368
2369 err_neigh_entry_insert:
2370 mlxsw_sp_neigh_entry_free(neigh_entry);
2371 return ERR_PTR(err);
2372 }
2373
2374 static void
2375 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2376 struct mlxsw_sp_neigh_entry *neigh_entry)
2377 {
2378 list_del(&neigh_entry->rif_list_node);
2379 atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
2380 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2381 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2382 mlxsw_sp_neigh_entry_free(neigh_entry);
2383 }
2384
2385 static struct mlxsw_sp_neigh_entry *
2386 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2387 {
2388 struct mlxsw_sp_neigh_key key;
2389
2390 key.n = n;
2391 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2392 &key, mlxsw_sp_neigh_ht_params);
2393 }
2394
2395 static void
2396 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2397 {
2398 unsigned long interval;
2399
2400 #if IS_ENABLED(CONFIG_IPV6)
2401 interval = min_t(unsigned long,
2402 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2403 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2404 #else
2405 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2406 #endif
2407 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2408 }
2409
2410 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2411 char *rauhtd_pl,
2412 int ent_index)
2413 {
2414 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2415 struct net_device *dev;
2416 struct neighbour *n;
2417 __be32 dipn;
2418 u32 dip;
2419 u16 rif;
2420
2421 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2422
2423 if (WARN_ON_ONCE(rif >= max_rifs))
2424 return;
2425 if (!mlxsw_sp->router->rifs[rif]) {
2426 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2427 return;
2428 }
2429
2430 dipn = htonl(dip);
2431 dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2432 n = neigh_lookup(&arp_tbl, &dipn, dev);
2433 if (!n)
2434 return;
2435
2436 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2437 neigh_event_send(n, NULL);
2438 neigh_release(n);
2439 }
2440
2441 #if IS_ENABLED(CONFIG_IPV6)
2442 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2443 char *rauhtd_pl,
2444 int rec_index)
2445 {
2446 struct net_device *dev;
2447 struct neighbour *n;
2448 struct in6_addr dip;
2449 u16 rif;
2450
2451 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2452 (char *) &dip);
2453
2454 if (!mlxsw_sp->router->rifs[rif]) {
2455 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2456 return;
2457 }
2458
2459 dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2460 n = neigh_lookup(&nd_tbl, &dip, dev);
2461 if (!n)
2462 return;
2463
2464 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2465 neigh_event_send(n, NULL);
2466 neigh_release(n);
2467 }
2468 #else
2469 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2470 char *rauhtd_pl,
2471 int rec_index)
2472 {
2473 }
2474 #endif
2475
2476 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2477 char *rauhtd_pl,
2478 int rec_index)
2479 {
2480 u8 num_entries;
2481 int i;
2482
2483 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2484 rec_index);
2485 /* Hardware starts counting at 0, so add 1. */
2486 num_entries++;
2487
2488 /* Each record consists of several neighbour entries. */
2489 for (i = 0; i < num_entries; i++) {
2490 int ent_index;
2491
2492 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2493 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2494 ent_index);
2495 }
2496
2497 }
2498
2499 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2500 char *rauhtd_pl,
2501 int rec_index)
2502 {
2503 /* One record contains one entry. */
2504 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2505 rec_index);
2506 }
2507
2508 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2509 char *rauhtd_pl, int rec_index)
2510 {
2511 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2512 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2513 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2514 rec_index);
2515 break;
2516 case MLXSW_REG_RAUHTD_TYPE_IPV6:
2517 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2518 rec_index);
2519 break;
2520 }
2521 }
2522
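/* The dump is considered full when the maximum number of records was
 * returned and the last record is itself full: an IPv6 record always
 * carries a single entry, while an IPv4 record is full once it carries
 * the per-record maximum number of entries.
 */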
2523 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2524 {
2525 u8 num_rec, last_rec_index, num_entries;
2526
2527 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2528 last_rec_index = num_rec - 1;
2529
2530 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2531 return false;
2532 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2533 MLXSW_REG_RAUHTD_TYPE_IPV6)
2534 return true;
2535
2536 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2537 last_rec_index);
2538 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2539 return true;
2540 return false;
2541 }
2542
2543 static int
2544 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2545 char *rauhtd_pl,
2546 enum mlxsw_reg_rauhtd_type type)
2547 {
2548 int i, num_rec;
2549 int err;
2550
2551 /* Ensure the RIF we read from the device does not change mid-dump. */
2552 mutex_lock(&mlxsw_sp->router->lock);
2553 do {
2554 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2555 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2556 rauhtd_pl);
2557 if (err) {
2558 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2559 break;
2560 }
2561 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2562 for (i = 0; i < num_rec; i++)
2563 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2564 i);
2565 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2566 mutex_unlock(&mlxsw_sp->router->lock);
2567
2568 return err;
2569 }
2570
2571 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2572 {
2573 enum mlxsw_reg_rauhtd_type type;
2574 char *rauhtd_pl;
2575 int err;
2576
2577 if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
2578 return 0;
2579
2580 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2581 if (!rauhtd_pl)
2582 return -ENOMEM;
2583
2584 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2585 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2586 if (err)
2587 goto out;
2588
2589 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2590 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2591 out:
2592 kfree(rauhtd_pl);
2593 return err;
2594 }
2595
2596 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2597 {
2598 struct mlxsw_sp_neigh_entry *neigh_entry;
2599
2600 mutex_lock(&mlxsw_sp->router->lock);
2601 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2602 nexthop_neighs_list_node)
2603 /* If this neigh has nexthops, make the kernel think it is
2604 * active regardless of the traffic.
2605 */
2606 neigh_event_send(neigh_entry->key.n, NULL);
2607 mutex_unlock(&mlxsw_sp->router->lock);
2608 }
2609
2610 static void
2611 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2612 {
2613 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2614
2615 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2616 msecs_to_jiffies(interval));
2617 }
2618
2619 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2620 {
2621 struct mlxsw_sp_router *router;
2622 int err;
2623
2624 router = container_of(work, struct mlxsw_sp_router,
2625 neighs_update.dw.work);
2626 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2627 if (err)
2628 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2629
2630 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2631
2632 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2633 }
2634
2635 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2636 {
2637 struct mlxsw_sp_neigh_entry *neigh_entry;
2638 struct mlxsw_sp_router *router;
2639
2640 router = container_of(work, struct mlxsw_sp_router,
2641 nexthop_probe_dw.work);
2642 /* Iterate over nexthop neighbours, find those that are unresolved and
2643 * send ARP requests to them. This solves the chicken-and-egg problem
2644 * where a nexthop is not offloaded until its neighbour is resolved,
2645 * but the neighbour is never resolved as long as traffic flows in HW
2646 * via a different nexthop.
2647 */
2648 mutex_lock(&router->lock);
2649 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2650 nexthop_neighs_list_node)
2651 if (!neigh_entry->connected)
2652 neigh_event_send(neigh_entry->key.n, NULL);
2653 mutex_unlock(&router->lock);
2654
2655 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2656 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2657 }
2658
2659 static void
2660 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2661 struct mlxsw_sp_neigh_entry *neigh_entry,
2662 bool removing, bool dead);
2663
2664 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2665 {
2666 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2667 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2668 }
2669
2670 static int
2671 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2672 struct mlxsw_sp_neigh_entry *neigh_entry,
2673 enum mlxsw_reg_rauht_op op)
2674 {
2675 struct neighbour *n = neigh_entry->key.n;
2676 u32 dip = ntohl(*((__be32 *) n->primary_key));
2677 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2678
2679 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2680 dip);
2681 if (neigh_entry->counter_valid)
2682 mlxsw_reg_rauht_pack_counter(rauht_pl,
2683 neigh_entry->counter_index);
2684 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2685 }
2686
2687 static int
2688 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2689 struct mlxsw_sp_neigh_entry *neigh_entry,
2690 enum mlxsw_reg_rauht_op op)
2691 {
2692 struct neighbour *n = neigh_entry->key.n;
2693 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2694 const char *dip = n->primary_key;
2695
2696 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2697 dip);
2698 if (neigh_entry->counter_valid)
2699 mlxsw_reg_rauht_pack_counter(rauht_pl,
2700 neigh_entry->counter_index);
2701 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2702 }
2703
2704 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2705 {
2706 struct neighbour *n = neigh_entry->key.n;
2707
2708 /* Packets with a link-local destination address are trapped
2709 * after LPM lookup and never reach the neighbour table, so
2710 * there is no need to program such neighbours to the device.
2711 */
2712 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2713 IPV6_ADDR_LINKLOCAL)
2714 return true;
2715 return false;
2716 }
2717
2718 static void
2719 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2720 struct mlxsw_sp_neigh_entry *neigh_entry,
2721 bool adding)
2722 {
2723 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2724 int err;
2725
2726 if (!adding && !neigh_entry->connected)
2727 return;
2728 neigh_entry->connected = adding;
2729 if (neigh_entry->key.n->tbl->family == AF_INET) {
2730 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2731 op);
2732 if (err)
2733 return;
2734 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2735 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2736 return;
2737 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2738 op);
2739 if (err)
2740 return;
2741 } else {
2742 WARN_ON_ONCE(1);
2743 return;
2744 }
2745
2746 if (adding)
2747 neigh_entry->key.n->flags |= NTF_OFFLOADED;
2748 else
2749 neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2750 }
2751
2752 void
2753 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2754 struct mlxsw_sp_neigh_entry *neigh_entry,
2755 bool adding)
2756 {
2757 if (adding)
2758 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2759 else
2760 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
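/* Re-write the entry so that the new counter binding is programmed. */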
2761 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2762 }
2763
2764 struct mlxsw_sp_netevent_work {
2765 struct work_struct work;
2766 struct mlxsw_sp *mlxsw_sp;
2767 struct neighbour *n;
2768 };
2769
2770 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2771 {
2772 struct mlxsw_sp_netevent_work *net_work =
2773 container_of(work, struct mlxsw_sp_netevent_work, work);
2774 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2775 struct mlxsw_sp_neigh_entry *neigh_entry;
2776 struct neighbour *n = net_work->n;
2777 unsigned char ha[ETH_ALEN];
2778 bool entry_connected;
2779 u8 nud_state, dead;
2780
2781 /* If these parameters are changed after we release the lock,
2782 * then we are guaranteed to receive another event letting us
2783 * know about it.
2784 */
2785 read_lock_bh(&n->lock);
2786 memcpy(ha, n->ha, ETH_ALEN);
2787 nud_state = n->nud_state;
2788 dead = n->dead;
2789 read_unlock_bh(&n->lock);
2790
2791 mutex_lock(&mlxsw_sp->router->lock);
2792 mlxsw_sp_span_respin(mlxsw_sp);
2793
2794 entry_connected = nud_state & NUD_VALID && !dead;
2795 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2796 if (!entry_connected && !neigh_entry)
2797 goto out;
2798 if (!neigh_entry) {
2799 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2800 if (IS_ERR(neigh_entry))
2801 goto out;
2802 }
2803
2804 if (neigh_entry->connected && entry_connected &&
2805 !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2806 goto out;
2807
2808 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2809 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2810 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2811 dead);
2812
2813 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2814 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2815
2816 out:
2817 mutex_unlock(&mlxsw_sp->router->lock);
2818 neigh_release(n);
2819 kfree(net_work);
2820 }
2821
2822 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2823
2824 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2825 {
2826 struct mlxsw_sp_netevent_work *net_work =
2827 container_of(work, struct mlxsw_sp_netevent_work, work);
2828 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2829
2830 mlxsw_sp_mp_hash_init(mlxsw_sp);
2831 kfree(net_work);
2832 }
2833
2834 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2835
2836 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2837 {
2838 struct mlxsw_sp_netevent_work *net_work =
2839 container_of(work, struct mlxsw_sp_netevent_work, work);
2840 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2841
2842 __mlxsw_sp_router_init(mlxsw_sp);
2843 kfree(net_work);
2844 }
2845
2846 static int mlxsw_sp_router_schedule_work(struct net *net,
2847 struct mlxsw_sp_router *router,
2848 struct neighbour *n,
2849 void (*cb)(struct work_struct *))
2850 {
2851 struct mlxsw_sp_netevent_work *net_work;
2852
2853 if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2854 return NOTIFY_DONE;
2855
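/* Atomic allocation: netevent notifiers may be called in atomic context. */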
2856 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2857 if (!net_work)
2858 return NOTIFY_BAD;
2859
2860 INIT_WORK(&net_work->work, cb);
2861 net_work->mlxsw_sp = router->mlxsw_sp;
2862 net_work->n = n;
2863 mlxsw_core_schedule_work(&net_work->work);
2864 return NOTIFY_DONE;
2865 }
2866
2867 static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
2868 {
2869 struct mlxsw_sp_port *mlxsw_sp_port;
2870
2871 rcu_read_lock();
2872 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
2873 rcu_read_unlock();
2874 return !!mlxsw_sp_port;
2875 }
2876
2877 static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router,
2878 struct neighbour *n)
2879 {
2880 struct net *net;
2881
2882 net = neigh_parms_net(n->parms);
2883
2884 /* Take a reference to ensure the neighbour won't be destroyed until we
2885 * drop the reference in the delayed work.
2886 */
2887 neigh_clone(n);
2888 return mlxsw_sp_router_schedule_work(net, router, n,
2889 mlxsw_sp_router_neigh_event_work);
2890 }
2891
2892 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2893 unsigned long event, void *ptr)
2894 {
2895 struct mlxsw_sp_router *router;
2896 unsigned long interval;
2897 struct neigh_parms *p;
2898 struct neighbour *n;
2899
2900 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2901
2902 switch (event) {
2903 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2904 p = ptr;
2905
2906 /* We don't care about changes in the default table. */
2907 if (!p->dev || (p->tbl->family != AF_INET &&
2908 p->tbl->family != AF_INET6))
2909 return NOTIFY_DONE;
2910
2911 /* We are in atomic context and can't take RTNL mutex,
2912 * so use RCU variant to walk the device chain.
2913 */
2914 if (!mlxsw_sp_dev_lower_is_port(p->dev))
2915 return NOTIFY_DONE;
2916
2917 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2918 router->neighs_update.interval = interval;
2919 break;
2920 case NETEVENT_NEIGH_UPDATE:
2921 n = ptr;
2922
2923 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2924 return NOTIFY_DONE;
2925
2926 if (!mlxsw_sp_dev_lower_is_port(n->dev))
2927 return NOTIFY_DONE;
2928
2929 return mlxsw_sp_router_schedule_neigh_work(router, n);
2930
2931 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2932 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2933 return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2934 mlxsw_sp_router_mp_hash_event_work);
2935
2936 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2937 return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2938 mlxsw_sp_router_update_priority_work);
2939 }
2940
2941 return NOTIFY_DONE;
2942 }
2943
2944 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2945 {
2946 int err;
2947
2948 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2949 &mlxsw_sp_neigh_ht_params);
2950 if (err)
2951 return err;
2952
2953 /* Initialize the polling interval according to the default
2954 * table.
2955 */
2956 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2957
2958 /* Create the delayed works for neighbour activity update and nexthop probing */
2959 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2960 mlxsw_sp_router_neighs_update_work);
2961 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2962 mlxsw_sp_router_probe_unresolved_nexthops);
2963 atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
2964 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2965 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2966 return 0;
2967 }
2968
2969 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2970 {
2971 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2972 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2973 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2974 }
2975
2976 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2977 struct mlxsw_sp_rif *rif)
2978 {
2979 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2980
2981 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2982 rif_list_node) {
2983 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2984 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2985 }
2986 }
2987
2988 struct mlxsw_sp_neigh_rif_made_sync {
2989 struct mlxsw_sp *mlxsw_sp;
2990 struct mlxsw_sp_rif *rif;
2991 int err;
2992 };
2993
2994 static void mlxsw_sp_neigh_rif_made_sync_each(struct neighbour *n, void *data)
2995 {
2996 struct mlxsw_sp_neigh_rif_made_sync *rms = data;
2997 int rc;
2998
2999 if (rms->err)
3000 return;
3001 if (n->dev != mlxsw_sp_rif_dev(rms->rif))
3002 return;
3003 rc = mlxsw_sp_router_schedule_neigh_work(rms->mlxsw_sp->router, n);
3004 if (rc != NOTIFY_DONE)
3005 rms->err = -ENOMEM;
3006 }
3007
3008 static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
3009 struct mlxsw_sp_rif *rif)
3010 {
3011 struct mlxsw_sp_neigh_rif_made_sync rms = {
3012 .mlxsw_sp = mlxsw_sp,
3013 .rif = rif,
3014 };
3015
3016 neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
3017 if (rms.err)
3018 goto err_arp;
3019
3020 #if IS_ENABLED(CONFIG_IPV6)
3021 neigh_for_each(&nd_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
3022 #endif
3023 if (rms.err)
3024 goto err_nd;
3025
3026 return 0;
3027
3028 err_nd:
3029 err_arp:
3030 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
3031 return rms.err;
3032 }
3033
3034 enum mlxsw_sp_nexthop_type {
3035 MLXSW_SP_NEXTHOP_TYPE_ETH,
3036 MLXSW_SP_NEXTHOP_TYPE_IPIP,
3037 };
3038
3039 enum mlxsw_sp_nexthop_action {
3040 /* Nexthop forwards packets to an egress RIF */
3041 MLXSW_SP_NEXTHOP_ACTION_FORWARD,
3042 /* Nexthop discards packets */
3043 MLXSW_SP_NEXTHOP_ACTION_DISCARD,
3044 /* Nexthop traps packets */
3045 MLXSW_SP_NEXTHOP_ACTION_TRAP,
3046 };
3047
3048 struct mlxsw_sp_nexthop_key {
3049 struct fib_nh *fib_nh;
3050 };
3051
3052 struct mlxsw_sp_nexthop {
3053 struct list_head neigh_list_node; /* member of neigh entry list */
3054 struct list_head crif_list_node;
3055 struct list_head router_list_node;
3056 struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
3057 * this nexthop belongs to
3058 */
3059 struct rhash_head ht_node;
3060 struct neigh_table *neigh_tbl;
3061 struct mlxsw_sp_nexthop_key key;
3062 unsigned char gw_addr[sizeof(struct in6_addr)];
3063 int ifindex;
3064 int nh_weight;
3065 int norm_nh_weight;
3066 int num_adj_entries;
3067 struct mlxsw_sp_crif *crif;
3068 u8 should_offload:1, /* set indicates this nexthop should be written
3069 * to the adjacency table.
3070 */
3071 offloaded:1, /* set indicates this nexthop was written to the
3072 * adjacency table.
3073 */
3074 update:1; /* set indicates this nexthop should be updated in the
3075 * adjacency table (f.e., its MAC changed).
3076 */
3077 enum mlxsw_sp_nexthop_action action;
3078 enum mlxsw_sp_nexthop_type type;
3079 union {
3080 struct mlxsw_sp_neigh_entry *neigh_entry;
3081 struct mlxsw_sp_ipip_entry *ipip_entry;
3082 };
3083 unsigned int counter_index;
3084 bool counter_valid;
3085 };
3086
3087 static struct net_device *
3088 mlxsw_sp_nexthop_dev(const struct mlxsw_sp_nexthop *nh)
3089 {
3090 if (!nh->crif)
3091 return NULL;
3092 return nh->crif->key.dev;
3093 }
3094
3095 enum mlxsw_sp_nexthop_group_type {
3096 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
3097 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
3098 MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
3099 };
3100
3101 struct mlxsw_sp_nexthop_group_info {
3102 struct mlxsw_sp_nexthop_group *nh_grp;
3103 u32 adj_index;
3104 u16 ecmp_size;
3105 u16 count;
3106 int sum_norm_weight;
3107 u8 adj_index_valid:1,
3108 gateway:1, /* routes using the group use a gateway */
3109 is_resilient:1;
3110 struct list_head list; /* member in nh_res_grp_list */
3111 struct mlxsw_sp_nexthop nexthops[];
3112 };
3113
3114 static struct mlxsw_sp_rif *
3115 mlxsw_sp_nhgi_rif(const struct mlxsw_sp_nexthop_group_info *nhgi)
3116 {
3117 struct mlxsw_sp_crif *crif = nhgi->nexthops[0].crif;
3118
3119 if (!crif)
3120 return NULL;
3121 return crif->rif;
3122 }
3123
3124 struct mlxsw_sp_nexthop_group_vr_key {
3125 u16 vr_id;
3126 enum mlxsw_sp_l3proto proto;
3127 };
3128
3129 struct mlxsw_sp_nexthop_group_vr_entry {
3130 struct list_head list; /* member in vr_list */
3131 struct rhash_head ht_node; /* member in vr_ht */
3132 refcount_t ref_count;
3133 struct mlxsw_sp_nexthop_group_vr_key key;
3134 };
3135
3136 struct mlxsw_sp_nexthop_group {
3137 struct rhash_head ht_node;
3138 struct list_head fib_list; /* list of fib entries that use this group */
3139 union {
3140 struct {
3141 struct fib_info *fi;
3142 } ipv4;
3143 struct {
3144 u32 id;
3145 } obj;
3146 };
3147 struct mlxsw_sp_nexthop_group_info *nhgi;
3148 struct list_head vr_list;
3149 struct rhashtable vr_ht;
3150 enum mlxsw_sp_nexthop_group_type type;
3151 bool can_destroy;
3152 };
3153
3154 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
3155 struct mlxsw_sp_nexthop *nh)
3156 {
3157 struct devlink *devlink;
3158
3159 devlink = priv_to_devlink(mlxsw_sp->core);
3160 if (!devlink_dpipe_table_counter_enabled(devlink,
3161 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
3162 return;
3163
3164 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3165 return;
3166
3167 nh->counter_valid = true;
3168 }
3169
3170 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3171 struct mlxsw_sp_nexthop *nh)
3172 {
3173 if (!nh->counter_valid)
3174 return;
3175 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3176 nh->counter_valid = false;
3177 }
3178
3179 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3180 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3181 {
3182 if (!nh->counter_valid)
3183 return -EINVAL;
3184
3185 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3186 p_counter, NULL);
3187 }
3188
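/* Cursor-style iterator over the router's nexthop list: pass NULL to get
 * the first nexthop, or the previous return value to advance; returns
 * NULL past the last entry.
 */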
3189 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3190 struct mlxsw_sp_nexthop *nh)
3191 {
3192 if (!nh) {
3193 if (list_empty(&router->nexthop_list))
3194 return NULL;
3195 else
3196 return list_first_entry(&router->nexthop_list,
3197 typeof(*nh), router_list_node);
3198 }
3199 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3200 return NULL;
3201 return list_next_entry(nh, router_list_node);
3202 }
3203
3204 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3205 {
3206 return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3207 }
3208
3209 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3210 {
3211 if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3212 !mlxsw_sp_nexthop_is_forward(nh))
3213 return NULL;
3214 return nh->neigh_entry->ha;
3215 }
3216
3217 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3218 u32 *p_adj_size, u32 *p_adj_hash_index)
3219 {
3220 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3221 u32 adj_hash_index = 0;
3222 int i;
3223
3224 if (!nh->offloaded || !nhgi->adj_index_valid)
3225 return -EINVAL;
3226
3227 *p_adj_index = nhgi->adj_index;
3228 *p_adj_size = nhgi->ecmp_size;
3229
3230 for (i = 0; i < nhgi->count; i++) {
3231 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3232
3233 if (nh_iter == nh)
3234 break;
3235 if (nh_iter->offloaded)
3236 adj_hash_index += nh_iter->num_adj_entries;
3237 }
3238
3239 *p_adj_hash_index = adj_hash_index;
3240 return 0;
3241 }
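
/* Illustrative sketch (not part of the driver): combining the accessors
 * above the way a dump-style user might. A forwarding nexthop occupies
 * num_adj_entries consecutive entries starting at offset adj_hash_index
 * into the group's adjacency block. The _example name is made up.
 */
static void __maybe_unused
mlxsw_sp_nexthop_example_dump_one(struct mlxsw_sp_nexthop *nh)
{
	u32 adj_index, adj_size, adj_hash_index;
	unsigned char *ha;

	if (mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
				     &adj_hash_index))
		return; /* not offloaded, or no valid adjacency index */

	ha = mlxsw_sp_nexthop_ha(nh);
	if (!ha)
		return; /* not an offloaded Ethernet nexthop */

	pr_debug("adj entry %u of %u, MAC %pM\n",
		 adj_index + adj_hash_index, adj_size, ha);
}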
3242
3243 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3244 {
3245 if (WARN_ON(!nh->crif))
3246 return NULL;
3247 return nh->crif->rif;
3248 }
3249
3250 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3251 {
3252 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3253 int i;
3254
3255 for (i = 0; i < nhgi->count; i++) {
3256 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3257
3258 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3259 return true;
3260 }
3261 return false;
3262 }
3263
3264 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3265 .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3266 .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3267 .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3268 .automatic_shrinking = true,
3269 };
3270
3271 static struct mlxsw_sp_nexthop_group_vr_entry *
3272 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3273 const struct mlxsw_sp_fib *fib)
3274 {
3275 struct mlxsw_sp_nexthop_group_vr_key key;
3276
3277 memset(&key, 0, sizeof(key));
3278 key.vr_id = fib->vr->id;
3279 key.proto = fib->proto;
3280 return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3281 mlxsw_sp_nexthop_group_vr_ht_params);
3282 }
3283
3284 static int
3285 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3286 const struct mlxsw_sp_fib *fib)
3287 {
3288 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3289 int err;
3290
3291 vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3292 if (!vr_entry)
3293 return -ENOMEM;
3294
3295 vr_entry->key.vr_id = fib->vr->id;
3296 vr_entry->key.proto = fib->proto;
3297 refcount_set(&vr_entry->ref_count, 1);
3298
3299 err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3300 mlxsw_sp_nexthop_group_vr_ht_params);
3301 if (err)
3302 goto err_hashtable_insert;
3303
3304 list_add(&vr_entry->list, &nh_grp->vr_list);
3305
3306 return 0;
3307
3308 err_hashtable_insert:
3309 kfree(vr_entry);
3310 return err;
3311 }
3312
3313 static void
3314 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3315 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3316 {
3317 list_del(&vr_entry->list);
3318 rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3319 mlxsw_sp_nexthop_group_vr_ht_params);
3320 kfree(vr_entry);
3321 }
3322
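/* Track which virtual routers use a nexthop group. The entries are
 * refcounted, so a group referenced by several routes in the same VR keeps
 * a single entry, and the mass updates below can iterate over only the
 * VRs that actually point at the group.
 */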
3323 static int
3324 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3325 const struct mlxsw_sp_fib *fib)
3326 {
3327 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3328
3329 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3330 if (vr_entry) {
3331 refcount_inc(&vr_entry->ref_count);
3332 return 0;
3333 }
3334
3335 return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3336 }
3337
3338 static void
3339 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3340 const struct mlxsw_sp_fib *fib)
3341 {
3342 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3343
3344 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3345 if (WARN_ON_ONCE(!vr_entry))
3346 return;
3347
3348 if (!refcount_dec_and_test(&vr_entry->ref_count))
3349 return;
3350
3351 mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3352 }
3353
3354 struct mlxsw_sp_nexthop_group_cmp_arg {
3355 enum mlxsw_sp_nexthop_group_type type;
3356 union {
3357 struct fib_info *fi;
3358 struct mlxsw_sp_fib6_entry *fib6_entry;
3359 u32 id;
3360 };
3361 };
3362
3363 static bool
3364 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3365 const struct in6_addr *gw, int ifindex,
3366 int weight)
3367 {
3368 int i;
3369
3370 for (i = 0; i < nh_grp->nhgi->count; i++) {
3371 const struct mlxsw_sp_nexthop *nh;
3372
3373 nh = &nh_grp->nhgi->nexthops[i];
3374 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3375 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3376 return true;
3377 }
3378
3379 return false;
3380 }
3381
3382 static bool
3383 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3384 const struct mlxsw_sp_fib6_entry *fib6_entry)
3385 {
3386 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3387
3388 if (nh_grp->nhgi->count != fib6_entry->nrt6)
3389 return false;
3390
3391 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3392 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3393 struct in6_addr *gw;
3394 int ifindex, weight;
3395
3396 ifindex = fib6_nh->fib_nh_dev->ifindex;
3397 weight = fib6_nh->fib_nh_weight;
3398 gw = &fib6_nh->fib_nh_gw6;
3399 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3400 weight))
3401 return false;
3402 }
3403
3404 return true;
3405 }
3406
3407 static int
3408 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3409 {
3410 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3411 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3412
3413 if (nh_grp->type != cmp_arg->type)
3414 return 1;
3415
3416 switch (cmp_arg->type) {
3417 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3418 return cmp_arg->fi != nh_grp->ipv4.fi;
3419 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3420 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3421 cmp_arg->fib6_entry);
3422 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3423 return cmp_arg->id != nh_grp->obj.id;
3424 default:
3425 WARN_ON(1);
3426 return 1;
3427 }
3428 }
3429
3430 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3431 {
3432 const struct mlxsw_sp_nexthop_group *nh_grp = data;
3433 const struct mlxsw_sp_nexthop *nh;
3434 struct fib_info *fi;
3435 unsigned int val;
3436 int i;
3437
3438 switch (nh_grp->type) {
3439 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3440 fi = nh_grp->ipv4.fi;
3441 return jhash(&fi, sizeof(fi), seed);
3442 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3443 val = nh_grp->nhgi->count;
3444 for (i = 0; i < nh_grp->nhgi->count; i++) {
3445 nh = &nh_grp->nhgi->nexthops[i];
3446 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3447 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3448 }
3449 return jhash(&val, sizeof(val), seed);
3450 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3451 return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3452 default:
3453 WARN_ON(1);
3454 return 0;
3455 }
3456 }
3457
3458 static u32
3459 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3460 {
3461 unsigned int val = fib6_entry->nrt6;
3462 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3463
3464 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3465 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3466 struct net_device *dev = fib6_nh->fib_nh_dev;
3467 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3468
3469 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3470 val ^= jhash(gw, sizeof(*gw), seed);
3471 }
3472
3473 return jhash(&val, sizeof(val), seed);
3474 }
3475
3476 static u32
3477 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3478 {
3479 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3480
3481 switch (cmp_arg->type) {
3482 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3483 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3484 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3485 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3486 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3487 return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3488 default:
3489 WARN_ON(1);
3490 return 0;
3491 }
3492 }
3493
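/* Groups are looked up by a mlxsw_sp_nexthop_group_cmp_arg rather than by
 * a key embedded in the group itself, so the table needs both hashfn (for
 * the lookup key) and obj_hashfn (for the stored object). The two must
 * hash a key and its matching group to the same value for lookups to work.
 */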
3494 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3495 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3496 .hashfn = mlxsw_sp_nexthop_group_hash,
3497 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3498 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3499 };
3500
3501 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3502 struct mlxsw_sp_nexthop_group *nh_grp)
3503 {
3504 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3505 !nh_grp->nhgi->gateway)
3506 return 0;
3507
3508 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3509 &nh_grp->ht_node,
3510 mlxsw_sp_nexthop_group_ht_params);
3511 }
3512
3513 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3514 struct mlxsw_sp_nexthop_group *nh_grp)
3515 {
3516 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3517 !nh_grp->nhgi->gateway)
3518 return;
3519
3520 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3521 &nh_grp->ht_node,
3522 mlxsw_sp_nexthop_group_ht_params);
3523 }
3524
3525 static struct mlxsw_sp_nexthop_group *
3526 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3527 struct fib_info *fi)
3528 {
3529 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3530
3531 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3532 cmp_arg.fi = fi;
3533 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3534 &cmp_arg,
3535 mlxsw_sp_nexthop_group_ht_params);
3536 }
3537
3538 static struct mlxsw_sp_nexthop_group *
3539 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3540 struct mlxsw_sp_fib6_entry *fib6_entry)
3541 {
3542 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3543
3544 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3545 cmp_arg.fib6_entry = fib6_entry;
3546 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3547 &cmp_arg,
3548 mlxsw_sp_nexthop_group_ht_params);
3549 }
3550
3551 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3552 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3553 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3554 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3555 };
3556
3557 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3558 struct mlxsw_sp_nexthop *nh)
3559 {
3560 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3561 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3562 }
3563
3564 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3565 struct mlxsw_sp_nexthop *nh)
3566 {
3567 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3568 mlxsw_sp_nexthop_ht_params);
3569 }
3570
3571 static struct mlxsw_sp_nexthop *
3572 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3573 struct mlxsw_sp_nexthop_key key)
3574 {
3575 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3576 mlxsw_sp_nexthop_ht_params);
3577 }
3578
3579 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3580 enum mlxsw_sp_l3proto proto,
3581 u16 vr_id,
3582 u32 adj_index, u16 ecmp_size,
3583 u32 new_adj_index,
3584 u16 new_ecmp_size)
3585 {
3586 char raleu_pl[MLXSW_REG_RALEU_LEN];
3587
3588 mlxsw_reg_raleu_pack(raleu_pl,
3589 (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3590 adj_index, ecmp_size, new_adj_index,
3591 new_ecmp_size);
3592 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3593 }
3594
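/* After a group was moved to a new block of adjacency entries, rewrite all
 * routing entries that still point at the old {adjacency index, ECMP size}
 * pair, one virtual router at a time, using the RALEU register. On
 * failure, roll the already-updated VRs back to the old block.
 */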
3595 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3596 struct mlxsw_sp_nexthop_group *nh_grp,
3597 u32 old_adj_index, u16 old_ecmp_size)
3598 {
3599 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3600 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3601 int err;
3602
3603 list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3604 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3605 vr_entry->key.proto,
3606 vr_entry->key.vr_id,
3607 old_adj_index,
3608 old_ecmp_size,
3609 nhgi->adj_index,
3610 nhgi->ecmp_size);
3611 if (err)
3612 goto err_mass_update_vr;
3613 }
3614 return 0;
3615
3616 err_mass_update_vr:
3617 list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3618 mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3619 vr_entry->key.vr_id,
3620 nhgi->adj_index,
3621 nhgi->ecmp_size,
3622 old_adj_index, old_ecmp_size);
3623 return err;
3624 }
3625
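/* Write a single Ethernet adjacency entry via the RATR register. With
 * @force the entry is written unconditionally; otherwise the device only
 * commits the write if the entry has not been active, which lets resilient
 * groups avoid replacing buckets that are carrying traffic.
 */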
3626 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3627 u32 adj_index,
3628 struct mlxsw_sp_nexthop *nh,
3629 bool force, char *ratr_pl)
3630 {
3631 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3632 struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
3633 enum mlxsw_reg_ratr_op op;
3634 u16 rif_index;
3635
3636 rif_index = rif ? rif->rif_index :
3637 mlxsw_sp->router->lb_crif->rif->rif_index;
3638 op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3639 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3640 mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3641 adj_index, rif_index);
3642 switch (nh->action) {
3643 case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3644 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3645 break;
3646 case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3647 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3648 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3649 break;
3650 case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3651 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3652 MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3653 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3654 break;
3655 default:
3656 WARN_ON_ONCE(1);
3657 return -EINVAL;
3658 }
3659 if (nh->counter_valid)
3660 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3661 else
3662 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3663
3664 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3665 }
3666
3667 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3668 struct mlxsw_sp_nexthop *nh, bool force,
3669 char *ratr_pl)
3670 {
3671 int i;
3672
3673 for (i = 0; i < nh->num_adj_entries; i++) {
3674 int err;
3675
3676 err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3677 nh, force, ratr_pl);
3678 if (err)
3679 return err;
3680 }
3681
3682 return 0;
3683 }
3684
3685 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3686 u32 adj_index,
3687 struct mlxsw_sp_nexthop *nh,
3688 bool force, char *ratr_pl)
3689 {
3690 const struct mlxsw_sp_ipip_ops *ipip_ops;
3691
3692 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3693 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3694 force, ratr_pl);
3695 }
3696
3697 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3698 u32 adj_index,
3699 struct mlxsw_sp_nexthop *nh, bool force,
3700 char *ratr_pl)
3701 {
3702 int i;
3703
3704 for (i = 0; i < nh->num_adj_entries; i++) {
3705 int err;
3706
3707 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3708 nh, force, ratr_pl);
3709 if (err)
3710 return err;
3711 }
3712
3713 return 0;
3714 }
3715
3716 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3717 struct mlxsw_sp_nexthop *nh, bool force,
3718 char *ratr_pl)
3719 {
3720 /* When action is discard or trap, the nexthop must be
3721 * programmed as an Ethernet nexthop.
3722 */
3723 if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3724 nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3725 nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3726 return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3727 force, ratr_pl);
3728 else
3729 return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3730 force, ratr_pl);
3731 }
3732
3733 static int
3734 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3735 struct mlxsw_sp_nexthop_group_info *nhgi,
3736 bool reallocate)
3737 {
3738 char ratr_pl[MLXSW_REG_RATR_LEN];
3739 u32 adj_index = nhgi->adj_index; /* base */
3740 struct mlxsw_sp_nexthop *nh;
3741 int i;
3742
3743 for (i = 0; i < nhgi->count; i++) {
3744 nh = &nhgi->nexthops[i];
3745
3746 if (!nh->should_offload) {
3747 nh->offloaded = 0;
3748 continue;
3749 }
3750
3751 if (nh->update || reallocate) {
3752 int err;
3753
3754 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3755 true, ratr_pl);
3756 if (err)
3757 return err;
3758 nh->update = 0;
3759 nh->offloaded = 1;
3760 }
3761 adj_index += nh->num_adj_entries;
3762 }
3763 return 0;
3764 }
3765
3766 static int
3767 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3768 struct mlxsw_sp_nexthop_group *nh_grp)
3769 {
3770 struct mlxsw_sp_fib_entry *fib_entry;
3771 int err;
3772
3773 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3774 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3775 if (err)
3776 return err;
3777 }
3778 return 0;
3779 }
3780
3781 struct mlxsw_sp_adj_grp_size_range {
3782 u16 start; /* Inclusive */
3783 u16 end; /* Inclusive */
3784 };
3785
3786 /* Ordered by range start value */
3787 static const struct mlxsw_sp_adj_grp_size_range
3788 mlxsw_sp1_adj_grp_size_ranges[] = {
3789 { .start = 1, .end = 64 },
3790 { .start = 512, .end = 512 },
3791 { .start = 1024, .end = 1024 },
3792 { .start = 2048, .end = 2048 },
3793 { .start = 4096, .end = 4096 },
3794 };
3795
3796 /* Ordered by range start value */
3797 static const struct mlxsw_sp_adj_grp_size_range
3798 mlxsw_sp2_adj_grp_size_ranges[] = {
3799 { .start = 1, .end = 128 },
3800 { .start = 256, .end = 256 },
3801 { .start = 512, .end = 512 },
3802 { .start = 1024, .end = 1024 },
3803 { .start = 2048, .end = 2048 },
3804 { .start = 4096, .end = 4096 },
3805 };
3806
3807 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3808 u16 *p_adj_grp_size)
3809 {
3810 int i;
3811
3812 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3813 const struct mlxsw_sp_adj_grp_size_range *size_range;
3814
3815 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3816
3817 if (*p_adj_grp_size >= size_range->start &&
3818 *p_adj_grp_size <= size_range->end)
3819 return;
3820
3821 if (*p_adj_grp_size <= size_range->end) {
3822 *p_adj_grp_size = size_range->end;
3823 return;
3824 }
3825 }
3826 }
3827
3828 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3829 u16 *p_adj_grp_size,
3830 unsigned int alloc_size)
3831 {
3832 int i;
3833
3834 for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3835 const struct mlxsw_sp_adj_grp_size_range *size_range;
3836
3837 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3838
3839 if (alloc_size >= size_range->end) {
3840 *p_adj_grp_size = size_range->end;
3841 return;
3842 }
3843 }
3844 }
3845
3846 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3847 u16 *p_adj_grp_size)
3848 {
3849 unsigned int alloc_size;
3850 int err;
3851
3852 /* Round up the requested group size to the next size supported
3853 * by the device and make sure the request can be satisfied.
3854 */
3855 mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3856 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3857 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3858 *p_adj_grp_size, &alloc_size);
3859 if (err)
3860 return err;
3861 /* It is possible the allocation results in more allocated
3862 * entries than requested. Try to use as many of them as
3863 * possible.
3864 */
3865 mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3866
3867 return 0;
3868 }
3869
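/* Normalize the weights of the offloaded nexthops in the group by dividing
 * them by their greatest common divisor. For example, weights {2, 4}
 * normalize to {1, 2} and sum_norm_weight becomes 3, the smallest ECMP
 * size that still preserves the configured ratio.
 */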
3870 static void
3871 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3872 {
3873 int i, g = 0, sum_norm_weight = 0;
3874 struct mlxsw_sp_nexthop *nh;
3875
3876 for (i = 0; i < nhgi->count; i++) {
3877 nh = &nhgi->nexthops[i];
3878
3879 if (!nh->should_offload)
3880 continue;
3881 if (g > 0)
3882 g = gcd(nh->nh_weight, g);
3883 else
3884 g = nh->nh_weight;
3885 }
3886
3887 for (i = 0; i < nhgi->count; i++) {
3888 nh = &nhgi->nexthops[i];
3889
3890 if (!nh->should_offload)
3891 continue;
3892 nh->norm_nh_weight = nh->nh_weight / g;
3893 sum_norm_weight += nh->norm_nh_weight;
3894 }
3895
3896 nhgi->sum_norm_weight = sum_norm_weight;
3897 }
3898
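/* Distribute the group's ecmp_size adjacency entries among the offloaded
 * nexthops in proportion to their normalized weights, using rounded
 * cumulative shares so the per-nexthop counts always sum to exactly
 * ecmp_size. For example, with ecmp_size 512 and normalized weights {1, 2}
 * the nexthops are assigned 171 and 341 entries, respectively.
 */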
3899 static void
3900 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3901 {
3902 int i, weight = 0, lower_bound = 0;
3903 int total = nhgi->sum_norm_weight;
3904 u16 ecmp_size = nhgi->ecmp_size;
3905
3906 for (i = 0; i < nhgi->count; i++) {
3907 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3908 int upper_bound;
3909
3910 if (!nh->should_offload)
3911 continue;
3912 weight += nh->norm_nh_weight;
3913 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3914 nh->num_adj_entries = upper_bound - lower_bound;
3915 lower_bound = upper_bound;
3916 }
3917 }
3918
3919 static struct mlxsw_sp_nexthop *
3920 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3921 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3922
3923 static void
3924 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3925 struct mlxsw_sp_nexthop_group *nh_grp)
3926 {
3927 int i;
3928
3929 for (i = 0; i < nh_grp->nhgi->count; i++) {
3930 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3931
3932 if (nh->offloaded)
3933 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3934 else
3935 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3936 }
3937 }
3938
3939 static void
3940 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3941 struct mlxsw_sp_fib6_entry *fib6_entry)
3942 {
3943 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3944
3945 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3946 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3947 struct mlxsw_sp_nexthop *nh;
3948
3949 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3950 if (nh && nh->offloaded)
3951 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3952 else
3953 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3954 }
3955 }
3956
3957 static void
3958 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3959 struct mlxsw_sp_nexthop_group *nh_grp)
3960 {
3961 struct mlxsw_sp_fib6_entry *fib6_entry;
3962
3963 /* Unfortunately, in IPv6 the route and the nexthop are described by
3964 * the same struct, so we need to iterate over all the routes using the
3965 * nexthop group and set / clear the offload indication for them.
3966 */
3967 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3968 common.nexthop_group_node)
3969 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3970 }
3971
3972 static void
3973 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3974 const struct mlxsw_sp_nexthop *nh,
3975 u16 bucket_index)
3976 {
3977 struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3978 bool offload = false, trap = false;
3979
3980 if (nh->offloaded) {
3981 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3982 trap = true;
3983 else
3984 offload = true;
3985 }
3986 nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3987 bucket_index, offload, trap);
3988 }
3989
3990 static void
3991 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3992 struct mlxsw_sp_nexthop_group *nh_grp)
3993 {
3994 int i;
3995
3996 /* Do not update the flags if the nexthop group is being destroyed
3997 * since:
3998 * 1. The nexthop object is being deleted, in which case the flags are
3999 * irrelevant.
4000 * 2. The nexthop group was replaced by a newer group, in which case
4001 * the flags of the nexthop object were already updated based on the
4002 * new group.
4003 */
4004 if (nh_grp->can_destroy)
4005 return;
4006
4007 nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4008 nh_grp->nhgi->adj_index_valid, false);
4009
4010 /* Update flags of individual nexthop buckets in case of a resilient
4011 * nexthop group.
4012 */
4013 if (!nh_grp->nhgi->is_resilient)
4014 return;
4015
4016 for (i = 0; i < nh_grp->nhgi->count; i++) {
4017 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
4018
4019 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
4020 }
4021 }
4022
4023 static void
4024 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4025 struct mlxsw_sp_nexthop_group *nh_grp)
4026 {
4027 switch (nh_grp->type) {
4028 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
4029 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
4030 break;
4031 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
4032 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
4033 break;
4034 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
4035 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
4036 break;
4037 }
4038 }
4039
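/* Re-apply a nexthop group to the hardware after its composition changed.
 * If no nexthop gained or lost offload eligibility, only the existing
 * adjacency entries are rewritten. Otherwise a new block of adjacency
 * entries is allocated and populated, routes are flipped over to it and
 * the old block is freed. On any failure the group falls back to trapping
 * packets to the CPU.
 */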
4040 static int
4041 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
4042 struct mlxsw_sp_nexthop_group *nh_grp)
4043 {
4044 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4045 u16 ecmp_size, old_ecmp_size;
4046 struct mlxsw_sp_nexthop *nh;
4047 bool offload_change = false;
4048 u32 adj_index;
4049 bool old_adj_index_valid;
4050 u32 old_adj_index;
4051 int i, err2, err;
4052
4053 if (!nhgi->gateway)
4054 return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4055
4056 for (i = 0; i < nhgi->count; i++) {
4057 nh = &nhgi->nexthops[i];
4058
4059 if (nh->should_offload != nh->offloaded) {
4060 offload_change = true;
4061 if (nh->should_offload)
4062 nh->update = 1;
4063 }
4064 }
4065 if (!offload_change) {
4066 /* Nothing was added or removed, so no need to reallocate. Just
4067 * update MAC on existing adjacency indexes.
4068 */
4069 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
4070 if (err) {
4071 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4072 goto set_trap;
4073 }
4074 /* Flags of individual nexthop buckets might need to be
4075 * updated.
4076 */
4077 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4078 return 0;
4079 }
4080 mlxsw_sp_nexthop_group_normalize(nhgi);
4081 if (!nhgi->sum_norm_weight) {
4082 /* No neigh of this group is connected so we just set
4083 * the trap and let everything flow through the kernel.
4084 */
4085 err = 0;
4086 goto set_trap;
4087 }
4088
4089 ecmp_size = nhgi->sum_norm_weight;
4090 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4091 if (err)
4092 /* No valid allocation size available. */
4093 goto set_trap;
4094
4095 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4096 ecmp_size, &adj_index);
4097 if (err) {
4098 /* We ran out of KVD linear space, just set the
4099 * trap and let everything flow through the kernel.
4100 */
4101 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4102 goto set_trap;
4103 }
4104 old_adj_index_valid = nhgi->adj_index_valid;
4105 old_adj_index = nhgi->adj_index;
4106 old_ecmp_size = nhgi->ecmp_size;
4107 nhgi->adj_index_valid = 1;
4108 nhgi->adj_index = adj_index;
4109 nhgi->ecmp_size = ecmp_size;
4110 mlxsw_sp_nexthop_group_rebalance(nhgi);
4111 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4112 if (err) {
4113 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4114 goto set_trap;
4115 }
4116
4117 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4118
4119 if (!old_adj_index_valid) {
4120 /* The trap was set for fib entries, so we have to call
4121 * fib entry update to unset it and use the adjacency index.
4122 */
4123 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4124 if (err) {
4125 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4126 goto set_trap;
4127 }
4128 return 0;
4129 }
4130
4131 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4132 old_adj_index, old_ecmp_size);
4133 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4134 old_ecmp_size, old_adj_index);
4135 if (err) {
4136 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4137 goto set_trap;
4138 }
4139
4140 return 0;
4141
4142 set_trap:
4143 old_adj_index_valid = nhgi->adj_index_valid;
4144 nhgi->adj_index_valid = 0;
4145 for (i = 0; i < nhgi->count; i++) {
4146 nh = &nhgi->nexthops[i];
4147 nh->offloaded = 0;
4148 }
4149 err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4150 if (err2)
4151 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4152 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4153 if (old_adj_index_valid)
4154 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4155 nhgi->ecmp_size, nhgi->adj_index);
4156 return err;
4157 }
4158
4159 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4160 bool removing)
4161 {
4162 if (!removing) {
4163 nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4164 nh->should_offload = 1;
4165 } else if (nh->nhgi->is_resilient) {
4166 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4167 nh->should_offload = 1;
4168 } else {
4169 nh->should_offload = 0;
4170 }
4171 nh->update = 1;
4172 }
4173
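/* The neighbour entry was marked as dead and a new neighbour was created
 * for the same address. Re-key the driver's entry to the new neighbour,
 * transfer the nexthop references from the old neighbour to it and refresh
 * the affected groups according to its NUD state.
 */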
4174 static int
4175 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4176 struct mlxsw_sp_neigh_entry *neigh_entry)
4177 {
4178 struct neighbour *n, *old_n = neigh_entry->key.n;
4179 struct mlxsw_sp_nexthop *nh;
4180 struct net_device *dev;
4181 bool entry_connected;
4182 u8 nud_state, dead;
4183 int err;
4184
4185 nh = list_first_entry(&neigh_entry->nexthop_list,
4186 struct mlxsw_sp_nexthop, neigh_list_node);
4187 dev = mlxsw_sp_nexthop_dev(nh);
4188
4189 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4190 if (!n) {
4191 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4192 if (IS_ERR(n))
4193 return PTR_ERR(n);
4194 neigh_event_send(n, NULL);
4195 }
4196
4197 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4198 neigh_entry->key.n = n;
4199 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4200 if (err)
4201 goto err_neigh_entry_insert;
4202
4203 read_lock_bh(&n->lock);
4204 nud_state = n->nud_state;
4205 dead = n->dead;
4206 read_unlock_bh(&n->lock);
4207 entry_connected = nud_state & NUD_VALID && !dead;
4208
4209 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4210 neigh_list_node) {
4211 neigh_release(old_n);
4212 neigh_clone(n);
4213 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4214 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4215 }
4216
4217 neigh_release(n);
4218
4219 return 0;
4220
4221 err_neigh_entry_insert:
4222 neigh_entry->key.n = old_n;
4223 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4224 neigh_release(n);
4225 return err;
4226 }
4227
4228 static void
4229 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4230 struct mlxsw_sp_neigh_entry *neigh_entry,
4231 bool removing, bool dead)
4232 {
4233 struct mlxsw_sp_nexthop *nh;
4234
4235 if (list_empty(&neigh_entry->nexthop_list))
4236 return;
4237
4238 if (dead) {
4239 int err;
4240
4241 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4242 neigh_entry);
4243 if (err)
4244 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4245 return;
4246 }
4247
4248 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4249 neigh_list_node) {
4250 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4251 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4252 }
4253 }
4254
4255 static void mlxsw_sp_nexthop_crif_init(struct mlxsw_sp_nexthop *nh,
4256 struct mlxsw_sp_crif *crif)
4257 {
4258 if (nh->crif)
4259 return;
4260
4261 nh->crif = crif;
4262 list_add(&nh->crif_list_node, &crif->nexthop_list);
4263 }
4264
4265 static void mlxsw_sp_nexthop_crif_fini(struct mlxsw_sp_nexthop *nh)
4266 {
4267 if (!nh->crif)
4268 return;
4269
4270 list_del(&nh->crif_list_node);
4271 nh->crif = NULL;
4272 }
4273
4274 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4275 struct mlxsw_sp_nexthop *nh)
4276 {
4277 struct mlxsw_sp_neigh_entry *neigh_entry;
4278 struct net_device *dev;
4279 struct neighbour *n;
4280 u8 nud_state, dead;
4281 int err;
4282
4283 if (WARN_ON(!nh->crif->rif))
4284 return 0;
4285
4286 if (!nh->nhgi->gateway || nh->neigh_entry)
4287 return 0;
4288 dev = mlxsw_sp_nexthop_dev(nh);
4289
4290 /* Take a reference on the neighbour here to ensure that it is
4291 * not destroyed before the nexthop entry is finished with it.
4292 * The reference is taken either in neigh_lookup() or
4293 * in neigh_create() in case n is not found.
4294 */
4295 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4296 if (!n) {
4297 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4298 if (IS_ERR(n))
4299 return PTR_ERR(n);
4300 neigh_event_send(n, NULL);
4301 }
4302 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4303 if (!neigh_entry) {
4304 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4305 if (IS_ERR(neigh_entry)) {
4306 err = -EINVAL;
4307 goto err_neigh_entry_create;
4308 }
4309 }
4310
4311 /* If that is the first nexthop connected to that neigh, add to
4312 * nexthop_neighs_list
4313 */
4314 if (list_empty(&neigh_entry->nexthop_list))
4315 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4316 &mlxsw_sp->router->nexthop_neighs_list);
4317
4318 nh->neigh_entry = neigh_entry;
4319 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4320 read_lock_bh(&n->lock);
4321 nud_state = n->nud_state;
4322 dead = n->dead;
4323 read_unlock_bh(&n->lock);
4324 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4325
4326 return 0;
4327
4328 err_neigh_entry_create:
4329 neigh_release(n);
4330 return err;
4331 }
4332
4333 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4334 struct mlxsw_sp_nexthop *nh)
4335 {
4336 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4337 struct neighbour *n;
4338
4339 if (!neigh_entry)
4340 return;
4341 n = neigh_entry->key.n;
4342
4343 __mlxsw_sp_nexthop_neigh_update(nh, true);
4344 list_del(&nh->neigh_list_node);
4345 nh->neigh_entry = NULL;
4346
4347 /* If that is the last nexthop connected to that neigh, remove from
4348 * nexthop_neighs_list
4349 */
4350 if (list_empty(&neigh_entry->nexthop_list))
4351 list_del(&neigh_entry->nexthop_neighs_list_node);
4352
4353 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4354 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4355
4356 neigh_release(n);
4357 }
4358
4359 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4360 {
4361 struct net_device *ul_dev;
4362 bool is_up;
4363
4364 rcu_read_lock();
4365 ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4366 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4367 rcu_read_unlock();
4368
4369 return is_up;
4370 }
4371
4372 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4373 struct mlxsw_sp_nexthop *nh,
4374 struct mlxsw_sp_ipip_entry *ipip_entry)
4375 {
4376 struct mlxsw_sp_crif *crif;
4377 bool removing;
4378
4379 if (!nh->nhgi->gateway || nh->ipip_entry)
4380 return;
4381
4382 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, ipip_entry->ol_dev);
4383 if (WARN_ON(!crif))
4384 return;
4385
4386 nh->ipip_entry = ipip_entry;
4387 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4388 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4389 mlxsw_sp_nexthop_crif_init(nh, crif);
4390 }
4391
4392 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4393 struct mlxsw_sp_nexthop *nh)
4394 {
4395 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4396
4397 if (!ipip_entry)
4398 return;
4399
4400 __mlxsw_sp_nexthop_neigh_update(nh, true);
4401 nh->ipip_entry = NULL;
4402 }
4403
4404 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4405 const struct fib_nh *fib_nh,
4406 enum mlxsw_sp_ipip_type *p_ipipt)
4407 {
4408 struct net_device *dev = fib_nh->fib_nh_dev;
4409
4410 return dev &&
4411 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4412 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4413 }
4414
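/* Resolve how a nexthop will be offloaded: as an encapsulating IPIP entry
 * if its device is an offloadable tunnel, otherwise as an Ethernet nexthop
 * whose neighbour is tracked once the bound CRIF has a RIF.
 */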
4415 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4416 struct mlxsw_sp_nexthop *nh,
4417 const struct net_device *dev)
4418 {
4419 const struct mlxsw_sp_ipip_ops *ipip_ops;
4420 struct mlxsw_sp_ipip_entry *ipip_entry;
4421 struct mlxsw_sp_crif *crif;
4422 int err;
4423
4424 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4425 if (ipip_entry) {
4426 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4427 if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4428 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4429 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4430 return 0;
4431 }
4432 }
4433
4434 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4435 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, dev);
4436 if (!crif)
4437 return 0;
4438
4439 mlxsw_sp_nexthop_crif_init(nh, crif);
4440
4441 if (!crif->rif)
4442 return 0;
4443
4444 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4445 if (err)
4446 goto err_neigh_init;
4447
4448 return 0;
4449
4450 err_neigh_init:
4451 mlxsw_sp_nexthop_crif_fini(nh);
4452 return err;
4453 }
4454
4455 static int mlxsw_sp_nexthop_type_rif_made(struct mlxsw_sp *mlxsw_sp,
4456 struct mlxsw_sp_nexthop *nh)
4457 {
4458 switch (nh->type) {
4459 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4460 return mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4461 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4462 break;
4463 }
4464
4465 return 0;
4466 }
4467
4468 static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
4469 struct mlxsw_sp_nexthop *nh)
4470 {
4471 switch (nh->type) {
4472 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4473 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4474 break;
4475 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4476 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4477 break;
4478 }
4479 }
4480
4481 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4482 struct mlxsw_sp_nexthop *nh)
4483 {
4484 mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4485 mlxsw_sp_nexthop_crif_fini(nh);
4486 }
4487
4488 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4489 struct mlxsw_sp_nexthop_group *nh_grp,
4490 struct mlxsw_sp_nexthop *nh,
4491 struct fib_nh *fib_nh)
4492 {
4493 struct net_device *dev = fib_nh->fib_nh_dev;
4494 struct in_device *in_dev;
4495 int err;
4496
4497 nh->nhgi = nh_grp->nhgi;
4498 nh->key.fib_nh = fib_nh;
4499 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4500 nh->nh_weight = fib_nh->fib_nh_weight;
4501 #else
4502 nh->nh_weight = 1;
4503 #endif
4504 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4505 nh->neigh_tbl = &arp_tbl;
4506 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4507 if (err)
4508 return err;
4509
4510 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4511 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4512
4513 if (!dev)
4514 return 0;
4515 nh->ifindex = dev->ifindex;
4516
4517 rcu_read_lock();
4518 in_dev = __in_dev_get_rcu(dev);
4519 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4520 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4521 rcu_read_unlock();
4522 return 0;
4523 }
4524 rcu_read_unlock();
4525
4526 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4527 if (err)
4528 goto err_nexthop_neigh_init;
4529
4530 return 0;
4531
4532 err_nexthop_neigh_init:
4533 list_del(&nh->router_list_node);
4534 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4535 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4536 return err;
4537 }
4538
4539 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4540 struct mlxsw_sp_nexthop *nh)
4541 {
4542 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4543 list_del(&nh->router_list_node);
4544 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4545 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4546 }
4547
4548 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4549 unsigned long event, struct fib_nh *fib_nh)
4550 {
4551 struct mlxsw_sp_nexthop_key key;
4552 struct mlxsw_sp_nexthop *nh;
4553
4554 key.fib_nh = fib_nh;
4555 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4556 if (!nh)
4557 return;
4558
4559 switch (event) {
4560 case FIB_EVENT_NH_ADD:
4561 mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4562 break;
4563 case FIB_EVENT_NH_DEL:
4564 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4565 break;
4566 }
4567
4568 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4569 }
4570
4571 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4572 struct mlxsw_sp_rif *rif)
4573 {
4574 struct net_device *dev = mlxsw_sp_rif_dev(rif);
4575 struct mlxsw_sp_nexthop *nh;
4576 bool removing;
4577
4578 list_for_each_entry(nh, &rif->crif->nexthop_list, crif_list_node) {
4579 switch (nh->type) {
4580 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4581 removing = false;
4582 break;
4583 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4584 removing = !mlxsw_sp_ipip_netdev_ul_up(dev);
4585 break;
4586 default:
4587 WARN_ON(1);
4588 continue;
4589 }
4590
4591 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4592 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4593 }
4594 }
4595
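/* A RIF was created for an existing CRIF. Walk the nexthops bound to the
 * CRIF, initialize their neighbour state and refresh their groups,
 * unwinding the already-initialized nexthops on error.
 */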
4596 static int mlxsw_sp_nexthop_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
4597 struct mlxsw_sp_rif *rif)
4598 {
4599 struct mlxsw_sp_nexthop *nh, *tmp;
4600 unsigned int n = 0;
4601 int err;
4602
4603 list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4604 crif_list_node) {
4605 err = mlxsw_sp_nexthop_type_rif_made(mlxsw_sp, nh);
4606 if (err)
4607 goto err_nexthop_type_rif;
4608 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4609 n++;
4610 }
4611
4612 return 0;
4613
4614 err_nexthop_type_rif:
4615 list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4616 crif_list_node) {
4617 if (!n--)
4618 break;
4619 mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4620 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4621 }
4622 return err;
4623 }
4624
4625 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4626 struct mlxsw_sp_rif *rif)
4627 {
4628 struct mlxsw_sp_nexthop *nh, *tmp;
4629
4630 list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4631 crif_list_node) {
4632 mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4633 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4634 }
4635 }
4636
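/* Program the single shared adjacency entry that traps packets to the CPU.
 * It stands in for nexthop groups that do not have a valid adjacency index
 * and is allocated on demand when the first nexthop group is created.
 */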
4637 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4638 {
4639 enum mlxsw_reg_ratr_trap_action trap_action;
4640 char ratr_pl[MLXSW_REG_RATR_LEN];
4641 int err;
4642
4643 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4644 &mlxsw_sp->router->adj_trap_index);
4645 if (err)
4646 return err;
4647
4648 trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4649 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4650 MLXSW_REG_RATR_TYPE_ETHERNET,
4651 mlxsw_sp->router->adj_trap_index,
4652 mlxsw_sp->router->lb_crif->rif->rif_index);
4653 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4654 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4655 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4656 if (err)
4657 goto err_ratr_write;
4658
4659 return 0;
4660
4661 err_ratr_write:
4662 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4663 mlxsw_sp->router->adj_trap_index);
4664 return err;
4665 }
4666
4667 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4668 {
4669 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4670 mlxsw_sp->router->adj_trap_index);
4671 }
4672
4673 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4674 {
4675 int err;
4676
4677 if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4678 return 0;
4679
4680 err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4681 if (err)
4682 return err;
4683
4684 refcount_set(&mlxsw_sp->router->num_groups, 1);
4685
4686 return 0;
4687 }
4688
4689 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4690 {
4691 if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4692 return;
4693
4694 mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4695 }
4696
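/* Query the activity vector of a resilient group's adjacency block via the
 * RATRAD register and set a bit in @activity for every bucket the hardware
 * reports as recently used.
 */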
4697 static void
4698 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4699 const struct mlxsw_sp_nexthop_group *nh_grp,
4700 unsigned long *activity)
4701 {
4702 char *ratrad_pl;
4703 int i, err;
4704
4705 ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4706 if (!ratrad_pl)
4707 return;
4708
4709 mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4710 nh_grp->nhgi->count);
4711 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4712 if (err)
4713 goto out;
4714
4715 for (i = 0; i < nh_grp->nhgi->count; i++) {
4716 if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4717 continue;
4718 bitmap_set(activity, i, 1);
4719 }
4720
4721 out:
4722 kfree(ratrad_pl);
4723 }
4724
4725 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4726
4727 static void
4728 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4729 const struct mlxsw_sp_nexthop_group *nh_grp)
4730 {
4731 unsigned long *activity;
4732
4733 activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4734 if (!activity)
4735 return;
4736
4737 mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4738 nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4739 nh_grp->nhgi->count, activity);
4740
4741 bitmap_free(activity);
4742 }
4743
4744 static void
4745 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4746 {
4747 unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4748
4749 mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4750 msecs_to_jiffies(interval));
4751 }
4752
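/* Periodic work that reports hardware bucket activity to the nexthop code
 * for every resilient group, so that the idle timers visible to user space
 * reflect what the ASIC actually forwarded. The work reschedules itself as
 * long as at least one resilient group exists.
 */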
4753 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4754 {
4755 struct mlxsw_sp_nexthop_group_info *nhgi;
4756 struct mlxsw_sp_router *router;
4757 bool reschedule = false;
4758
4759 router = container_of(work, struct mlxsw_sp_router,
4760 nh_grp_activity_dw.work);
4761
4762 mutex_lock(&router->lock);
4763
4764 list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4765 mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4766 reschedule = true;
4767 }
4768
4769 mutex_unlock(&router->lock);
4770
4771 if (!reschedule)
4772 return;
4773 mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4774 }
4775
4776 static int
4777 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4778 const struct nh_notifier_single_info *nh,
4779 struct netlink_ext_ack *extack)
4780 {
4781 int err = -EINVAL;
4782
4783 if (nh->is_fdb)
4784 NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4785 else if (nh->has_encap)
4786 NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4787 else
4788 err = 0;
4789
4790 return err;
4791 }
4792
4793 static int
4794 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4795 const struct nh_notifier_single_info *nh,
4796 struct netlink_ext_ack *extack)
4797 {
4798 int err;
4799
4800 err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4801 if (err)
4802 return err;
4803
4804 /* Device-only nexthops with an IPIP device are programmed as
4805 * encapsulating adjacency entries.
4806 */
4807 if (!nh->gw_family && !nh->is_reject &&
4808 !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4809 NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4810 return -EINVAL;
4811 }
4812
4813 return 0;
4814 }
4815
4816 static int
4817 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4818 const struct nh_notifier_grp_info *nh_grp,
4819 struct netlink_ext_ack *extack)
4820 {
4821 int i;
4822
4823 if (nh_grp->is_fdb) {
4824 NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4825 return -EINVAL;
4826 }
4827
4828 for (i = 0; i < nh_grp->num_nh; i++) {
4829 const struct nh_notifier_single_info *nh;
4830 int err;
4831
4832 nh = &nh_grp->nh_entries[i].nh;
4833 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4834 extack);
4835 if (err)
4836 return err;
4837 }
4838
4839 return 0;
4840 }
4841
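/* The bucket count of a resilient group is fixed for the group's lifetime,
 * so it must match a size the device can actually provide: at least 32
 * buckets, within one of the supported adjacency group size ranges and
 * exactly equal to the allocation size of a KVDL partition.
 */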
4842 static int
4843 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4844 const struct nh_notifier_res_table_info *nh_res_table,
4845 struct netlink_ext_ack *extack)
4846 {
4847 unsigned int alloc_size;
4848 bool valid_size = false;
4849 int err, i;
4850
4851 if (nh_res_table->num_nh_buckets < 32) {
4852 NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4853 return -EINVAL;
4854 }
4855
4856 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4857 const struct mlxsw_sp_adj_grp_size_range *size_range;
4858
4859 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4860
4861 if (nh_res_table->num_nh_buckets >= size_range->start &&
4862 nh_res_table->num_nh_buckets <= size_range->end) {
4863 valid_size = true;
4864 break;
4865 }
4866 }
4867
4868 if (!valid_size) {
4869 NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4870 return -EINVAL;
4871 }
4872
4873 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4874 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4875 nh_res_table->num_nh_buckets,
4876 &alloc_size);
4877 if (err || nh_res_table->num_nh_buckets != alloc_size) {
4878 NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4879 return -EINVAL;
4880 }
4881
4882 return 0;
4883 }
4884
4885 static int
4886 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4887 const struct nh_notifier_res_table_info *nh_res_table,
4888 struct netlink_ext_ack *extack)
4889 {
4890 int err;
4891 u16 i;
4892
4893 err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4894 nh_res_table,
4895 extack);
4896 if (err)
4897 return err;
4898
4899 for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4900 const struct nh_notifier_single_info *nh;
4901 int err;
4902
4903 nh = &nh_res_table->nhs[i];
4904 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4905 extack);
4906 if (err)
4907 return err;
4908 }
4909
4910 return 0;
4911 }
4912
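/* Entry point for validating nexthop object notifications. Only
 * replacement of a nexthop, a group or a resilient-group bucket is vetted
 * here; other events do not require validation.
 */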
4913 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4914 unsigned long event,
4915 struct nh_notifier_info *info)
4916 {
4917 struct nh_notifier_single_info *nh;
4918
4919 if (event != NEXTHOP_EVENT_REPLACE &&
4920 event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4921 event != NEXTHOP_EVENT_BUCKET_REPLACE)
4922 return 0;
4923
4924 switch (info->type) {
4925 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4926 return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4927 info->extack);
4928 case NH_NOTIFIER_INFO_TYPE_GRP:
4929 return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4930 info->nh_grp,
4931 info->extack);
4932 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4933 return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4934 info->nh_res_table,
4935 info->extack);
4936 case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4937 nh = &info->nh_res_bucket->new_nh;
4938 return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4939 info->extack);
4940 default:
4941 NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4942 return -EOPNOTSUPP;
4943 }
4944 }
4945
4946 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4947 const struct nh_notifier_info *info)
4948 {
4949 const struct net_device *dev;
4950
4951 switch (info->type) {
4952 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4953 dev = info->nh->dev;
4954 return info->nh->gw_family || info->nh->is_reject ||
4955 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4956 case NH_NOTIFIER_INFO_TYPE_GRP:
4957 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4958 /* Already validated earlier. */
4959 return true;
4960 default:
4961 return false;
4962 }
4963 }
4964
4965 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4966 struct mlxsw_sp_nexthop *nh)
4967 {
4968 nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4969 nh->should_offload = 1;
4970 /* While nexthops that discard packets do not forward them via an
4971 * egress RIF, they still need to be programmed using a valid RIF,
4972 * so use the loopback RIF created during init.
4973 */
4974 nh->crif = mlxsw_sp->router->lb_crif;
4975 }
4976
4977 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4978 struct mlxsw_sp_nexthop *nh)
4979 {
4980 nh->crif = NULL;
4981 nh->should_offload = 0;
4982 }
4983
4984 static int
4985 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4986 struct mlxsw_sp_nexthop_group *nh_grp,
4987 struct mlxsw_sp_nexthop *nh,
4988 struct nh_notifier_single_info *nh_obj, int weight)
4989 {
4990 struct net_device *dev = nh_obj->dev;
4991 int err;
4992
4993 nh->nhgi = nh_grp->nhgi;
4994 nh->nh_weight = weight;
4995
4996 switch (nh_obj->gw_family) {
4997 case AF_INET:
4998 memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4999 nh->neigh_tbl = &arp_tbl;
5000 break;
5001 case AF_INET6:
5002 memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
5003 #if IS_ENABLED(CONFIG_IPV6)
5004 nh->neigh_tbl = &nd_tbl;
5005 #endif
5006 break;
5007 }
5008
5009 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5010 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5011 nh->ifindex = dev->ifindex;
5012
5013 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
5014 if (err)
5015 goto err_type_init;
5016
5017 if (nh_obj->is_reject)
5018 mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
5019
5020 /* In a resilient nexthop group, all the nexthops must be written to
5021 * the adjacency table, even if they do not have a valid neighbour or
5022 * RIF.
5023 */
5024 if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
5025 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
5026 nh->should_offload = 1;
5027 }
5028
5029 return 0;
5030
5031 err_type_init:
5032 list_del(&nh->router_list_node);
5033 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5034 return err;
5035 }
5036
5037 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
5038 struct mlxsw_sp_nexthop *nh)
5039 {
5040 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
5041 mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
5042 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5043 list_del(&nh->router_list_node);
5044 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5045 nh->should_offload = 0;
5046 }
5047
5048 static int
5049 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
5050 struct mlxsw_sp_nexthop_group *nh_grp,
5051 struct nh_notifier_info *info)
5052 {
5053 struct mlxsw_sp_nexthop_group_info *nhgi;
5054 struct mlxsw_sp_nexthop *nh;
5055 bool is_resilient = false;
5056 unsigned int nhs;
5057 int err, i;
5058
5059 switch (info->type) {
5060 case NH_NOTIFIER_INFO_TYPE_SINGLE:
5061 nhs = 1;
5062 break;
5063 case NH_NOTIFIER_INFO_TYPE_GRP:
5064 nhs = info->nh_grp->num_nh;
5065 break;
5066 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5067 nhs = info->nh_res_table->num_nh_buckets;
5068 is_resilient = true;
5069 break;
5070 default:
5071 return -EINVAL;
5072 }
5073
5074 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5075 if (!nhgi)
5076 return -ENOMEM;
5077 nh_grp->nhgi = nhgi;
5078 nhgi->nh_grp = nh_grp;
5079 nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
5080 nhgi->is_resilient = is_resilient;
5081 nhgi->count = nhs;
5082 for (i = 0; i < nhgi->count; i++) {
5083 struct nh_notifier_single_info *nh_obj;
5084 int weight;
5085
5086 nh = &nhgi->nexthops[i];
5087 switch (info->type) {
5088 case NH_NOTIFIER_INFO_TYPE_SINGLE:
5089 nh_obj = info->nh;
5090 weight = 1;
5091 break;
5092 case NH_NOTIFIER_INFO_TYPE_GRP:
5093 nh_obj = &info->nh_grp->nh_entries[i].nh;
5094 weight = info->nh_grp->nh_entries[i].weight;
5095 break;
5096 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5097 nh_obj = &info->nh_res_table->nhs[i];
5098 weight = 1;
5099 break;
5100 default:
5101 err = -EINVAL;
5102 goto err_nexthop_obj_init;
5103 }
5104 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
5105 weight);
5106 if (err)
5107 goto err_nexthop_obj_init;
5108 }
5109 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5110 if (err)
5111 goto err_group_inc;
5112 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5113 if (err) {
5114 NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
5115 goto err_group_refresh;
5116 }
5117
5118 /* Add resilient nexthop groups to a list so that the activity of their
5119 * nexthop buckets will be periodically queried and cleared.
5120 */
5121 if (nhgi->is_resilient) {
5122 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5123 mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
5124 list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
5125 }
5126
5127 return 0;
5128
5129 err_group_refresh:
5130 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5131 err_group_inc:
5132 i = nhgi->count;
5133 err_nexthop_obj_init:
5134 for (i--; i >= 0; i--) {
5135 nh = &nhgi->nexthops[i];
5136 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5137 }
5138 kfree(nhgi);
5139 return err;
5140 }
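
/* A minimal, self-contained sketch (not driver code) of the
 * flexible-array allocation pattern used by
 * mlxsw_sp_nexthop_obj_group_info_init() above. struct_size() computes
 * sizeof(*nhgi) plus n trailing array elements, saturating on overflow.
 * The type and function names below are hypothetical.
 */
struct example_nhgi {
	unsigned int count;
	struct { int ifindex; } nexthops[];
};

static struct example_nhgi *example_nhgi_alloc(unsigned int n)
{
	struct example_nhgi *nhgi;

	/* Allocate the header plus n array entries in one zeroed block. */
	nhgi = kzalloc(struct_size(nhgi, nexthops, n), GFP_KERNEL);
	if (!nhgi)
		return NULL;
	nhgi->count = n;
	return nhgi;
}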
5141
5142 static void
5143 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5144 struct mlxsw_sp_nexthop_group *nh_grp)
5145 {
5146 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5147 struct mlxsw_sp_router *router = mlxsw_sp->router;
5148 int i;
5149
5150 if (nhgi->is_resilient) {
5151 list_del(&nhgi->list);
5152 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5153 cancel_delayed_work(&router->nh_grp_activity_dw);
5154 }
5155
5156 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5157 for (i = nhgi->count - 1; i >= 0; i--) {
5158 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5159
5160 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5161 }
5162 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5163 WARN_ON_ONCE(nhgi->adj_index_valid);
5164 kfree(nhgi);
5165 }
5166
5167 static struct mlxsw_sp_nexthop_group *
5168 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
5169 struct nh_notifier_info *info)
5170 {
5171 struct mlxsw_sp_nexthop_group *nh_grp;
5172 int err;
5173
5174 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5175 if (!nh_grp)
5176 return ERR_PTR(-ENOMEM);
5177 INIT_LIST_HEAD(&nh_grp->vr_list);
5178 err = rhashtable_init(&nh_grp->vr_ht,
5179 &mlxsw_sp_nexthop_group_vr_ht_params);
5180 if (err)
5181 goto err_nexthop_group_vr_ht_init;
5182 INIT_LIST_HEAD(&nh_grp->fib_list);
5183 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5184 nh_grp->obj.id = info->id;
5185
5186 err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
5187 if (err)
5188 goto err_nexthop_group_info_init;
5189
5190 nh_grp->can_destroy = false;
5191
5192 return nh_grp;
5193
5194 err_nexthop_group_info_init:
5195 rhashtable_destroy(&nh_grp->vr_ht);
5196 err_nexthop_group_vr_ht_init:
5197 kfree(nh_grp);
5198 return ERR_PTR(err);
5199 }
5200
5201 static void
5202 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5203 struct mlxsw_sp_nexthop_group *nh_grp)
5204 {
5205 if (!nh_grp->can_destroy)
5206 return;
5207 mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5208 WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5209 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5210 rhashtable_destroy(&nh_grp->vr_ht);
5211 kfree(nh_grp);
5212 }
5213
5214 static struct mlxsw_sp_nexthop_group *
5215 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5216 {
5217 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5218
5219 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5220 cmp_arg.id = id;
5221 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5222 &cmp_arg,
5223 mlxsw_sp_nexthop_group_ht_params);
5224 }
5225
5226 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5227 struct mlxsw_sp_nexthop_group *nh_grp)
5228 {
5229 return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5230 }
5231
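/* Replace the contents of a nexthop group in place by swapping the group
 * info structures of the new and old groups: routes keep pointing at the
 * old group object, which now carries the new info, while the temporary
 * group object carries the old info and is destroyed on success. On
 * failure the swap is undone.
 */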
5232 static int
5233 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5234 struct mlxsw_sp_nexthop_group *nh_grp,
5235 struct mlxsw_sp_nexthop_group *old_nh_grp,
5236 struct netlink_ext_ack *extack)
5237 {
5238 struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5239 struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5240 int err;
5241
5242 old_nh_grp->nhgi = new_nhgi;
5243 new_nhgi->nh_grp = old_nh_grp;
5244 nh_grp->nhgi = old_nhgi;
5245 old_nhgi->nh_grp = nh_grp;
5246
5247 if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5248 /* Both the old adjacency index and the new one are valid.
5249 * Routes are currently using the old one. Tell the device to
5250 * replace the old adjacency index with the new one.
5251 */
5252 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5253 old_nhgi->adj_index,
5254 old_nhgi->ecmp_size);
5255 if (err) {
5256 NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5257 goto err_out;
5258 }
5259 } else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5260 /* The old adjacency index is valid, while the new one is not.
5261 * Iterate over all the routes using the group and change them
5262 * to trap packets to the CPU.
5263 */
5264 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5265 if (err) {
5266 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5267 goto err_out;
5268 }
5269 } else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5270 /* The old adjacency index is invalid, while the new one is.
5271 * Iterate over all the routes using the group and change them
5272 * to forward packets using the new valid index.
5273 */
5274 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5275 if (err) {
5276 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5277 goto err_out;
5278 }
5279 }
5280
5281 /* Make sure the flags are set / cleared based on the new nexthop group
5282 * information.
5283 */
5284 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5285
5286 /* At this point 'nh_grp' is just a shell that is not used by anyone
5287 * and its nexthop group info is the old info that was just replaced
5288 * with the new one. Remove it.
5289 */
5290 nh_grp->can_destroy = true;
5291 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5292
5293 return 0;
5294
5295 err_out:
5296 old_nhgi->nh_grp = old_nh_grp;
5297 nh_grp->nhgi = new_nhgi;
5298 new_nhgi->nh_grp = nh_grp;
5299 old_nh_grp->nhgi = old_nhgi;
5300 return err;
5301 }
5302
5303 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5304 struct nh_notifier_info *info)
5305 {
5306 struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5307 struct netlink_ext_ack *extack = info->extack;
5308 int err;
5309
5310 nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5311 if (IS_ERR(nh_grp))
5312 return PTR_ERR(nh_grp);
5313
5314 old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5315 if (!old_nh_grp)
5316 err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5317 else
5318 err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5319 old_nh_grp, extack);
5320
5321 if (err) {
5322 nh_grp->can_destroy = true;
5323 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5324 }
5325
5326 return err;
5327 }
5328
5329 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5330 struct nh_notifier_info *info)
5331 {
5332 struct mlxsw_sp_nexthop_group *nh_grp;
5333
5334 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5335 if (!nh_grp)
5336 return;
5337
5338 nh_grp->can_destroy = true;
5339 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5340
5341 /* If the group still has routes using it, then defer the delete
5342 * operation until the last route using it is deleted.
5343 */
5344 if (!list_empty(&nh_grp->fib_list))
5345 return;
5346 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5347 }
5348
5349 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5350 u32 adj_index, char *ratr_pl)
5351 {
5352 MLXSW_REG_ZERO(ratr, ratr_pl);
5353 mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5354 mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5355 mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5356
5357 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5358 }
5359
5360 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5361 {
5362 /* Clear the opcode and activity on both the old and new payload as
5363 * they are irrelevant for the comparison.
5364 */
5365 mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5366 mlxsw_reg_ratr_a_set(ratr_pl, 0);
5367 mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5368 mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5369
5370 /* If the contents of the adjacency entry are consistent with the
5371 * replacement request, then replacement was successful.
5372 */
5373 if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5374 return 0;
5375
5376 return -EINVAL;
5377 }
5378
5379 static int
5380 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5381 struct mlxsw_sp_nexthop *nh,
5382 struct nh_notifier_info *info)
5383 {
5384 u16 bucket_index = info->nh_res_bucket->bucket_index;
5385 struct netlink_ext_ack *extack = info->extack;
5386 bool force = info->nh_res_bucket->force;
5387 char ratr_pl_new[MLXSW_REG_RATR_LEN];
5388 char ratr_pl[MLXSW_REG_RATR_LEN];
5389 u32 adj_index;
5390 int err;
5391
5392 /* No point in trying an atomic replacement if the idle timer interval
5393 * is smaller than the interval in which we query and clear activity.
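* (E.g., with a hypothetical activity sampling interval of one second, a
* 500 ms idle timer can never be observed as idle, so force the write.)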
5394 */
5395 if (!force && info->nh_res_bucket->idle_timer_ms <
5396 MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5397 force = true;
5398
5399 adj_index = nh->nhgi->adj_index + bucket_index;
5400 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5401 if (err) {
5402 NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5403 return err;
5404 }
5405
5406 if (!force) {
5407 err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5408 ratr_pl_new);
5409 if (err) {
5410 NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5411 return err;
5412 }
5413
5414 err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5415 if (err) {
5416 NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5417 return err;
5418 }
5419 }
5420
5421 nh->update = 0;
5422 nh->offloaded = 1;
5423 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5424
5425 return 0;
5426 }
5427
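/* Replace a single bucket of a resilient nexthop group: tear down the
 * currently installed nexthop, initialize the new one and update the
 * corresponding adjacency entry. On failure, the old nexthop is
 * re-initialized so that driver state matches the unchanged hardware
 * state.
 */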
5428 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5429 struct nh_notifier_info *info)
5430 {
5431 u16 bucket_index = info->nh_res_bucket->bucket_index;
5432 struct netlink_ext_ack *extack = info->extack;
5433 struct mlxsw_sp_nexthop_group_info *nhgi;
5434 struct nh_notifier_single_info *nh_obj;
5435 struct mlxsw_sp_nexthop_group *nh_grp;
5436 struct mlxsw_sp_nexthop *nh;
5437 int err;
5438
5439 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5440 if (!nh_grp) {
5441 NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5442 return -EINVAL;
5443 }
5444
5445 nhgi = nh_grp->nhgi;
5446
5447 if (bucket_index >= nhgi->count) {
5448 NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5449 return -EINVAL;
5450 }
5451
5452 nh = &nhgi->nexthops[bucket_index];
5453 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5454
5455 nh_obj = &info->nh_res_bucket->new_nh;
5456 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5457 if (err) {
5458 NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5459 goto err_nexthop_obj_init;
5460 }
5461
5462 err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5463 if (err)
5464 goto err_nexthop_obj_bucket_adj_update;
5465
5466 return 0;
5467
5468 err_nexthop_obj_bucket_adj_update:
5469 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5470 err_nexthop_obj_init:
5471 nh_obj = &info->nh_res_bucket->old_nh;
5472 mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5473 /* The old adjacency entry was not overwritten */
5474 nh->update = 0;
5475 nh->offloaded = 1;
5476 return err;
5477 }
5478
5479 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5480 unsigned long event, void *ptr)
5481 {
5482 struct nh_notifier_info *info = ptr;
5483 struct mlxsw_sp_router *router;
5484 int err = 0;
5485
5486 router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5487 err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5488 if (err)
5489 goto out;
5490
5491 mutex_lock(&router->lock);
5492
5493 switch (event) {
5494 case NEXTHOP_EVENT_REPLACE:
5495 err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5496 break;
5497 case NEXTHOP_EVENT_DEL:
5498 mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5499 break;
5500 case NEXTHOP_EVENT_BUCKET_REPLACE:
5501 err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5502 info);
5503 break;
5504 default:
5505 break;
5506 }
5507
5508 mutex_unlock(&router->lock);
5509
5510 out:
5511 return notifier_from_errno(err);
5512 }
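
/* A minimal sketch (not part of this file) of how a callback such as
 * mlxsw_sp_nexthop_obj_event() is wired up. The notifier block name is
 * hypothetical; register_nexthop_notifier() invokes the callback for
 * subsequent NEXTHOP_EVENT_* notifications:
 *
 *	static struct notifier_block example_nexthop_nb = {
 *		.notifier_call = mlxsw_sp_nexthop_obj_event,
 *	};
 *
 *	err = register_nexthop_notifier(net, &example_nexthop_nb, extack);
 *	if (err)
 *		return err;
 */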
5513
5514 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5515 struct fib_info *fi)
5516 {
5517 const struct fib_nh *nh = fib_info_nh(fi, 0);
5518
5519 return nh->fib_nh_gw_family ||
5520 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5521 }
5522
5523 static int
5524 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5525 struct mlxsw_sp_nexthop_group *nh_grp)
5526 {
5527 unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5528 struct mlxsw_sp_nexthop_group_info *nhgi;
5529 struct mlxsw_sp_nexthop *nh;
5530 int err, i;
5531
5532 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5533 if (!nhgi)
5534 return -ENOMEM;
5535 nh_grp->nhgi = nhgi;
5536 nhgi->nh_grp = nh_grp;
5537 nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5538 nhgi->count = nhs;
5539 for (i = 0; i < nhgi->count; i++) {
5540 struct fib_nh *fib_nh;
5541
5542 nh = &nhgi->nexthops[i];
5543 fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5544 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5545 if (err)
5546 goto err_nexthop4_init;
5547 }
5548 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5549 if (err)
5550 goto err_group_inc;
5551 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5552 if (err)
5553 goto err_group_refresh;
5554
5555 return 0;
5556
5557 err_group_refresh:
5558 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5559 err_group_inc:
5560 i = nhgi->count;
5561 err_nexthop4_init:
5562 for (i--; i >= 0; i--) {
5563 nh = &nhgi->nexthops[i];
5564 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5565 }
5566 kfree(nhgi);
5567 return err;
5568 }
5569
5570 static void
5571 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5572 struct mlxsw_sp_nexthop_group *nh_grp)
5573 {
5574 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5575 int i;
5576
5577 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5578 for (i = nhgi->count - 1; i >= 0; i--) {
5579 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5580
5581 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5582 }
5583 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5584 WARN_ON_ONCE(nhgi->adj_index_valid);
5585 kfree(nhgi);
5586 }
5587
5588 static struct mlxsw_sp_nexthop_group *
5589 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5590 {
5591 struct mlxsw_sp_nexthop_group *nh_grp;
5592 int err;
5593
5594 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5595 if (!nh_grp)
5596 return ERR_PTR(-ENOMEM);
5597 INIT_LIST_HEAD(&nh_grp->vr_list);
5598 err = rhashtable_init(&nh_grp->vr_ht,
5599 &mlxsw_sp_nexthop_group_vr_ht_params);
5600 if (err)
5601 goto err_nexthop_group_vr_ht_init;
5602 INIT_LIST_HEAD(&nh_grp->fib_list);
5603 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5604 nh_grp->ipv4.fi = fi;
5605 fib_info_hold(fi);
5606
5607 err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5608 if (err)
5609 goto err_nexthop_group_info_init;
5610
5611 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5612 if (err)
5613 goto err_nexthop_group_insert;
5614
5615 nh_grp->can_destroy = true;
5616
5617 return nh_grp;
5618
5619 err_nexthop_group_insert:
5620 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5621 err_nexthop_group_info_init:
5622 fib_info_put(fi);
5623 rhashtable_destroy(&nh_grp->vr_ht);
5624 err_nexthop_group_vr_ht_init:
5625 kfree(nh_grp);
5626 return ERR_PTR(err);
5627 }
5628
5629 static void
5630 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5631 struct mlxsw_sp_nexthop_group *nh_grp)
5632 {
5633 if (!nh_grp->can_destroy)
5634 return;
5635 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5636 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5637 fib_info_put(nh_grp->ipv4.fi);
5638 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5639 rhashtable_destroy(&nh_grp->vr_ht);
5640 kfree(nh_grp);
5641 }
5642
5643 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5644 struct mlxsw_sp_fib_entry *fib_entry,
5645 struct fib_info *fi)
5646 {
5647 struct mlxsw_sp_nexthop_group *nh_grp;
5648
5649 if (fi->nh) {
5650 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5651 fi->nh->id);
5652 if (WARN_ON_ONCE(!nh_grp))
5653 return -EINVAL;
5654 goto out;
5655 }
5656
5657 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5658 if (!nh_grp) {
5659 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5660 if (IS_ERR(nh_grp))
5661 return PTR_ERR(nh_grp);
5662 }
5663 out:
5664 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5665 fib_entry->nh_group = nh_grp;
5666 return 0;
5667 }
5668
5669 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5670 struct mlxsw_sp_fib_entry *fib_entry)
5671 {
5672 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5673
5674 list_del(&fib_entry->nexthop_group_node);
5675 if (!list_empty(&nh_grp->fib_list))
5676 return;
5677
5678 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5679 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5680 return;
5681 }
5682
5683 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5684 }
5685
5686 static bool
5687 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5688 {
5689 struct mlxsw_sp_fib4_entry *fib4_entry;
5690
5691 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5692 common);
5693 return !fib4_entry->dscp;
5694 }
5695
5696 static bool
5697 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5698 {
5699 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5700
5701 switch (fib_entry->fib_node->fib->proto) {
5702 case MLXSW_SP_L3_PROTO_IPV4:
5703 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5704 return false;
5705 break;
5706 case MLXSW_SP_L3_PROTO_IPV6:
5707 break;
5708 }
5709
5710 switch (fib_entry->type) {
5711 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5712 return !!nh_group->nhgi->adj_index_valid;
5713 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5714 return !!mlxsw_sp_nhgi_rif(nh_group->nhgi);
5715 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5716 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5717 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5718 return true;
5719 default:
5720 return false;
5721 }
5722 }
5723
5724 static struct mlxsw_sp_nexthop *
5725 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5726 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5727 {
5728 int i;
5729
5730 for (i = 0; i < nh_grp->nhgi->count; i++) {
5731 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5732 struct net_device *dev = mlxsw_sp_nexthop_dev(nh);
5733 struct fib6_info *rt = mlxsw_sp_rt6->rt;
5734
5735 if (dev && dev == rt->fib6_nh->fib_nh_dev &&
5736 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5737 &rt->fib6_nh->fib_nh_gw6))
5738 return nh;
5739 }
5740
5741 return NULL;
5742 }
5743
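/* The helpers below report route offload state back to the kernel FIB so
 * that it can be reflected to user space as the offload, trap and
 * offload_failed route flags.
 */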
5744 static void
5745 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5746 struct fib_entry_notifier_info *fen_info)
5747 {
5748 u32 *p_dst = (u32 *) &fen_info->dst;
5749 struct fib_rt_info fri;
5750
5751 fri.fi = fen_info->fi;
5752 fri.tb_id = fen_info->tb_id;
5753 fri.dst = cpu_to_be32(*p_dst);
5754 fri.dst_len = fen_info->dst_len;
5755 fri.dscp = fen_info->dscp;
5756 fri.type = fen_info->type;
5757 fri.offload = false;
5758 fri.trap = false;
5759 fri.offload_failed = true;
5760 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5761 }
5762
5763 static void
5764 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5765 struct mlxsw_sp_fib_entry *fib_entry)
5766 {
5767 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5768 int dst_len = fib_entry->fib_node->key.prefix_len;
5769 struct mlxsw_sp_fib4_entry *fib4_entry;
5770 struct fib_rt_info fri;
5771 bool should_offload;
5772
5773 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5774 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5775 common);
5776 fri.fi = fib4_entry->fi;
5777 fri.tb_id = fib4_entry->tb_id;
5778 fri.dst = cpu_to_be32(*p_dst);
5779 fri.dst_len = dst_len;
5780 fri.dscp = fib4_entry->dscp;
5781 fri.type = fib4_entry->type;
5782 fri.offload = should_offload;
5783 fri.trap = !should_offload;
5784 fri.offload_failed = false;
5785 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5786 }
5787
5788 static void
5789 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5790 struct mlxsw_sp_fib_entry *fib_entry)
5791 {
5792 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5793 int dst_len = fib_entry->fib_node->key.prefix_len;
5794 struct mlxsw_sp_fib4_entry *fib4_entry;
5795 struct fib_rt_info fri;
5796
5797 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5798 common);
5799 fri.fi = fib4_entry->fi;
5800 fri.tb_id = fib4_entry->tb_id;
5801 fri.dst = cpu_to_be32(*p_dst);
5802 fri.dst_len = dst_len;
5803 fri.dscp = fib4_entry->dscp;
5804 fri.type = fib4_entry->type;
5805 fri.offload = false;
5806 fri.trap = false;
5807 fri.offload_failed = false;
5808 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5809 }
5810
5811 #if IS_ENABLED(CONFIG_IPV6)
5812 static void
5813 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5814 struct fib6_info **rt_arr,
5815 unsigned int nrt6)
5816 {
5817 int i;
5818
5819 /* In IPv6 a multipath route is represented using multiple routes, so
5820 * we need to set the flags on all of them.
5821 */
5822 for (i = 0; i < nrt6; i++)
5823 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5824 false, false, true);
5825 }
5826 #else
5827 static void
5828 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5829 struct fib6_info **rt_arr,
5830 unsigned int nrt6)
5831 {
5832 }
5833 #endif
5834
5835 #if IS_ENABLED(CONFIG_IPV6)
5836 static void
5837 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5838 struct mlxsw_sp_fib_entry *fib_entry)
5839 {
5840 struct mlxsw_sp_fib6_entry *fib6_entry;
5841 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5842 bool should_offload;
5843
5844 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5845
5846 /* In IPv6 a multipath route is represented using multiple routes, so
5847 * we need to set the flags on all of them.
5848 */
5849 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5850 common);
5851 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5852 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5853 should_offload, !should_offload, false);
5854 }
5855 #else
5856 static void
5857 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5858 struct mlxsw_sp_fib_entry *fib_entry)
5859 {
5860 }
5861 #endif
5862
5863 #if IS_ENABLED(CONFIG_IPV6)
5864 static void
5865 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5866 struct mlxsw_sp_fib_entry *fib_entry)
5867 {
5868 struct mlxsw_sp_fib6_entry *fib6_entry;
5869 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5870
5871 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5872 common);
5873 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5874 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5875 false, false, false);
5876 }
5877 #else
5878 static void
5879 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5880 struct mlxsw_sp_fib_entry *fib_entry)
5881 {
5882 }
5883 #endif
5884
5885 static void
5886 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5887 struct mlxsw_sp_fib_entry *fib_entry)
5888 {
5889 switch (fib_entry->fib_node->fib->proto) {
5890 case MLXSW_SP_L3_PROTO_IPV4:
5891 mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5892 break;
5893 case MLXSW_SP_L3_PROTO_IPV6:
5894 mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5895 break;
5896 }
5897 }
5898
5899 static void
5900 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5901 struct mlxsw_sp_fib_entry *fib_entry)
5902 {
5903 switch (fib_entry->fib_node->fib->proto) {
5904 case MLXSW_SP_L3_PROTO_IPV4:
5905 mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5906 break;
5907 case MLXSW_SP_L3_PROTO_IPV6:
5908 mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5909 break;
5910 }
5911 }
5912
5913 static void
5914 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5915 struct mlxsw_sp_fib_entry *fib_entry,
5916 enum mlxsw_reg_ralue_op op)
5917 {
5918 switch (op) {
5919 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
5920 mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5921 break;
5922 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
5923 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5924 break;
5925 default:
5926 break;
5927 }
5928 }
5929
5930 static void
5931 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
5932 const struct mlxsw_sp_fib_entry *fib_entry,
5933 enum mlxsw_reg_ralue_op op)
5934 {
5935 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5936 enum mlxsw_reg_ralxx_protocol proto;
5937 u32 *p_dip;
5938
5939 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
5940
5941 switch (fib->proto) {
5942 case MLXSW_SP_L3_PROTO_IPV4:
5943 p_dip = (u32 *) fib_entry->fib_node->key.addr;
5944 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
5945 fib_entry->fib_node->key.prefix_len,
5946 *p_dip);
5947 break;
5948 case MLXSW_SP_L3_PROTO_IPV6:
5949 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
5950 fib_entry->fib_node->key.prefix_len,
5951 fib_entry->fib_node->key.addr);
5952 break;
5953 }
5954 }
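
/* All route writes and deletions go through the RALUE register. The
 * helpers below first pack the route key (protocol, virtual router and
 * prefix) and then select the action applied to matching packets:
 * forward via an adjacency group, forward via a local RIF, trap to the
 * CPU, discard, or tunnel decapsulation.
 */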
5955
5956 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5957 struct mlxsw_sp_fib_entry *fib_entry,
5958 enum mlxsw_reg_ralue_op op)
5959 {
5960 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5961 struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5962 char ralue_pl[MLXSW_REG_RALUE_LEN];
5963 enum mlxsw_reg_ralue_trap_action trap_action;
5964 u16 trap_id = 0;
5965 u32 adjacency_index = 0;
5966 u16 ecmp_size = 0;
5967
5968 /* In case the nexthop group adjacency index is valid, use it
5969 * with the provided ECMP size. Otherwise, set up a trap and pass
5970 * traffic to the kernel.
5971 */
5972 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5973 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5974 adjacency_index = nhgi->adj_index;
5975 ecmp_size = nhgi->ecmp_size;
5976 } else if (!nhgi->adj_index_valid && nhgi->count &&
5977 mlxsw_sp_nhgi_rif(nhgi)) {
5978 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5979 adjacency_index = mlxsw_sp->router->adj_trap_index;
5980 ecmp_size = 1;
5981 } else {
5982 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5983 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5984 }
5985
5986 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
5987 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
5988 adjacency_index, ecmp_size);
5989 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
5990 }
5991
5992 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5993 struct mlxsw_sp_fib_entry *fib_entry,
5994 enum mlxsw_reg_ralue_op op)
5995 {
5996 struct mlxsw_sp_rif *rif = mlxsw_sp_nhgi_rif(fib_entry->nh_group->nhgi);
5997 enum mlxsw_reg_ralue_trap_action trap_action;
5998 char ralue_pl[MLXSW_REG_RALUE_LEN];
5999 u16 trap_id = 0;
6000 u16 rif_index = 0;
6001
6002 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
6003 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
6004 rif_index = rif->rif_index;
6005 } else {
6006 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6007 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
6008 }
6009
6010 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6011 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
6012 rif_index);
6013 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6014 }
6015
6016 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
6017 struct mlxsw_sp_fib_entry *fib_entry,
6018 enum mlxsw_reg_ralue_op op)
6019 {
6020 char ralue_pl[MLXSW_REG_RALUE_LEN];
6021
6022 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6023 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
6024 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6025 }
6026
6027 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
6028 struct mlxsw_sp_fib_entry *fib_entry,
6029 enum mlxsw_reg_ralue_op op)
6030 {
6031 enum mlxsw_reg_ralue_trap_action trap_action;
6032 char ralue_pl[MLXSW_REG_RALUE_LEN];
6033
6034 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
6035 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6036 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
6037 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6038 }
6039
6040 static int
6041 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
6042 struct mlxsw_sp_fib_entry *fib_entry,
6043 enum mlxsw_reg_ralue_op op)
6044 {
6045 enum mlxsw_reg_ralue_trap_action trap_action;
6046 char ralue_pl[MLXSW_REG_RALUE_LEN];
6047 u16 trap_id;
6048
6049 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6050 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
6051
6052 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6053 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
6054 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6055 }
6056
6057 static int
6058 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
6059 struct mlxsw_sp_fib_entry *fib_entry,
6060 enum mlxsw_reg_ralue_op op)
6061 {
6062 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
6063 const struct mlxsw_sp_ipip_ops *ipip_ops;
6064 char ralue_pl[MLXSW_REG_RALUE_LEN];
6065 int err;
6066
6067 if (WARN_ON(!ipip_entry))
6068 return -EINVAL;
6069
6070 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
6071 err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
6072 fib_entry->decap.tunnel_index);
6073 if (err)
6074 return err;
6075
6076 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6077 mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6078 fib_entry->decap.tunnel_index);
6079 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6080 }
6081
6082 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
6083 struct mlxsw_sp_fib_entry *fib_entry,
6084 enum mlxsw_reg_ralue_op op)
6085 {
6086 char ralue_pl[MLXSW_REG_RALUE_LEN];
6087
6088 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6089 mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6090 fib_entry->decap.tunnel_index);
6091 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6092 }
6093
6094 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6095 struct mlxsw_sp_fib_entry *fib_entry,
6096 enum mlxsw_reg_ralue_op op)
6097 {
6098 switch (fib_entry->type) {
6099 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6100 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
6101 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6102 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
6103 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6104 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
6105 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6106 return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
6107 case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6108 return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
6109 op);
6110 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6111 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
6112 fib_entry, op);
6113 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6114 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
6115 }
6116 return -EINVAL;
6117 }
6118
6119 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6120 struct mlxsw_sp_fib_entry *fib_entry,
6121 enum mlxsw_reg_ralue_op op)
6122 {
6123 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
6124
6125 if (err)
6126 return err;
6127
6128 mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6129
6130 return err;
6131 }
6132
6133 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6134 struct mlxsw_sp_fib_entry *fib_entry)
6135 {
6136 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6137 MLXSW_REG_RALUE_OP_WRITE_WRITE);
6138 }
6139
6140 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6141 struct mlxsw_sp_fib_entry *fib_entry)
6142 {
6143 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6144 MLXSW_REG_RALUE_OP_WRITE_DELETE);
6145 }
6146
6147 static int
6148 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6149 const struct fib_entry_notifier_info *fen_info,
6150 struct mlxsw_sp_fib_entry *fib_entry)
6151 {
6152 struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6153 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6154 struct mlxsw_sp_router *router = mlxsw_sp->router;
6155 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6156 int ifindex = nhgi->nexthops[0].ifindex;
6157 struct mlxsw_sp_ipip_entry *ipip_entry;
6158
6159 switch (fen_info->type) {
6160 case RTN_LOCAL:
6161 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6162 MLXSW_SP_L3_PROTO_IPV4, dip);
6163 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6164 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6165 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6166 fib_entry,
6167 ipip_entry);
6168 }
6169 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6170 MLXSW_SP_L3_PROTO_IPV4,
6171 &dip)) {
6172 u32 tunnel_index;
6173
6174 tunnel_index = router->nve_decap_config.tunnel_index;
6175 fib_entry->decap.tunnel_index = tunnel_index;
6176 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6177 return 0;
6178 }
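/* A local route that is not a tunnel decap point is trapped to the
 * CPU, same as a broadcast route.
 */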
6179 fallthrough;
6180 case RTN_BROADCAST:
6181 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6182 return 0;
6183 case RTN_BLACKHOLE:
6184 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6185 return 0;
6186 case RTN_UNREACHABLE:
6187 case RTN_PROHIBIT:
6188 /* Packets hitting these routes need to be trapped, but
6189 * can do so with a lower priority than packets directed
6190 * at the host, so use action type local instead of trap.
6191 */
6192 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6193 return 0;
6194 case RTN_UNICAST:
6195 if (nhgi->gateway)
6196 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6197 else
6198 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6199 return 0;
6200 default:
6201 return -EINVAL;
6202 }
6203 }
6204
6205 static void
6206 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6207 struct mlxsw_sp_fib_entry *fib_entry)
6208 {
6209 switch (fib_entry->type) {
6210 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6211 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6212 break;
6213 default:
6214 break;
6215 }
6216 }
6217
6218 static void
6219 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6220 struct mlxsw_sp_fib4_entry *fib4_entry)
6221 {
6222 mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6223 }
6224
6225 static struct mlxsw_sp_fib4_entry *
6226 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6227 struct mlxsw_sp_fib_node *fib_node,
6228 const struct fib_entry_notifier_info *fen_info)
6229 {
6230 struct mlxsw_sp_fib4_entry *fib4_entry;
6231 struct mlxsw_sp_fib_entry *fib_entry;
6232 int err;
6233
6234 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6235 if (!fib4_entry)
6236 return ERR_PTR(-ENOMEM);
6237 fib_entry = &fib4_entry->common;
6238
6239 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6240 if (err)
6241 goto err_nexthop4_group_get;
6242
6243 err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6244 fib_node->fib);
6245 if (err)
6246 goto err_nexthop_group_vr_link;
6247
6248 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6249 if (err)
6250 goto err_fib4_entry_type_set;
6251
6252 fib4_entry->fi = fen_info->fi;
6253 fib_info_hold(fib4_entry->fi);
6254 fib4_entry->tb_id = fen_info->tb_id;
6255 fib4_entry->type = fen_info->type;
6256 fib4_entry->dscp = fen_info->dscp;
6257
6258 fib_entry->fib_node = fib_node;
6259
6260 return fib4_entry;
6261
6262 err_fib4_entry_type_set:
6263 mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6264 err_nexthop_group_vr_link:
6265 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6266 err_nexthop4_group_get:
6267 kfree(fib4_entry);
6268 return ERR_PTR(err);
6269 }
6270
6271 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6272 struct mlxsw_sp_fib4_entry *fib4_entry)
6273 {
6274 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6275
6276 fib_info_put(fib4_entry->fi);
6277 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6278 mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6279 fib_node->fib);
6280 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6281 kfree(fib4_entry);
6282 }
6283
6284 static struct mlxsw_sp_fib4_entry *
6285 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6286 const struct fib_entry_notifier_info *fen_info)
6287 {
6288 struct mlxsw_sp_fib4_entry *fib4_entry;
6289 struct mlxsw_sp_fib_node *fib_node;
6290 struct mlxsw_sp_fib *fib;
6291 struct mlxsw_sp_vr *vr;
6292
6293 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6294 if (!vr)
6295 return NULL;
6296 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6297
6298 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6299 sizeof(fen_info->dst),
6300 fen_info->dst_len);
6301 if (!fib_node)
6302 return NULL;
6303
6304 fib4_entry = container_of(fib_node->fib_entry,
6305 struct mlxsw_sp_fib4_entry, common);
6306 if (fib4_entry->tb_id == fen_info->tb_id &&
6307 fib4_entry->dscp == fen_info->dscp &&
6308 fib4_entry->type == fen_info->type &&
6309 fib4_entry->fi == fen_info->fi)
6310 return fib4_entry;
6311
6312 return NULL;
6313 }
6314
6315 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6316 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6317 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6318 .key_len = sizeof(struct mlxsw_sp_fib_key),
6319 .automatic_shrinking = true,
6320 };
6321
6322 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6323 struct mlxsw_sp_fib_node *fib_node)
6324 {
6325 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6326 mlxsw_sp_fib_ht_params);
6327 }
6328
6329 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6330 struct mlxsw_sp_fib_node *fib_node)
6331 {
6332 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6333 mlxsw_sp_fib_ht_params);
6334 }
6335
6336 static struct mlxsw_sp_fib_node *
6337 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6338 size_t addr_len, unsigned char prefix_len)
6339 {
6340 struct mlxsw_sp_fib_key key;
6341
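/* Zero the key first: the hash covers the full key_len bytes, including
 * any padding and the unused tail of addr for short prefixes.
 */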
6342 memset(&key, 0, sizeof(key));
6343 memcpy(key.addr, addr, addr_len);
6344 key.prefix_len = prefix_len;
6345 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6346 }
6347
6348 static struct mlxsw_sp_fib_node *
6349 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6350 size_t addr_len, unsigned char prefix_len)
6351 {
6352 struct mlxsw_sp_fib_node *fib_node;
6353
6354 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6355 if (!fib_node)
6356 return NULL;
6357
6358 list_add(&fib_node->list, &fib->node_list);
6359 memcpy(fib_node->key.addr, addr, addr_len);
6360 fib_node->key.prefix_len = prefix_len;
6361
6362 return fib_node;
6363 }
6364
6365 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6366 {
6367 list_del(&fib_node->list);
6368 kfree(fib_node);
6369 }
6370
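/* Account a new prefix length against the LPM tree bound to this FIB. If
 * the prefix length is already in use, only the reference count is
 * bumped; otherwise a tree that also covers the new prefix length is
 * fetched and all the virtual routers bound to the FIB are moved over to
 * it.
 */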
6371 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6372 struct mlxsw_sp_fib_node *fib_node)
6373 {
6374 struct mlxsw_sp_prefix_usage req_prefix_usage;
6375 struct mlxsw_sp_fib *fib = fib_node->fib;
6376 struct mlxsw_sp_lpm_tree *lpm_tree;
6377 int err;
6378
6379 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6380 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6381 goto out;
6382
6383 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6384 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6385 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6386 fib->proto);
6387 if (IS_ERR(lpm_tree))
6388 return PTR_ERR(lpm_tree);
6389
6390 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6391 if (err)
6392 goto err_lpm_tree_replace;
6393
6394 out:
6395 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6396 return 0;
6397
6398 err_lpm_tree_replace:
6399 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6400 return err;
6401 }
6402
6403 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6404 struct mlxsw_sp_fib_node *fib_node)
6405 {
6406 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6407 struct mlxsw_sp_prefix_usage req_prefix_usage;
6408 struct mlxsw_sp_fib *fib = fib_node->fib;
6409 int err;
6410
6411 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6412 return;
6413 /* Try to construct a new LPM tree from the current prefix usage
6414 * minus the now-unused prefix length. If we fail, keep the old tree.
6415 */
6416 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6417 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6418 fib_node->key.prefix_len);
6419 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6420 fib->proto);
6421 if (IS_ERR(lpm_tree))
6422 return;
6423
6424 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6425 if (err)
6426 goto err_lpm_tree_replace;
6427
6428 return;
6429
6430 err_lpm_tree_replace:
6431 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6432 }
6433
6434 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6435 struct mlxsw_sp_fib_node *fib_node,
6436 struct mlxsw_sp_fib *fib)
6437 {
6438 int err;
6439
6440 err = mlxsw_sp_fib_node_insert(fib, fib_node);
6441 if (err)
6442 return err;
6443 fib_node->fib = fib;
6444
6445 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6446 if (err)
6447 goto err_fib_lpm_tree_link;
6448
6449 return 0;
6450
6451 err_fib_lpm_tree_link:
6452 fib_node->fib = NULL;
6453 mlxsw_sp_fib_node_remove(fib, fib_node);
6454 return err;
6455 }
6456
6457 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6458 struct mlxsw_sp_fib_node *fib_node)
6459 {
6460 struct mlxsw_sp_fib *fib = fib_node->fib;
6461
6462 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6463 fib_node->fib = NULL;
6464 mlxsw_sp_fib_node_remove(fib, fib_node);
6465 }
6466
6467 static struct mlxsw_sp_fib_node *
6468 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6469 size_t addr_len, unsigned char prefix_len,
6470 enum mlxsw_sp_l3proto proto)
6471 {
6472 struct mlxsw_sp_fib_node *fib_node;
6473 struct mlxsw_sp_fib *fib;
6474 struct mlxsw_sp_vr *vr;
6475 int err;
6476
6477 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6478 if (IS_ERR(vr))
6479 return ERR_CAST(vr);
6480 fib = mlxsw_sp_vr_fib(vr, proto);
6481
6482 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6483 if (fib_node)
6484 return fib_node;
6485
6486 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6487 if (!fib_node) {
6488 err = -ENOMEM;
6489 goto err_fib_node_create;
6490 }
6491
6492 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6493 if (err)
6494 goto err_fib_node_init;
6495
6496 return fib_node;
6497
6498 err_fib_node_init:
6499 mlxsw_sp_fib_node_destroy(fib_node);
6500 err_fib_node_create:
6501 mlxsw_sp_vr_put(mlxsw_sp, vr);
6502 return ERR_PTR(err);
6503 }
6504
6505 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6506 struct mlxsw_sp_fib_node *fib_node)
6507 {
6508 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6509
6510 if (fib_node->fib_entry)
6511 return;
6512 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6513 mlxsw_sp_fib_node_destroy(fib_node);
6514 mlxsw_sp_vr_put(mlxsw_sp, vr);
6515 }
6516
6517 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6518 struct mlxsw_sp_fib_entry *fib_entry)
6519 {
6520 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6521 int err;
6522
6523 fib_node->fib_entry = fib_entry;
6524
6525 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
6526 if (err)
6527 goto err_fib_entry_update;
6528
6529 return 0;
6530
6531 err_fib_entry_update:
6532 fib_node->fib_entry = NULL;
6533 return err;
6534 }
6535
6536 static void
6537 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6538 struct mlxsw_sp_fib_entry *fib_entry)
6539 {
6540 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6541
6542 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
6543 fib_node->fib_entry = NULL;
6544 }
6545
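/* A FIB node holds a single entry per {prefix, prefix length}. Do not
 * let a route from the main table replace an identical route from the
 * local table, since the local table is consulted first during lookup.
 */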
6546 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6547 {
6548 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6549 struct mlxsw_sp_fib4_entry *fib4_replaced;
6550
6551 if (!fib_node->fib_entry)
6552 return true;
6553
6554 fib4_replaced = container_of(fib_node->fib_entry,
6555 struct mlxsw_sp_fib4_entry, common);
6556 if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6557 fib4_replaced->tb_id == RT_TABLE_LOCAL)
6558 return false;
6559
6560 return true;
6561 }
6562
6563 static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp * mlxsw_sp,const struct fib_entry_notifier_info * fen_info)6564 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6565 const struct fib_entry_notifier_info *fen_info)
6566 {
6567 struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6568 struct mlxsw_sp_fib_entry *replaced;
6569 struct mlxsw_sp_fib_node *fib_node;
6570 int err;
6571
6572 if (fen_info->fi->nh &&
6573 !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6574 return 0;
6575
6576 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6577 &fen_info->dst, sizeof(fen_info->dst),
6578 fen_info->dst_len,
6579 MLXSW_SP_L3_PROTO_IPV4);
6580 if (IS_ERR(fib_node)) {
6581 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6582 return PTR_ERR(fib_node);
6583 }
6584
6585 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6586 if (IS_ERR(fib4_entry)) {
6587 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6588 err = PTR_ERR(fib4_entry);
6589 goto err_fib4_entry_create;
6590 }
6591
6592 if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6593 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6594 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6595 return 0;
6596 }
6597
6598 replaced = fib_node->fib_entry;
6599 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
6600 if (err) {
6601 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6602 goto err_fib_node_entry_link;
6603 }
6604
6605 /* Nothing to replace */
6606 if (!replaced)
6607 return 0;
6608
6609 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6610 fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6611 common);
6612 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6613
6614 return 0;
6615
6616 err_fib_node_entry_link:
6617 fib_node->fib_entry = replaced;
6618 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6619 err_fib4_entry_create:
6620 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6621 return err;
6622 }
6623
mlxsw_sp_router_fib4_del(struct mlxsw_sp * mlxsw_sp,struct fib_entry_notifier_info * fen_info)6624 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6625 struct fib_entry_notifier_info *fen_info)
6626 {
6627 struct mlxsw_sp_fib4_entry *fib4_entry;
6628 struct mlxsw_sp_fib_node *fib_node;
6629
6630 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6631 if (!fib4_entry)
6632 return;
6633 fib_node = fib4_entry->common.fib_node;
6634
6635 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
6636 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6637 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6638 }
6639
mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info * rt)6640 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6641 {
6642 /* Multicast routes aren't supported, so ignore them. Neighbour
6643 * Discovery packets are specifically trapped.
6644 */
6645 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6646 return true;
6647
6648 /* Cloned routes are irrelevant in the forwarding path. */
6649 if (rt->fib6_flags & RTF_CACHE)
6650 return true;
6651
6652 return false;
6653 }
6654
mlxsw_sp_rt6_create(struct fib6_info * rt)6655 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6656 {
6657 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6658
6659 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6660 if (!mlxsw_sp_rt6)
6661 return ERR_PTR(-ENOMEM);
6662
6663 /* In case of route replace, replaced route is deleted with
6664 * no notification. Take reference to prevent accessing freed
6665 * memory.
6666 */
6667 mlxsw_sp_rt6->rt = rt;
6668 fib6_info_hold(rt);
6669
6670 return mlxsw_sp_rt6;
6671 }
6672
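/* fib6_info_release() is only called when IPv6 support is built in; otherwise
 * the release is a no-op.
 */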
#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
	fib6_info_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
}
#endif

static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;

	if (!mlxsw_sp_rt6->rt->nh)
		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
	kfree(mlxsw_sp_rt6);
}

static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}

static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
			    const struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		if (mlxsw_sp_rt6->rt == rt)
			return mlxsw_sp_rt6;
	}

	return NULL;
}

static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct fib6_info *rt,
					enum mlxsw_sp_ipip_type *ret)
{
	return rt->fib6_nh->fib_nh_dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}

static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
	int err;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
#if IS_ENABLED(CONFIG_IPV6)
	nh->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
	if (err)
		goto err_nexthop_type_init;

	return 0;

err_nexthop_type_init:
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}

static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				    const struct fib6_info *rt)
{
	return rt->fib6_nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}

static int
mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
		       GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nhgi->count = fib6_entry->nrt6;
	for (i = 0; i < nhgi->count; i++) {
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nhgi->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}
	nh_grp->nhgi = nhgi;
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
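	/* All nexthops were initialized at this point; reset i so the unwind
	 * loop below tears every one of them down.
	 */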
	i = nhgi->count;
err_nexthop6_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}

static void
mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;

	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}

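/* Routes that use a nexthop object (rt->nh) share the group created when the
 * object was notified; otherwise look up or create a group keyed by the
 * route's own set of nexthops.
 */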
static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (rt->nh) {
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   rt->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	/* The route and the nexthop are described by the same struct, so we
	 * need to update the nexthop offload indication for the new route.
	 */
	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);

out:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}

static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;

	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
		return;
	}

	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}

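/* Rebind the entry to a nexthop group that reflects its current set of
 * routes. The old group is only destroyed once the entry has been moved over
 * to the new one in the device.
 */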
static int
mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	int err;

	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	/* If this entry is offloaded, then the adjacency index currently
	 * associated with it in the device's table is that of the old group.
	 * Start using the new one instead.
	 */
	err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_entry_update;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_entry_update:
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
	return err;
}

static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_unwind;
		}

		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	if (err)
		goto err_rt6_unwind;

	return 0;

err_rt6_unwind:
	for (; i > 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	return err;
}

static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
							   rt_arr[i]);
		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
			continue;

		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}

	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
}

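/* Local IPv6 routes are trapped to the CPU by default; if the address turns
 * out to be the underlay endpoint of an IP-in-IP or NVE tunnel, program a
 * decap entry instead.
 */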
static int
mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   const struct fib6_info *rt)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	int ifindex = nhgi->nexthops[0].ifindex;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
						       MLXSW_SP_L3_PROTO_IPV6,
						       dip);

	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
						     ipip_entry);
	}
	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
		u32 tunnel_index;

		tunnel_index = router->nve_decap_config.tunnel_index;
		fib_entry->decap.tunnel_index = tunnel_index;
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	}

	return 0;
}

static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					const struct fib6_info *rt)
{
	if (rt->fib6_flags & RTF_LOCAL)
		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
							  rt);
	if (rt->fib6_flags & RTF_ANYCAST)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	else if (rt->fib6_type == RTN_BLACKHOLE)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
	else if (rt->fib6_flags & RTF_REJECT)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
	else if (fib_entry->nh_group->nhgi->gateway)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	else
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;

	return 0;
}

static void
mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;

	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
				 list) {
		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	INIT_LIST_HEAD(&fib6_entry->rt6_list);

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_unwind;
		}
		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_rt6_unwind;

	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
	if (err)
		goto err_fib6_entry_type_set;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_fib6_entry_type_set:
	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_rt6_unwind:
	for (; i > 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	kfree(fib6_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
}

static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	WARN_ON(fib6_entry->nrt6);
	kfree(fib6_entry);
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct fib6_info *cmp_rt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
					    sizeof(rt->fib6_dst.addr),
					    rt->fib6_dst.plen);
	if (!fib_node)
		return NULL;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
	    rt->fib6_metric == cmp_rt->fib6_metric &&
	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
		return fib6_entry;

	return NULL;
}

static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	struct mlxsw_sp_fib6_entry *fib6_replaced;
	struct fib6_info *rt, *rt_replaced;

	if (!fib_node->fib_entry)
		return true;

	fib6_replaced = container_of(fib_node->fib_entry,
				     struct mlxsw_sp_fib6_entry,
				     common);
	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
		return false;

	return true;
}

static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
					struct fib6_info **rt_arr,
					unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
						nrt6);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_link;

	/* Nothing to replace */
	if (!replaced)
		return 0;

	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
				     common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);

	return 0;

err_fib_node_entry_link:
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
				       struct fib6_info **rt_arr,
				       unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return -EINVAL;
	}

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
					      nrt6);
	if (err)
		goto err_fib6_entry_nexthop_add;

	return 0;

err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib6_info **rt_arr,
				     unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return;

	/* Multipath routes are first added to the FIB trie and only then
	 * notified. If we vetoed the addition, we will get a delete
	 * notification for a route we do not have. Therefore, do not warn if
	 * the route was not found.
	 */
	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (!fib6_entry)
		return;

	/* If not all the nexthops are deleted, then only reduce the nexthop
	 * group.
	 */
	if (nrt6 != fib6_entry->nrt6) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
						nrt6);
		return;
	}

	fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static struct mlxsw_sp_mr_table *
mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
{
	if (family == RTNL_FAMILY_IPMR)
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
	else
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
				     struct mfc_entry_notifier_info *men_info,
				     bool replace)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
}

static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
				      struct mfc_entry_notifier_info *men_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
				   ven_info->vif_index,
				   ven_info->vif_flags, rif);
}

static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
		break;
	}
}

static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
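		/* Flushing the last node may release the FIB itself, taking
		 * its list head with it, so detect the final iteration before
		 * the iterator would touch freed memory.
		 */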
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	int i, j;

	for (i = 0; i < max_vrs; i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);

		/* If the virtual router was only used for IPv4, then it's no
		 * longer used.
		 */
		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}
}

struct mlxsw_sp_fib6_event_work {
	struct fib6_info **rt_arr;
	unsigned int nrt6;
};

struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	netdevice_tracker dev_tracker;
	union {
		struct mlxsw_sp_fib6_event_work fib6_work;
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
		struct mfc_entry_notifier_info men_info;
		struct vif_entry_notifier_info ven_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
};

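/* Snapshot the route and all of its siblings, taking a reference on each, so
 * that the deferred work item can safely process the multipath route after
 * the notifier has returned.
 */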
static int
mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
			       struct fib6_entry_notifier_info *fen6_info)
{
	struct fib6_info *rt = fen6_info->rt;
	struct fib6_info **rt_arr;
	struct fib6_info *iter;
	unsigned int nrt6;
	int i = 0;

	nrt6 = fen6_info->nsiblings + 1;

	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
	if (!rt_arr)
		return -ENOMEM;

	fib6_work->rt_arr = rt_arr;
	fib6_work->nrt6 = nrt6;

	rt_arr[0] = rt;
	fib6_info_hold(rt);

	if (!fen6_info->nsiblings)
		return 0;

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
		if (i == fen6_info->nsiblings)
			break;

		rt_arr[i + 1] = iter;
		fib6_info_hold(iter);
		i++;
	}
	WARN_ON_ONCE(i != fen6_info->nsiblings);

	return 0;
}

static void
mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
{
	int i;

	for (i = 0; i < fib6_work->nrt6; i++)
		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
	kfree(fib6_work->rt_arr);
}

static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	int err;

	mutex_lock(&mlxsw_sp->router->lock);
	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
						   &fib_work->fen_info);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
							      &fib_work->fen_info);
		}
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
					fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	kfree(fib_work);
}

static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	int err;

	mutex_lock(&mlxsw_sp->router->lock);
	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
						   fib6_work->rt_arr,
						   fib6_work->nrt6);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
							      fib6_work->rt_arr,
							      fib6_work->nrt6);
		}
		mlxsw_sp_router_fib6_work_fini(fib6_work);
		break;
	case FIB_EVENT_ENTRY_APPEND:
		err = mlxsw_sp_router_fib6_append(mlxsw_sp,
						  fib6_work->rt_arr,
						  fib6_work->nrt6);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
							      fib6_work->rt_arr,
							      fib6_work->nrt6);
		}
		mlxsw_sp_router_fib6_work_fini(fib6_work);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib6_del(mlxsw_sp,
					 fib6_work->rt_arr,
					 fib6_work->nrt6);
		mlxsw_sp_router_fib6_work_fini(fib6_work);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	kfree(fib_work);
}

static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace;
	int err;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;

		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
						replace);
		if (err)
			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
		mr_cache_put(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
		mr_cache_put(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
						    &fib_work->ven_info);
		if (err)
			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
		netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
		break;
	case FIB_EVENT_VIF_DEL:
		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
					      &fib_work->ven_info);
		netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
	kfree(fib_work);
}

static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
				       struct fib_notifier_info *info)
{
	struct fib_entry_notifier_info *fen_info;
	struct fib_nh_notifier_info *fnh_info;

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_DEL:
		fen_info = container_of(info, struct fib_entry_notifier_info,
					info);
		fib_work->fen_info = *fen_info;
		/* Take a reference on the fib_info to prevent it from being
		 * freed while the work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		fnh_info = container_of(info, struct fib_nh_notifier_info,
					info);
		fib_work->fnh_info = *fnh_info;
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
}

static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
				      struct fib_notifier_info *info)
{
	struct fib6_entry_notifier_info *fen6_info;
	int err;

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
	case FIB_EVENT_ENTRY_DEL:
		fen6_info = container_of(info, struct fib6_entry_notifier_info,
					 info);
		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
						     fen6_info);
		if (err)
			return err;
		break;
	}

	return 0;
}

static void
mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
			    struct fib_notifier_info *info)
{
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
		mr_cache_hold(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
	case FIB_EVENT_VIF_DEL:
		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
		netdev_hold(fib_work->ven_info.dev, &fib_work->dev_tracker,
			    GFP_ATOMIC);
		break;
	}
}

static int mlxsw_sp_router_fib_rule_event(unsigned long event,
					  struct fib_notifier_info *info,
					  struct mlxsw_sp *mlxsw_sp)
{
	struct netlink_ext_ack *extack = info->extack;
	struct fib_rule_notifier_info *fr_info;
	struct fib_rule *rule;
	int err = 0;

	/* nothing to do at the moment */
	if (event == FIB_EVENT_RULE_DEL)
		return 0;

	fr_info = container_of(info, struct fib_rule_notifier_info, info);
	rule = fr_info->rule;

	/* Rule only affects locally generated traffic */
	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
		return 0;

	switch (info->family) {
	case AF_INET:
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case AF_INET6:
		if (!fib6_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case RTNL_FAMILY_IPMR:
		if (!ipmr_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case RTNL_FAMILY_IP6MR:
		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	}

	if (err < 0)
		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");

	return err;
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err;

	if (info->family != AF_INET && info->family != AF_INET6 &&
	    info->family != RTNL_FAMILY_IPMR &&
	    info->family != RTNL_FAMILY_IP6MR)
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, fib_nb);

	switch (event) {
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		err = mlxsw_sp_router_fib_rule_event(event, info,
						     router->mlxsw_sp);
		return notifier_from_errno(err);
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
		if (info->family == AF_INET) {
			struct fib_entry_notifier_info *fen_info = ptr;

			if (fen_info->fi->fib_nh_is_v6) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
				return notifier_from_errno(-EINVAL);
			}
		}
		break;
	}

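	/* The notifier chain runs under RCU, so the work item must be
	 * allocated atomically; the actual programming is deferred to
	 * process context, where the router lock can be taken.
	 */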
	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (!fib_work)
		return NOTIFY_BAD;

	fib_work->mlxsw_sp = router->mlxsw_sp;
	fib_work->event = event;

	switch (info->family) {
	case AF_INET:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
		mlxsw_sp_router_fib4_event(fib_work, info);
		break;
	case AF_INET6:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
		err = mlxsw_sp_router_fib6_event(fib_work, info);
		if (err)
			goto err_fib_event;
		break;
	case RTNL_FAMILY_IP6MR:
	case RTNL_FAMILY_IPMR:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
		mlxsw_sp_router_fibmr_event(fib_work, info);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;

err_fib_event:
	kfree(fib_work);
	return NOTIFY_BAD;
}

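/* Linearly scan the RIF table for the RIF bound to the given netdev. */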
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	int i;

	for (i = 0; i < max_rifs; i++)
		if (mlxsw_sp->router->rifs[i] &&
		    mlxsw_sp_rif_dev_is(mlxsw_sp->router->rifs[i], dev))
			return mlxsw_sp->router->rifs[i];

	return NULL;
}

bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	mutex_unlock(&mlxsw_sp->router->lock);

	return rif;
}

u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;
	u16 vid = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;

	/* We only return the VID for VLAN RIFs. Otherwise we return an
	 * invalid value (0).
	 */
	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
		goto out;

	vid = mlxsw_sp_fid_8021q_vid(rif->fid);

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return vid;
}

static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_router_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_neigh_rif_made_sync(mlxsw_sp, rif);
	if (err)
		return err;

	err = mlxsw_sp_nexthop_rif_made_sync(mlxsw_sp, rif);
	if (err)
		goto err_nexthop;

	return 0;

err_nexthop:
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
	return err;
}

static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	/* Signal to nexthop cleanup that the RIF is going away. */
	rif->crif->rif = NULL;

	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}

static bool __mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
{
	struct inet6_dev *inet6_dev;
	struct in_device *idev;

	idev = __in_dev_get_rcu(dev);
	if (idev && idev->ifa_list)
		return false;

	inet6_dev = __in6_dev_get(dev);
	if (inet6_dev && !list_empty(&inet6_dev->addr_list))
		return false;

	return true;
}

static bool mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
{
	bool addr_list_empty;

	rcu_read_lock();
	addr_list_empty = __mlxsw_sp_dev_addr_list_empty(dev);
	rcu_read_unlock();

	return addr_list_empty;
}

static bool
mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
			   unsigned long event)
{
	bool addr_list_empty;

	switch (event) {
	case NETDEV_UP:
		return rif == NULL;
	case NETDEV_DOWN:
		addr_list_empty = mlxsw_sp_dev_addr_list_empty(dev);

		/* macvlans do not have a RIF, but rather piggyback on the
		 * RIF of their lower device.
		 */
		if (netif_is_macvlan(dev) && addr_list_empty)
			return true;

		if (rif && addr_list_empty &&
		    !netif_is_l3_slave(mlxsw_sp_rif_dev(rif)))
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}

static enum mlxsw_sp_rif_type
mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *dev)
{
	enum mlxsw_sp_fid_type type;

	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
		return MLXSW_SP_RIF_TYPE_IPIP_LB;

	/* Otherwise RIF type is derived from the type of the underlying FID. */
	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev))
		type = MLXSW_SP_FID_TYPE_8021D;
	else
		type = MLXSW_SP_FID_TYPE_RFID;

	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
}

static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
				    u8 rif_entries)
{
	*p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
				      rif_entries);
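	/* gen_pool_alloc() returns 0 on failure, which is why the pool is
	 * shifted by MLXSW_SP_ROUTER_GENALLOC_OFFSET: a valid RIF index of 0
	 * would otherwise be indistinguishable from an allocation failure.
	 */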
	if (*p_rif_index == 0)
		return -ENOBUFS;
	*p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;

	/* RIF indexes must be aligned to the allocation size. */
	WARN_ON_ONCE(*p_rif_index % rif_entries);

	return 0;
}

static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				    u8 rif_entries)
{
	gen_pool_free(mlxsw_sp->router->rifs_table,
		      MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
}

static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
					       u16 vr_id,
					       struct mlxsw_sp_crif *crif)
{
	struct net_device *l3_dev = crif ? crif->key.dev : NULL;
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(rif_size, GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->neigh_list);
	if (l3_dev) {
		ether_addr_copy(rif->addr, l3_dev->dev_addr);
		rif->mtu = l3_dev->mtu;
	}
	rif->vr_id = vr_id;
	rif->rif_index = rif_index;
	if (crif) {
		rif->crif = crif;
		crif->rif = rif;
	}

	return rif;
}

static void mlxsw_sp_rif_free(struct mlxsw_sp_rif *rif)
{
	WARN_ON(!list_empty(&rif->neigh_list));

	if (rif->crif)
		rif->crif->rif = NULL;
	kfree(rif);
}

struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}

u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}

u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->common.rif_index;
}

u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
	if (WARN_ON(IS_ERR(ul_vr)))
		return 0;

	return ul_vr->id;
}

u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->ul_rif_id;
}

static bool
mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_rif_counter_valid_get(rif,
					      MLXSW_SP_RIF_COUNTER_EGRESS) &&
	       mlxsw_sp_rif_counter_valid_get(rif,
					      MLXSW_SP_RIF_COUNTER_INGRESS);
}
8197
8198 static int
mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif * rif)8199 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
8200 {
8201 int err;
8202
8203 err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8204 if (err)
8205 return err;
8206
8207 /* Clear stale data. */
8208 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8209 MLXSW_SP_RIF_COUNTER_INGRESS,
8210 NULL);
8211 if (err)
8212 goto err_clear_ingress;
8213
8214 err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8215 if (err)
8216 goto err_alloc_egress;
8217
8218 /* Clear stale data. */
8219 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8220 MLXSW_SP_RIF_COUNTER_EGRESS,
8221 NULL);
8222 if (err)
8223 goto err_clear_egress;
8224
8225 return 0;
8226
8227 err_clear_egress:
8228 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8229 err_alloc_egress:
8230 err_clear_ingress:
8231 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8232 return err;
8233 }
8234
8235 static void
mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif * rif)8236 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
8237 {
8238 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8239 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8240 }
8241
8242 static void
mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif * rif,struct netdev_notifier_offload_xstats_info * info)8243 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
8244 struct netdev_notifier_offload_xstats_info *info)
8245 {
8246 if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8247 return;
8248 netdev_offload_xstats_report_used(info->report_used);
8249 }
8250
8251 static int
8252 mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
8253 struct rtnl_hw_stats64 *p_stats)
8254 {
8255 struct mlxsw_sp_rif_counter_set_basic ingress;
8256 struct mlxsw_sp_rif_counter_set_basic egress;
8257 int err;
8258
8259 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8260 MLXSW_SP_RIF_COUNTER_INGRESS,
8261 &ingress);
8262 if (err)
8263 return err;
8264
8265 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8266 MLXSW_SP_RIF_COUNTER_EGRESS,
8267 &egress);
8268 if (err)
8269 return err;
8270
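/* The hardware counter set reports unicast, multicast and broadcast traffic
 * separately; sum the "good" counters of all three to build the aggregate
 * rtnl_hw_stats64 packet and byte counts below.
 */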
8271 #define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX) \
8272 ((SET.good_unicast_ ## SFX) + \
8273 (SET.good_multicast_ ## SFX) + \
8274 (SET.good_broadcast_ ## SFX))
8275
8276 p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
8277 p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
8278 p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
8279 p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
8280 p_stats->rx_errors = ingress.error_packets;
8281 p_stats->tx_errors = egress.error_packets;
8282 p_stats->rx_dropped = ingress.discard_packets;
8283 p_stats->tx_dropped = egress.discard_packets;
8284 p_stats->multicast = ingress.good_multicast_packets +
8285 ingress.good_broadcast_packets;
8286
8287 #undef MLXSW_SP_ROUTER_ALL_GOOD
8288
8289 return 0;
8290 }
8291
8292 static int
8293 mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
8294 struct netdev_notifier_offload_xstats_info *info)
8295 {
8296 struct rtnl_hw_stats64 stats = {};
8297 int err;
8298
8299 if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8300 return 0;
8301
8302 err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
8303 if (err)
8304 return err;
8305
8306 netdev_offload_xstats_report_delta(info->report_delta, &stats);
8307 return 0;
8308 }
8309
8310 struct mlxsw_sp_router_hwstats_notify_work {
8311 struct work_struct work;
8312 struct net_device *dev;
8313 netdevice_tracker dev_tracker;
8314 };
8315
8316 static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
8317 {
8318 struct mlxsw_sp_router_hwstats_notify_work *hws_work =
8319 container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
8320 work);
8321
8322 rtnl_lock();
8323 rtnl_offload_xstats_notify(hws_work->dev);
8324 rtnl_unlock();
8325 netdev_put(hws_work->dev, &hws_work->dev_tracker);
8326 kfree(hws_work);
8327 }
8328
8329 static void
8330 mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
8331 {
8332 struct mlxsw_sp_router_hwstats_notify_work *hws_work;
8333
8334 /* To collect notification payload, the core ends up sending another
8335 * notifier block message, which would deadlock on the attempt to
8336 * acquire the router lock again. Just postpone the notification until
8337 * later.
8338 */
8339
8340 hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
8341 if (!hws_work)
8342 return;
8343
8344 INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
8345 netdev_hold(dev, &hws_work->dev_tracker, GFP_KERNEL);
8346 hws_work->dev = dev;
8347 mlxsw_core_schedule_work(&hws_work->work);
8348 }
8349
8350 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8351 {
8352 return mlxsw_sp_rif_dev(rif)->ifindex;
8353 }
8354
8355 bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif)
8356 {
8357 return !!mlxsw_sp_rif_dev(rif);
8358 }
8359
8360 bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
8361 const struct net_device *dev)
8362 {
8363 return mlxsw_sp_rif_dev(rif) == dev;
8364 }
8365
8366 static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
8367 {
8368 struct rtnl_hw_stats64 stats = {};
8369
8370 if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
8371 netdev_offload_xstats_push_delta(mlxsw_sp_rif_dev(rif),
8372 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8373 &stats);
8374 }
8375
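/* Create a RIF: bind a virtual router, allocate a RIF index and the RIF
 * itself, optionally attach a FID, configure the hardware through the type
 * ops, register with the multicast routing tables and set up counters.
 * Errors unwind in reverse order.
 */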
8376 static struct mlxsw_sp_rif *
8377 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8378 const struct mlxsw_sp_rif_params *params,
8379 struct netlink_ext_ack *extack)
8380 {
8381 u8 rif_entries = params->double_entry ? 2 : 1;
8382 u32 tb_id = l3mdev_fib_table(params->dev);
8383 const struct mlxsw_sp_rif_ops *ops;
8384 struct mlxsw_sp_fid *fid = NULL;
8385 enum mlxsw_sp_rif_type type;
8386 struct mlxsw_sp_crif *crif;
8387 struct mlxsw_sp_rif *rif;
8388 struct mlxsw_sp_vr *vr;
8389 u16 rif_index;
8390 int i, err;
8391
8392 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8393 ops = mlxsw_sp->router->rif_ops_arr[type];
8394
8395 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8396 if (IS_ERR(vr))
8397 return ERR_CAST(vr);
8398 vr->rif_count++;
8399
8400 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
8401 if (err) {
8402 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8403 goto err_rif_index_alloc;
8404 }
8405
8406 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, params->dev);
8407 if (WARN_ON(!crif)) {
8408 err = -ENOENT;
8409 goto err_crif_lookup;
8410 }
8411
8412 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, crif);
8413 if (!rif) {
8414 err = -ENOMEM;
8415 goto err_rif_alloc;
8416 }
8417 netdev_hold(params->dev, &rif->dev_tracker, GFP_KERNEL);
8418 mlxsw_sp->router->rifs[rif_index] = rif;
8419 rif->mlxsw_sp = mlxsw_sp;
8420 rif->ops = ops;
8421 rif->rif_entries = rif_entries;
8422
8423 if (ops->fid_get) {
8424 fid = ops->fid_get(rif, params, extack);
8425 if (IS_ERR(fid)) {
8426 err = PTR_ERR(fid);
8427 goto err_fid_get;
8428 }
8429 rif->fid = fid;
8430 }
8431
8432 if (ops->setup)
8433 ops->setup(rif, params);
8434
8435 err = ops->configure(rif, extack);
8436 if (err)
8437 goto err_configure;
8438
8439 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8440 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8441 if (err)
8442 goto err_mr_rif_add;
8443 }
8444
8445 err = mlxsw_sp_router_rif_made_sync(mlxsw_sp, rif);
8446 if (err)
8447 goto err_rif_made_sync;
8448
8449 if (netdev_offload_xstats_enabled(params->dev,
8450 NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8451 err = mlxsw_sp_router_port_l3_stats_enable(rif);
8452 if (err)
8453 goto err_stats_enable;
8454 mlxsw_sp_router_hwstats_notify_schedule(params->dev);
8455 } else {
8456 mlxsw_sp_rif_counters_alloc(rif);
8457 }
8458
8459 atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
8460 return rif;
8461
8462 err_stats_enable:
8463 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8464 err_rif_made_sync:
8465 err_mr_rif_add:
8466 for (i--; i >= 0; i--)
8467 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8468 ops->deconfigure(rif);
8469 err_configure:
8470 if (fid)
8471 mlxsw_sp_fid_put(fid);
8472 err_fid_get:
8473 mlxsw_sp->router->rifs[rif_index] = NULL;
8474 netdev_put(params->dev, &rif->dev_tracker);
8475 mlxsw_sp_rif_free(rif);
8476 err_rif_alloc:
8477 err_crif_lookup:
8478 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8479 err_rif_index_alloc:
8480 vr->rif_count--;
8481 mlxsw_sp_vr_put(mlxsw_sp, vr);
8482 return ERR_PTR(err);
8483 }
8484
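/* Tear down a RIF in the reverse order of mlxsw_sp_rif_create(). The CRIF
 * is freed as well if its netdevice has already been unregistered (see
 * crif->can_destroy).
 */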
8485 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8486 {
8487 struct net_device *dev = mlxsw_sp_rif_dev(rif);
8488 const struct mlxsw_sp_rif_ops *ops = rif->ops;
8489 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8490 struct mlxsw_sp_crif *crif = rif->crif;
8491 struct mlxsw_sp_fid *fid = rif->fid;
8492 u8 rif_entries = rif->rif_entries;
8493 u16 rif_index = rif->rif_index;
8494 struct mlxsw_sp_vr *vr;
8495 int i;
8496
8497 atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
8498 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8499 vr = &mlxsw_sp->router->vrs[rif->vr_id];
8500
8501 if (netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8502 mlxsw_sp_rif_push_l3_stats(rif);
8503 mlxsw_sp_router_port_l3_stats_disable(rif);
8504 mlxsw_sp_router_hwstats_notify_schedule(dev);
8505 } else {
8506 mlxsw_sp_rif_counters_free(rif);
8507 }
8508
8509 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8510 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8511 ops->deconfigure(rif);
8512 if (fid)
8513 /* Loopback RIFs are not associated with a FID. */
8514 mlxsw_sp_fid_put(fid);
8515 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8516 netdev_put(dev, &rif->dev_tracker);
8517 mlxsw_sp_rif_free(rif);
8518 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8519 vr->rif_count--;
8520 mlxsw_sp_vr_put(mlxsw_sp, vr);
8521
8522 if (crif->can_destroy)
8523 mlxsw_sp_crif_free(crif);
8524 }
8525
8526 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8527 struct net_device *dev)
8528 {
8529 struct mlxsw_sp_rif *rif;
8530
8531 mutex_lock(&mlxsw_sp->router->lock);
8532 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8533 if (!rif)
8534 goto out;
8535 mlxsw_sp_rif_destroy(rif);
8536 out:
8537 mutex_unlock(&mlxsw_sp->router->lock);
8538 }
8539
8540 static void mlxsw_sp_rif_destroy_vlan_upper(struct mlxsw_sp *mlxsw_sp,
8541 struct net_device *br_dev,
8542 u16 vid)
8543 {
8544 struct net_device *upper_dev;
8545 struct mlxsw_sp_crif *crif;
8546
8547 rcu_read_lock();
8548 upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), vid);
8549 rcu_read_unlock();
8550
8551 if (!upper_dev)
8552 return;
8553
8554 crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, upper_dev);
8555 if (!crif || !crif->rif)
8556 return;
8557
8558 mlxsw_sp_rif_destroy(crif->rif);
8559 }
8560
8561 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8562 struct net_device *l3_dev,
8563 int lower_pvid,
8564 unsigned long event,
8565 struct netlink_ext_ack *extack);
8566
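/* Handle a PVID change on a VLAN-aware bridge that has an IP address: the
 * RIF keyed by the old PVID is replaced by (or migrated to) a RIF for the
 * new PVID, and a VLAN upper with the old PVID is then given a chance to
 * create its own RIF.
 */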
8567 int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
8568 struct net_device *br_dev,
8569 u16 new_vid, bool is_pvid,
8570 struct netlink_ext_ack *extack)
8571 {
8572 struct mlxsw_sp_rif *old_rif;
8573 struct mlxsw_sp_rif *new_rif;
8574 struct net_device *upper_dev;
8575 u16 old_pvid = 0;
8576 u16 new_pvid;
8577 int err = 0;
8578
8579 mutex_lock(&mlxsw_sp->router->lock);
8580 old_rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
8581 if (old_rif) {
8582 /* If the RIF on the bridge is not a VLAN RIF, we shouldn't have
8583 * gotten a PVID notification.
8584 */
8585 if (WARN_ON(old_rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN))
8586 old_rif = NULL;
8587 else
8588 old_pvid = mlxsw_sp_fid_8021q_vid(old_rif->fid);
8589 }
8590
8591 if (is_pvid)
8592 new_pvid = new_vid;
8593 else if (old_pvid == new_vid)
8594 new_pvid = 0;
8595 else
8596 goto out;
8597
8598 if (old_pvid == new_pvid)
8599 goto out;
8600
8601 if (new_pvid) {
8602 struct mlxsw_sp_rif_params params = {
8603 .dev = br_dev,
8604 .vid = new_pvid,
8605 };
8606
8607 /* If there is a VLAN upper with the same VID as the new PVID,
8608 * kill its RIF, if there is one.
8609 */
8610 mlxsw_sp_rif_destroy_vlan_upper(mlxsw_sp, br_dev, new_pvid);
8611
8612 if (mlxsw_sp_dev_addr_list_empty(br_dev))
8613 goto out;
8614 new_rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8615 if (IS_ERR(new_rif)) {
8616 err = PTR_ERR(new_rif);
8617 goto out;
8618 }
8619
8620 if (old_pvid)
8621 mlxsw_sp_rif_migrate_destroy(mlxsw_sp, old_rif, new_rif,
8622 true);
8623 } else {
8624 mlxsw_sp_rif_destroy(old_rif);
8625 }
8626
8627 if (old_pvid) {
8628 rcu_read_lock();
8629 upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q),
8630 old_pvid);
8631 rcu_read_unlock();
8632 if (upper_dev)
8633 err = mlxsw_sp_inetaddr_bridge_event(mlxsw_sp,
8634 upper_dev,
8635 new_pvid,
8636 NETDEV_UP, extack);
8637 }
8638
8639 out:
8640 mutex_unlock(&mlxsw_sp->router->lock);
8641 return err;
8642 }
8643
8644 static void
8645 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8646 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8647 {
8648 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8649
8650 params->vid = mlxsw_sp_port_vlan->vid;
8651 params->lag = mlxsw_sp_port->lagged;
8652 if (params->lag)
8653 params->lag_id = mlxsw_sp_port->lag_id;
8654 else
8655 params->system_port = mlxsw_sp_port->local_port;
8656 }
8657
8658 static struct mlxsw_sp_rif_subport *
8659 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8660 {
8661 return container_of(rif, struct mlxsw_sp_rif_subport, common);
8662 }
8663
8664 static struct mlxsw_sp_rif *
8665 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8666 const struct mlxsw_sp_rif_params *params,
8667 struct netlink_ext_ack *extack)
8668 {
8669 struct mlxsw_sp_rif_subport *rif_subport;
8670 struct mlxsw_sp_rif *rif;
8671
8672 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8673 if (!rif)
8674 return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8675
8676 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8677 refcount_inc(&rif_subport->ref_count);
8678 return rif;
8679 }
8680
8681 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8682 {
8683 struct mlxsw_sp_rif_subport *rif_subport;
8684
8685 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8686 if (!refcount_dec_and_test(&rif_subport->ref_count))
8687 return;
8688
8689 mlxsw_sp_rif_destroy(rif);
8690 }
8691
8692 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8693 struct mlxsw_sp_rif_mac_profile *profile,
8694 struct netlink_ext_ack *extack)
8695 {
8696 u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8697 struct mlxsw_sp_router *router = mlxsw_sp->router;
8698 int id;
8699
8700 id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8701 max_rif_mac_profiles, GFP_KERNEL);
8702
8703 if (id >= 0) {
8704 profile->id = id;
8705 return 0;
8706 }
8707
8708 if (id == -ENOSPC)
8709 NL_SET_ERR_MSG_MOD(extack,
8710 "Exceeded number of supported router interface MAC profiles");
8711
8712 return id;
8713 }
8714
8715 static struct mlxsw_sp_rif_mac_profile *
8716 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8717 {
8718 struct mlxsw_sp_rif_mac_profile *profile;
8719
8720 profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8721 mac_profile);
8722 WARN_ON(!profile);
8723 return profile;
8724 }
8725
8726 static struct mlxsw_sp_rif_mac_profile *
8727 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8728 {
8729 struct mlxsw_sp_rif_mac_profile *profile;
8730
8731 profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8732 if (!profile)
8733 return NULL;
8734
8735 ether_addr_copy(profile->mac_prefix, mac);
8736 refcount_set(&profile->ref_count, 1);
8737 return profile;
8738 }
8739
8740 static struct mlxsw_sp_rif_mac_profile *
8741 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8742 {
8743 struct mlxsw_sp_router *router = mlxsw_sp->router;
8744 struct mlxsw_sp_rif_mac_profile *profile;
8745 int id;
8746
8747 idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8748 if (ether_addr_equal_masked(profile->mac_prefix, mac,
8749 mlxsw_sp->mac_mask))
8750 return profile;
8751 }
8752
8753 return NULL;
8754 }
8755
8756 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
8757 {
8758 const struct mlxsw_sp *mlxsw_sp = priv;
8759
8760 return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
8761 }
8762
8763 static u64 mlxsw_sp_rifs_occ_get(void *priv)
8764 {
8765 const struct mlxsw_sp *mlxsw_sp = priv;
8766
8767 return atomic_read(&mlxsw_sp->router->rifs_count);
8768 }
8769
8770 static struct mlxsw_sp_rif_mac_profile *
8771 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8772 struct netlink_ext_ack *extack)
8773 {
8774 struct mlxsw_sp_rif_mac_profile *profile;
8775 int err;
8776
8777 profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8778 if (!profile)
8779 return ERR_PTR(-ENOMEM);
8780
8781 err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
8782 if (err)
8783 goto profile_index_alloc_err;
8784
8785 atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
8786 return profile;
8787
8788 profile_index_alloc_err:
8789 kfree(profile);
8790 return ERR_PTR(err);
8791 }
8792
8793 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
8794 u8 mac_profile)
8795 {
8796 struct mlxsw_sp_rif_mac_profile *profile;
8797
8798 atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
8799 profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
8800 kfree(profile);
8801 }
8802
8803 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
8804 const char *mac, u8 *p_mac_profile,
8805 struct netlink_ext_ack *extack)
8806 {
8807 struct mlxsw_sp_rif_mac_profile *profile;
8808
8809 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
8810 if (profile) {
8811 refcount_inc(&profile->ref_count);
8812 goto out;
8813 }
8814
8815 profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
8816 if (IS_ERR(profile))
8817 return PTR_ERR(profile);
8818
8819 out:
8820 *p_mac_profile = profile->id;
8821 return 0;
8822 }
8823
8824 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
8825 u8 mac_profile)
8826 {
8827 struct mlxsw_sp_rif_mac_profile *profile;
8828
8829 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8830 mac_profile);
8831 if (WARN_ON(!profile))
8832 return;
8833
8834 if (!refcount_dec_and_test(&profile->ref_count))
8835 return;
8836
8837 mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
8838 }
8839
8840 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
8841 {
8842 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8843 struct mlxsw_sp_rif_mac_profile *profile;
8844
8845 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8846 rif->mac_profile_id);
8847 if (WARN_ON(!profile))
8848 return false;
8849
8850 return refcount_read(&profile->ref_count) > 1;
8851 }
8852
8853 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
8854 const char *new_mac)
8855 {
8856 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8857 struct mlxsw_sp_rif_mac_profile *profile;
8858
8859 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8860 rif->mac_profile_id);
8861 if (WARN_ON(!profile))
8862 return -EINVAL;
8863
8864 ether_addr_copy(profile->mac_prefix, new_mac);
8865 return 0;
8866 }
8867
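/* Change the MAC profile bound to a RIF. If the current profile is not
 * shared and no existing profile matches the new MAC, edit it in place;
 * otherwise take a reference on a matching (or new) profile and release
 * the old one.
 */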
8868 static int
8869 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
8870 struct mlxsw_sp_rif *rif,
8871 const char *new_mac,
8872 struct netlink_ext_ack *extack)
8873 {
8874 u8 mac_profile;
8875 int err;
8876
8877 if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
8878 !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
8879 return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
8880
8881 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
8882 &mac_profile, extack);
8883 if (err)
8884 return err;
8885
8886 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
8887 rif->mac_profile_id = mac_profile;
8888 return 0;
8889 }
8890
8891 static int
8892 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8893 struct net_device *l3_dev,
8894 struct netlink_ext_ack *extack)
8895 {
8896 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8897 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8898 struct mlxsw_sp_rif_params params;
8899 u16 vid = mlxsw_sp_port_vlan->vid;
8900 struct mlxsw_sp_rif *rif;
8901 struct mlxsw_sp_fid *fid;
8902 int err;
8903
8904 params = (struct mlxsw_sp_rif_params) {
8905 .dev = l3_dev,
8906 .vid = vid,
8907 };
8908
8909 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8910 rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8911 if (IS_ERR(rif))
8912 return PTR_ERR(rif);
8913
8914 /* FID was already created, just take a reference */
8915 fid = rif->ops->fid_get(rif, ¶ms, extack);
8916 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8917 if (err)
8918 goto err_fid_port_vid_map;
8919
8920 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8921 if (err)
8922 goto err_port_vid_learning_set;
8923
8924 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8925 BR_STATE_FORWARDING);
8926 if (err)
8927 goto err_port_vid_stp_set;
8928
8929 mlxsw_sp_port_vlan->fid = fid;
8930
8931 return 0;
8932
8933 err_port_vid_stp_set:
8934 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8935 err_port_vid_learning_set:
8936 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8937 err_fid_port_vid_map:
8938 mlxsw_sp_fid_put(fid);
8939 mlxsw_sp_rif_subport_put(rif);
8940 return err;
8941 }
8942
8943 static void
8944 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8945 {
8946 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8947 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8948 struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8949 u16 vid = mlxsw_sp_port_vlan->vid;
8950
8951 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8952 return;
8953
8954 mlxsw_sp_port_vlan->fid = NULL;
8955 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8956 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8957 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8958 mlxsw_sp_fid_put(fid);
8959 mlxsw_sp_rif_subport_put(rif);
8960 }
8961
8962 static int
8963 mlxsw_sp_port_vlan_router_join_existing(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8964 struct net_device *l3_dev,
8965 struct netlink_ext_ack *extack)
8966 {
8967 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8968
8969 lockdep_assert_held(&mlxsw_sp->router->lock);
8970
8971 if (!mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev))
8972 return 0;
8973
8974 return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8975 extack);
8976 }
8977
8978 void
8979 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8980 {
8981 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8982
8983 mutex_lock(&mlxsw_sp->router->lock);
8984 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8985 mutex_unlock(&mlxsw_sp->router->lock);
8986 }
8987
8988 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8989 struct net_device *port_dev,
8990 unsigned long event, u16 vid,
8991 struct netlink_ext_ack *extack)
8992 {
8993 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8994 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8995
8996 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8997 if (WARN_ON(!mlxsw_sp_port_vlan))
8998 return -EINVAL;
8999
9000 switch (event) {
9001 case NETDEV_UP:
9002 return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
9003 l3_dev, extack);
9004 case NETDEV_DOWN:
9005 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9006 break;
9007 }
9008
9009 return 0;
9010 }
9011
9012 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
9013 unsigned long event, bool nomaster,
9014 struct netlink_ext_ack *extack)
9015 {
9016 if (!nomaster && (netif_is_any_bridge_port(port_dev) ||
9017 netif_is_lag_port(port_dev)))
9018 return 0;
9019
9020 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
9021 MLXSW_SP_DEFAULT_VID, extack);
9022 }
9023
9024 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
9025 struct net_device *lag_dev,
9026 unsigned long event, u16 vid,
9027 struct netlink_ext_ack *extack)
9028 {
9029 struct net_device *port_dev;
9030 struct list_head *iter;
9031 int err;
9032
9033 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
9034 if (mlxsw_sp_port_dev_check(port_dev)) {
9035 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
9036 port_dev,
9037 event, vid,
9038 extack);
9039 if (err)
9040 return err;
9041 }
9042 }
9043
9044 return 0;
9045 }
9046
9047 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
9048 unsigned long event, bool nomaster,
9049 struct netlink_ext_ack *extack)
9050 {
9051 if (!nomaster && netif_is_bridge_port(lag_dev))
9052 return 0;
9053
9054 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
9055 MLXSW_SP_DEFAULT_VID, extack);
9056 }
9057
9058 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
9059 struct net_device *l3_dev,
9060 int lower_pvid,
9061 unsigned long event,
9062 struct netlink_ext_ack *extack)
9063 {
9064 struct mlxsw_sp_rif_params params = {
9065 .dev = l3_dev,
9066 };
9067 struct mlxsw_sp_rif *rif;
9068 int err;
9069
9070 switch (event) {
9071 case NETDEV_UP:
9072 if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
9073 u16 proto;
9074
9075 br_vlan_get_proto(l3_dev, &proto);
9076 if (proto == ETH_P_8021AD) {
9077 NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
9078 return -EOPNOTSUPP;
9079 }
9080 err = br_vlan_get_pvid(l3_dev, &params.vid);
9081 if (err)
9082 return err;
9083 if (!params.vid)
9084 return 0;
9085 } else if (is_vlan_dev(l3_dev)) {
9086 params.vid = vlan_dev_vlan_id(l3_dev);
9087
9088 /* If the VID matches PVID of the bridge below, the
9089 * bridge owns the RIF for this VLAN. Don't do anything.
9090 */
9091 if ((int)params.vid == lower_pvid)
9092 return 0;
9093 }
9094
9095 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
9096 if (IS_ERR(rif))
9097 return PTR_ERR(rif);
9098 break;
9099 case NETDEV_DOWN:
9100 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9101 mlxsw_sp_rif_destroy(rif);
9102 break;
9103 }
9104
9105 return 0;
9106 }
9107
9108 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
9109 struct net_device *vlan_dev,
9110 unsigned long event, bool nomaster,
9111 struct netlink_ext_ack *extack)
9112 {
9113 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
9114 u16 vid = vlan_dev_vlan_id(vlan_dev);
9115 u16 lower_pvid;
9116 int err;
9117
9118 if (!nomaster && netif_is_bridge_port(vlan_dev))
9119 return 0;
9120
9121 if (mlxsw_sp_port_dev_check(real_dev)) {
9122 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
9123 event, vid, extack);
9124 } else if (netif_is_lag_master(real_dev)) {
9125 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
9126 vid, extack);
9127 } else if (netif_is_bridge_master(real_dev) &&
9128 br_vlan_enabled(real_dev)) {
9129 err = br_vlan_get_pvid(real_dev, &lower_pvid);
9130 if (err)
9131 return err;
9132 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev,
9133 lower_pvid, event,
9134 extack);
9135 }
9136
9137 return 0;
9138 }
9139
9140 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
9141 {
9142 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
9143 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9144
9145 return ether_addr_equal_masked(mac, vrrp4, mask);
9146 }
9147
9148 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
9149 {
9150 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
9151 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9152
9153 return ether_addr_equal_masked(mac, vrrp6, mask);
9154 }
9155
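/* VRRP virtual MACs are 00:00:5e:00:01:{VRID} for IPv4 and
 * 00:00:5e:00:02:{VRID} for IPv6. Program the VRID (last octet) into the
 * RIF's RITR register, or clear it when the macvlan is removed.
 */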
9156 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9157 const u8 *mac, bool adding)
9158 {
9159 char ritr_pl[MLXSW_REG_RITR_LEN];
9160 u8 vrrp_id = adding ? mac[5] : 0;
9161 int err;
9162
9163 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
9164 !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
9165 return 0;
9166
9167 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9168 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9169 if (err)
9170 return err;
9171
9172 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
9173 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
9174 else
9175 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
9176
9177 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9178 }
9179
9180 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
9181 const struct net_device *macvlan_dev,
9182 struct netlink_ext_ack *extack)
9183 {
9184 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9185 struct mlxsw_sp_rif *rif;
9186 int err;
9187
9188 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9189 if (!rif)
9190 return 0;
9191
9192 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9193 mlxsw_sp_fid_index(rif->fid), true);
9194 if (err)
9195 return err;
9196
9197 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
9198 macvlan_dev->dev_addr, true);
9199 if (err)
9200 goto err_rif_vrrp_add;
9201
9202 /* Make sure the bridge driver does not have this MAC pointing at
9203 * some other port.
9204 */
9205 if (rif->ops->fdb_del)
9206 rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
9207
9208 return 0;
9209
9210 err_rif_vrrp_add:
9211 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9212 mlxsw_sp_fid_index(rif->fid), false);
9213 return err;
9214 }
9215
9216 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9217 const struct net_device *macvlan_dev)
9218 {
9219 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9220 struct mlxsw_sp_rif *rif;
9221
9222 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9223 /* If we do not have a RIF, then we already took care of
9224 * removing the macvlan's MAC during RIF deletion.
9225 */
9226 if (!rif)
9227 return;
9228 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
9229 false);
9230 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9231 mlxsw_sp_fid_index(rif->fid), false);
9232 }
9233
9234 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9235 const struct net_device *macvlan_dev)
9236 {
9237 mutex_lock(&mlxsw_sp->router->lock);
9238 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9239 mutex_unlock(&mlxsw_sp->router->lock);
9240 }
9241
9242 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
9243 struct net_device *macvlan_dev,
9244 unsigned long event,
9245 struct netlink_ext_ack *extack)
9246 {
9247 switch (event) {
9248 case NETDEV_UP:
9249 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
9250 case NETDEV_DOWN:
9251 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9252 break;
9253 }
9254
9255 return 0;
9256 }
9257
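/* Dispatch an address event according to the kind of netdevice it arrived
 * on: physical port, LAG, bridge, VLAN or macvlan.
 */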
9258 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
9259 struct net_device *dev,
9260 unsigned long event, bool nomaster,
9261 struct netlink_ext_ack *extack)
9262 {
9263 if (mlxsw_sp_port_dev_check(dev))
9264 return mlxsw_sp_inetaddr_port_event(dev, event, nomaster,
9265 extack);
9266 else if (netif_is_lag_master(dev))
9267 return mlxsw_sp_inetaddr_lag_event(dev, event, nomaster,
9268 extack);
9269 else if (netif_is_bridge_master(dev))
9270 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, -1, event,
9271 extack);
9272 else if (is_vlan_dev(dev))
9273 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
9274 nomaster, extack);
9275 else if (netif_is_macvlan(dev))
9276 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
9277 extack);
9278 else
9279 return 0;
9280 }
9281
9282 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
9283 unsigned long event, void *ptr)
9284 {
9285 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
9286 struct net_device *dev = ifa->ifa_dev->dev;
9287 struct mlxsw_sp_router *router;
9288 struct mlxsw_sp_rif *rif;
9289 int err = 0;
9290
9291 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
9292 if (event == NETDEV_UP)
9293 return NOTIFY_DONE;
9294
9295 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
9296 mutex_lock(&router->lock);
9297 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
9298 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9299 goto out;
9300
9301 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, false,
9302 NULL);
9303 out:
9304 mutex_unlock(&router->lock);
9305 return notifier_from_errno(err);
9306 }
9307
9308 static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
9309 unsigned long event, void *ptr)
9310 {
9311 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
9312 struct net_device *dev = ivi->ivi_dev->dev;
9313 struct mlxsw_sp *mlxsw_sp;
9314 struct mlxsw_sp_rif *rif;
9315 int err = 0;
9316
9317 mlxsw_sp = mlxsw_sp_lower_get(dev);
9318 if (!mlxsw_sp)
9319 return NOTIFY_DONE;
9320
9321 mutex_lock(&mlxsw_sp->router->lock);
9322 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9323 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9324 goto out;
9325
9326 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
9327 ivi->extack);
9328 out:
9329 mutex_unlock(&mlxsw_sp->router->lock);
9330 return notifier_from_errno(err);
9331 }
9332
9333 struct mlxsw_sp_inet6addr_event_work {
9334 struct work_struct work;
9335 struct mlxsw_sp *mlxsw_sp;
9336 struct net_device *dev;
9337 netdevice_tracker dev_tracker;
9338 unsigned long event;
9339 };
9340
9341 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
9342 {
9343 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
9344 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
9345 struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
9346 struct net_device *dev = inet6addr_work->dev;
9347 unsigned long event = inet6addr_work->event;
9348 struct mlxsw_sp_rif *rif;
9349
9350 rtnl_lock();
9351 mutex_lock(&mlxsw_sp->router->lock);
9352
9353 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9354 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9355 goto out;
9356
9357 __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, NULL);
9358 out:
9359 mutex_unlock(&mlxsw_sp->router->lock);
9360 rtnl_unlock();
9361 netdev_put(dev, &inet6addr_work->dev_tracker);
9362 kfree(inet6addr_work);
9363 }
9364
9365 /* Called with rcu_read_lock() */
9366 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
9367 unsigned long event, void *ptr)
9368 {
9369 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
9370 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
9371 struct net_device *dev = if6->idev->dev;
9372 struct mlxsw_sp_router *router;
9373
9374 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
9375 if (event == NETDEV_UP)
9376 return NOTIFY_DONE;
9377
9378 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
9379 if (!inet6addr_work)
9380 return NOTIFY_BAD;
9381
9382 router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
9383 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
9384 inet6addr_work->mlxsw_sp = router->mlxsw_sp;
9385 inet6addr_work->dev = dev;
9386 inet6addr_work->event = event;
9387 netdev_hold(dev, &inet6addr_work->dev_tracker, GFP_ATOMIC);
9388 mlxsw_core_schedule_work(&inet6addr_work->work);
9389
9390 return NOTIFY_DONE;
9391 }
9392
9393 static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
9394 unsigned long event, void *ptr)
9395 {
9396 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
9397 struct net_device *dev = i6vi->i6vi_dev->dev;
9398 struct mlxsw_sp *mlxsw_sp;
9399 struct mlxsw_sp_rif *rif;
9400 int err = 0;
9401
9402 mlxsw_sp = mlxsw_sp_lower_get(dev);
9403 if (!mlxsw_sp)
9404 return NOTIFY_DONE;
9405
9406 mutex_lock(&mlxsw_sp->router->lock);
9407 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9408 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9409 goto out;
9410
9411 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
9412 i6vi->extack);
9413 out:
9414 mutex_unlock(&mlxsw_sp->router->lock);
9415 return notifier_from_errno(err);
9416 }
9417
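/* Update the MTU, MAC and MAC profile of an existing RIF by
 * read-modify-write of its RITR register.
 */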
9418 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9419 const char *mac, int mtu, u8 mac_profile)
9420 {
9421 char ritr_pl[MLXSW_REG_RITR_LEN];
9422 int err;
9423
9424 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9425 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9426 if (err)
9427 return err;
9428
9429 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
9430 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
9431 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
9432 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
9433 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9434 }
9435
9436 static int
9437 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9438 struct mlxsw_sp_rif *rif,
9439 struct netlink_ext_ack *extack)
9440 {
9441 struct net_device *dev = mlxsw_sp_rif_dev(rif);
9442 u8 old_mac_profile;
9443 u16 fid_index;
9444 int err;
9445
9446 fid_index = mlxsw_sp_fid_index(rif->fid);
9447
9448 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9449 if (err)
9450 return err;
9451
9452 old_mac_profile = rif->mac_profile_id;
9453 err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
9454 extack);
9455 if (err)
9456 goto err_rif_mac_profile_replace;
9457
9458 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9459 dev->mtu, rif->mac_profile_id);
9460 if (err)
9461 goto err_rif_edit;
9462
9463 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9464 if (err)
9465 goto err_rif_fdb_op;
9466
9467 if (rif->mtu != dev->mtu) {
9468 struct mlxsw_sp_vr *vr;
9469 int i;
9470
9471 /* The RIF is relevant only to its mr_table instance, as unlike
9472 * unicast routing, in multicast routing a RIF cannot be shared
9473 * between several multicast routing tables.
9474 */
9475 vr = &mlxsw_sp->router->vrs[rif->vr_id];
9476 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9477 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
9478 rif, dev->mtu);
9479 }
9480
9481 ether_addr_copy(rif->addr, dev->dev_addr);
9482 rif->mtu = dev->mtu;
9483
9484 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9485
9486 return 0;
9487
9488 err_rif_fdb_op:
9489 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
9490 old_mac_profile);
9491 err_rif_edit:
9492 mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9493 err_rif_mac_profile_replace:
9494 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9495 return err;
9496 }
9497
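/* Veto a MAC change that would require allocating a new MAC profile when
 * all profiles are in use and the RIF's current profile is shared with
 * other RIFs.
 */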
9498 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9499 struct netdev_notifier_pre_changeaddr_info *info)
9500 {
9501 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9502 struct mlxsw_sp_rif_mac_profile *profile;
9503 struct netlink_ext_ack *extack;
9504 u8 max_rif_mac_profiles;
9505 u64 occ;
9506
9507 extack = netdev_notifier_info_to_extack(&info->info);
9508
9509 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9510 if (profile)
9511 return 0;
9512
9513 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9514 occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9515 if (occ < max_rif_mac_profiles)
9516 return 0;
9517
9518 if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9519 return 0;
9520
9521 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9522 return -ENOBUFS;
9523 }
9524
9525 static bool mlxsw_sp_router_netdevice_interesting(struct mlxsw_sp *mlxsw_sp,
9526 struct net_device *dev)
9527 {
9528 struct vlan_dev_priv *vlan;
9529
9530 if (netif_is_lag_master(dev) ||
9531 netif_is_bridge_master(dev) ||
9532 mlxsw_sp_port_dev_check(dev) ||
9533 mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev) ||
9534 netif_is_l3_master(dev))
9535 return true;
9536
9537 if (!is_vlan_dev(dev))
9538 return false;
9539
9540 vlan = vlan_dev_priv(dev);
9541 return netif_is_lag_master(vlan->real_dev) ||
9542 netif_is_bridge_master(vlan->real_dev) ||
9543 mlxsw_sp_port_dev_check(vlan->real_dev);
9544 }
9545
9546 static struct mlxsw_sp_crif *
9547 mlxsw_sp_crif_register(struct mlxsw_sp_router *router, struct net_device *dev)
9548 {
9549 struct mlxsw_sp_crif *crif;
9550 int err;
9551
9552 if (WARN_ON(mlxsw_sp_crif_lookup(router, dev)))
9553 return NULL;
9554
9555 crif = mlxsw_sp_crif_alloc(dev);
9556 if (!crif)
9557 return ERR_PTR(-ENOMEM);
9558
9559 err = mlxsw_sp_crif_insert(router, crif);
9560 if (err)
9561 goto err_netdev_insert;
9562
9563 return crif;
9564
9565 err_netdev_insert:
9566 mlxsw_sp_crif_free(crif);
9567 return ERR_PTR(err);
9568 }
9569
9570 static void mlxsw_sp_crif_unregister(struct mlxsw_sp_router *router,
9571 struct mlxsw_sp_crif *crif)
9572 {
9573 struct mlxsw_sp_nexthop *nh, *tmp;
9574
9575 mlxsw_sp_crif_remove(router, crif);
9576
9577 list_for_each_entry_safe(nh, tmp, &crif->nexthop_list, crif_list_node)
9578 mlxsw_sp_nexthop_type_fini(router->mlxsw_sp, nh);
9579
9580 if (crif->rif)
9581 crif->can_destroy = true;
9582 else
9583 mlxsw_sp_crif_free(crif);
9584 }
9585
9586 static int mlxsw_sp_netdevice_register(struct mlxsw_sp_router *router,
9587 struct net_device *dev)
9588 {
9589 struct mlxsw_sp_crif *crif;
9590
9591 if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9592 return 0;
9593
9594 crif = mlxsw_sp_crif_register(router, dev);
9595 return PTR_ERR_OR_ZERO(crif);
9596 }
9597
9598 static void mlxsw_sp_netdevice_unregister(struct mlxsw_sp_router *router,
9599 struct net_device *dev)
9600 {
9601 struct mlxsw_sp_crif *crif;
9602
9603 if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9604 return;
9605
9606 /* netdev_run_todo(), by way of netdev_wait_allrefs_any(), rebroadcasts
9607 * the NETDEV_UNREGISTER message, so we can get here twice. If that's
9608 * what happened, the netdevice state is NETREG_UNREGISTERED. In that
9609 * case, we expect to have collected the CRIF already, and warn if it
9610 * still exists. Otherwise we expect the CRIF to exist.
9611 */
9612 crif = mlxsw_sp_crif_lookup(router, dev);
9613 if (dev->reg_state == NETREG_UNREGISTERED) {
9614 if (!WARN_ON(crif))
9615 return;
9616 }
9617 if (WARN_ON(!crif))
9618 return;
9619
9620 mlxsw_sp_crif_unregister(router, crif);
9621 }
9622
9623 static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
9624 {
9625 switch (event) {
9626 case NETDEV_OFFLOAD_XSTATS_ENABLE:
9627 case NETDEV_OFFLOAD_XSTATS_DISABLE:
9628 case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9629 case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9630 return true;
9631 }
9632
9633 return false;
9634 }
9635
9636 static int
9637 mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
9638 unsigned long event,
9639 struct netdev_notifier_offload_xstats_info *info)
9640 {
9641 switch (info->type) {
9642 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
9643 break;
9644 default:
9645 return 0;
9646 }
9647
9648 switch (event) {
9649 case NETDEV_OFFLOAD_XSTATS_ENABLE:
9650 return mlxsw_sp_router_port_l3_stats_enable(rif);
9651 case NETDEV_OFFLOAD_XSTATS_DISABLE:
9652 mlxsw_sp_router_port_l3_stats_disable(rif);
9653 return 0;
9654 case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9655 mlxsw_sp_router_port_l3_stats_report_used(rif, info);
9656 return 0;
9657 case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9658 return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
9659 }
9660
9661 WARN_ON_ONCE(1);
9662 return 0;
9663 }
9664
9665 static int
9666 mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
9667 struct net_device *dev,
9668 unsigned long event,
9669 struct netdev_notifier_offload_xstats_info *info)
9670 {
9671 struct mlxsw_sp_rif *rif;
9672
9673 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9674 if (!rif)
9675 return 0;
9676
9677 return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
9678 }
9679
9680 static bool mlxsw_sp_is_router_event(unsigned long event)
9681 {
9682 switch (event) {
9683 case NETDEV_PRE_CHANGEADDR:
9684 case NETDEV_CHANGEADDR:
9685 case NETDEV_CHANGEMTU:
9686 return true;
9687 default:
9688 return false;
9689 }
9690 }
9691
9692 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
9693 unsigned long event, void *ptr)
9694 {
9695 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
9696 struct mlxsw_sp *mlxsw_sp;
9697 struct mlxsw_sp_rif *rif;
9698
9699 mlxsw_sp = mlxsw_sp_lower_get(dev);
9700 if (!mlxsw_sp)
9701 return 0;
9702
9703 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9704 if (!rif)
9705 return 0;
9706
9707 switch (event) {
9708 case NETDEV_CHANGEMTU:
9709 case NETDEV_CHANGEADDR:
9710 return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
9711 case NETDEV_PRE_CHANGEADDR:
9712 return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
9713 default:
9714 WARN_ON_ONCE(1);
9715 break;
9716 }
9717
9718 return 0;
9719 }
9720
9721 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
9722 struct net_device *l3_dev,
9723 struct netlink_ext_ack *extack)
9724 {
9725 struct mlxsw_sp_rif *rif;
9726
9727 /* If netdev is already associated with a RIF, then we need to
9728 * destroy it and create a new one with the new virtual router ID.
9729 */
9730 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9731 if (rif)
9732 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false,
9733 extack);
9734
9735 return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, false,
9736 extack);
9737 }
9738
9739 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9740 struct net_device *l3_dev)
9741 {
9742 struct mlxsw_sp_rif *rif;
9743
9744 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9745 if (!rif)
9746 return;
9747 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, NULL);
9748 }
9749
9750 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
9751 {
9752 struct netdev_notifier_changeupper_info *info = ptr;
9753
9754 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
9755 return false;
9756 return netif_is_l3_master(info->upper_dev);
9757 }
9758
9759 static int
9760 mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
9761 struct netdev_notifier_changeupper_info *info)
9762 {
9763 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
9764 int err = 0;
9765
9766 /* We do not create a RIF for a macvlan, but only use it to
9767 * direct more MAC addresses to the router.
9768 */
9769 if (!mlxsw_sp || netif_is_macvlan(l3_dev))
9770 return 0;
9771
9772 switch (event) {
9773 case NETDEV_PRECHANGEUPPER:
9774 break;
9775 case NETDEV_CHANGEUPPER:
9776 if (info->linking) {
9777 struct netlink_ext_ack *extack;
9778
9779 extack = netdev_notifier_info_to_extack(&info->info);
9780 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
9781 } else {
9782 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
9783 }
9784 break;
9785 }
9786
9787 return err;
9788 }
9789
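/* Context for replaying NETDEV_UP on addressed netdevices. @done counts the
 * RIFs created so far, so that a failed replay can be unwound; @deslavement
 * makes the replay behave as if the device had no master.
 */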
9790 struct mlxsw_sp_router_replay_inetaddr_up {
9791 struct mlxsw_sp *mlxsw_sp;
9792 struct netlink_ext_ack *extack;
9793 unsigned int done;
9794 bool deslavement;
9795 };
9796
9797 static int mlxsw_sp_router_replay_inetaddr_up(struct net_device *dev,
9798 struct netdev_nested_priv *priv)
9799 {
9800 struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
9801 bool nomaster = ctx->deslavement;
9802 struct mlxsw_sp_crif *crif;
9803 int err;
9804
9805 if (mlxsw_sp_dev_addr_list_empty(dev))
9806 return 0;
9807
9808 crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
9809 if (!crif || crif->rif)
9810 return 0;
9811
9812 if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
9813 return 0;
9814
9815 err = __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_UP,
9816 nomaster, ctx->extack);
9817 if (err)
9818 return err;
9819
9820 ctx->done++;
9821 return 0;
9822 }
9823
9824 static int mlxsw_sp_router_unreplay_inetaddr_up(struct net_device *dev,
9825 struct netdev_nested_priv *priv)
9826 {
9827 struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
9828 bool nomaster = ctx->deslavement;
9829 struct mlxsw_sp_crif *crif;
9830
9831 if (!ctx->done)
9832 return 0;
9833
9834 if (mlxsw_sp_dev_addr_list_empty(dev))
9835 return 0;
9836
9837 crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
9838 if (!crif || !crif->rif)
9839 return 0;
9840
9841 /* We are rolling back NETDEV_UP, so ask for that. */
9842 if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
9843 return 0;
9844
9845 __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_DOWN, nomaster,
9846 NULL);
9847
9848 ctx->done--;
9849 return 0;
9850 }
9851
9852 int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp,
9853 struct net_device *upper_dev,
9854 struct netlink_ext_ack *extack)
9855 {
9856 struct mlxsw_sp_router_replay_inetaddr_up ctx = {
9857 .mlxsw_sp = mlxsw_sp,
9858 .extack = extack,
9859 .deslavement = false,
9860 };
9861 struct netdev_nested_priv priv = {
9862 .data = &ctx,
9863 };
9864 int err;
9865
9866 err = mlxsw_sp_router_replay_inetaddr_up(upper_dev, &priv);
9867 if (err)
9868 return err;
9869
9870 err = netdev_walk_all_upper_dev_rcu(upper_dev,
9871 mlxsw_sp_router_replay_inetaddr_up,
9872 &priv);
9873 if (err)
9874 goto err_replay_up;
9875
9876 return 0;
9877
9878 err_replay_up:
9879 netdev_walk_all_upper_dev_rcu(upper_dev,
9880 mlxsw_sp_router_unreplay_inetaddr_up,
9881 &priv);
9882 mlxsw_sp_router_unreplay_inetaddr_up(upper_dev, &priv);
9883 return err;
9884 }
9885
9886 void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp,
9887 struct net_device *dev)
9888 {
9889 struct mlxsw_sp_router_replay_inetaddr_up ctx = {
9890 .mlxsw_sp = mlxsw_sp,
9891 .deslavement = true,
9892 };
9893 struct netdev_nested_priv priv = {
9894 .data = &ctx,
9895 };
9896
9897 mlxsw_sp_router_replay_inetaddr_up(dev, &priv);
9898 }
9899
9900 static int
9901 mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
9902 u16 vid, struct net_device *dev,
9903 struct netlink_ext_ack *extack)
9904 {
9905 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9906
9907 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
9908 vid);
9909 if (WARN_ON(!mlxsw_sp_port_vlan))
9910 return -EINVAL;
9911
9912 return mlxsw_sp_port_vlan_router_join_existing(mlxsw_sp_port_vlan,
9913 dev, extack);
9914 }
9915
9916 static void
9917 mlxsw_sp_port_vid_router_leave(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
9918 struct net_device *dev)
9919 {
9920 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9921
9922 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
9923 vid);
9924 if (WARN_ON(!mlxsw_sp_port_vlan))
9925 return;
9926
9927 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9928 }
9929
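/* Joining a LAG replays router state for the LAG's default VID and for
 * every VLAN upper of the LAG; on error the joins already performed are
 * unwound in reverse.
 */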
9930 static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9931 struct net_device *lag_dev,
9932 struct netlink_ext_ack *extack)
9933 {
9934 u16 default_vid = MLXSW_SP_DEFAULT_VID;
9935 struct net_device *upper_dev;
9936 struct list_head *iter;
9937 int done = 0;
9938 u16 vid;
9939 int err;
9940
9941 err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, default_vid,
9942 lag_dev, extack);
9943 if (err)
9944 return err;
9945
9946 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9947 if (!is_vlan_dev(upper_dev))
9948 continue;
9949
9950 vid = vlan_dev_vlan_id(upper_dev);
9951 err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, vid,
9952 upper_dev, extack);
9953 if (err)
9954 goto err_router_join_dev;
9955
9956 ++done;
9957 }
9958
9959 return 0;
9960
9961 err_router_join_dev:
9962 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9963 if (!is_vlan_dev(upper_dev))
9964 continue;
9965 if (!done--)
9966 break;
9967
9968 vid = vlan_dev_vlan_id(upper_dev);
9969 mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
9970 }
9971
9972 mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
9973 return err;
9974 }
9975
9976 static void
__mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port * mlxsw_sp_port,struct net_device * lag_dev)9977 __mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9978 struct net_device *lag_dev)
9979 {
9980 u16 default_vid = MLXSW_SP_DEFAULT_VID;
9981 struct net_device *upper_dev;
9982 struct list_head *iter;
9983 u16 vid;
9984
9985 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
9986 if (!is_vlan_dev(upper_dev))
9987 continue;
9988
9989 vid = vlan_dev_vlan_id(upper_dev);
9990 mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
9991 }
9992
9993 mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
9994 }
9995
mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port * mlxsw_sp_port,struct net_device * lag_dev,struct netlink_ext_ack * extack)9996 int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
9997 struct net_device *lag_dev,
9998 struct netlink_ext_ack *extack)
9999 {
10000 int err;
10001
10002 mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10003 err = __mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, extack);
10004 mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10005
10006 return err;
10007 }
10008
mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port * mlxsw_sp_port,struct net_device * lag_dev)10009 void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10010 struct net_device *lag_dev)
10011 {
10012 mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10013 __mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
10014 mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10015 }
10016
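/* Router-side netdevice notifier. All handling runs under the router lock.
 * NETDEV_REGISTER is handled first so a CRIF exists before any other event
 * is dispatched for the device, and NETDEV_UNREGISTER is handled last so
 * the CRIF outlives the handlers that may still reference it.
 */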
static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_router *router;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
	mlxsw_sp = router->mlxsw_sp;

	mutex_lock(&mlxsw_sp->router->lock);

	if (event == NETDEV_REGISTER) {
		err = mlxsw_sp_netdevice_register(router, dev);
		if (err)
			/* No need to roll this back, UNREGISTER will collect it
			 * anyhow.
			 */
			goto out;
	}

	if (mlxsw_sp_is_offload_xstats_event(event))
		err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
							    event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_is_router_event(event))
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);

	if (event == NETDEV_UNREGISTER)
		mlxsw_sp_netdevice_unregister(router, dev);

out:
	mutex_unlock(&mlxsw_sp->router->lock);

	return notifier_from_errno(err);
}

struct mlxsw_sp_macvlan_replay {
	struct mlxsw_sp *mlxsw_sp;
	struct netlink_ext_ack *extack;
};

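/* macvlan uppers of a RIF's netdevice are represented as FDB entries that
 * direct their MAC address to the router. When a RIF is configured, walk
 * all uppers and re-add such an entry for every macvlan found; the flush
 * helpers below remove them again on deconfiguration.
 */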
static int mlxsw_sp_macvlan_replay_upper(struct net_device *dev,
					 struct netdev_nested_priv *priv)
{
	const struct mlxsw_sp_macvlan_replay *rms = priv->data;
	struct netlink_ext_ack *extack = rms->extack;
	struct mlxsw_sp *mlxsw_sp = rms->mlxsw_sp;

	if (!netif_is_macvlan(dev))
		return 0;

	return mlxsw_sp_rif_macvlan_add(mlxsw_sp, dev, extack);
}

static int mlxsw_sp_macvlan_replay(struct mlxsw_sp_rif *rif,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_macvlan_replay rms = {
		.mlxsw_sp = rif->mlxsw_sp,
		.extack = extack,
	};
	struct netdev_nested_priv priv = {
		.data = &rms,
	};

	return netdev_walk_all_upper_dev_rcu(mlxsw_sp_rif_dev(rif),
					     mlxsw_sp_macvlan_replay_upper,
					     &priv);
}

static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
					struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;

	if (!netif_is_macvlan(dev))
		return 0;

	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				   mlxsw_sp_fid_index(rif->fid), false);
}

static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct netdev_nested_priv priv = {
		.data = (void *)rif,
	};

	if (!netif_is_macvlan_port(dev))
		return 0;

	return netdev_walk_all_upper_dev_rcu(dev,
					     __mlxsw_sp_rif_macvlan_flush,
					     &priv);
}

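/* Sub-port RIFs back {port / LAG, VID} netdevices. The RIF is
 * reference-counted, since several users of the same sub-port (for
 * example, member ports of a LAG) share a single RIF.
 */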
static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_set(&rif_subport->ref_count, 1);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 efid;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
	efid = mlxsw_sp_fid_index(rif->fid);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  efid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
					  struct netlink_ext_ack *extack)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		goto err_rif_subport_op;

	err = mlxsw_sp_macvlan_replay(rif, extack);
	if (err)
		goto err_macvlan_replay;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
	if (err)
		goto err_fid_rif_set;

	return 0;

err_fid_rif_set:
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
	mlxsw_sp_rif_macvlan_flush(rif);
err_macvlan_replay:
	mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_unset(fid);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_rif_subport_op(rif, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
			     const struct mlxsw_sp_rif_params *params,
			     struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type = MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size = sizeof(struct mlxsw_sp_rif_subport),
	.setup = mlxsw_sp_rif_subport_setup,
	.configure = mlxsw_sp_rif_subport_configure,
	.deconfigure = mlxsw_sp_rif_subport_deconfigure,
	.fid_get = mlxsw_sp_rif_subport_fid_get,
};

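/* FID RIFs serve router interfaces on top of VLAN-unaware (802.1D)
 * bridges. The RIF is programmed through the RITR register as a FID
 * interface pointing at the bridge's FID.
 */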
static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
{
	enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
	mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

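/* The "router port" is a virtual local port used to represent the router
 * in flooding configuration; it is defined as one past the highest
 * possible local port number.
 */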
u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
				      struct netlink_ext_ack *extack)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
	if (err)
		goto err_rif_fid_op;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_macvlan_replay(rif, extack);
	if (err)
		goto err_macvlan_replay;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
	if (err)
		goto err_fid_rif_set;

	return 0;

err_fid_rif_set:
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
	mlxsw_sp_rif_macvlan_flush(rif);
err_macvlan_replay:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_fid_op(rif, fid_index, false);
err_rif_fid_op:
	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_unset(fid);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_fid_op(rif, fid_index, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
			 const struct mlxsw_sp_rif_params *params,
			 struct netlink_ext_ack *extack)
{
	int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);

	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif_ifindex);
}

static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct switchdev_notifier_fdb_info info = {};
	struct net_device *dev;

	dev = br_fdb_find_port(mlxsw_sp_rif_dev(rif), mac, 0);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = 0;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type = MLXSW_SP_RIF_TYPE_FID,
	.rif_size = sizeof(struct mlxsw_sp_rif),
	.configure = mlxsw_sp_rif_fid_configure,
	.deconfigure = mlxsw_sp_rif_fid_deconfigure,
	.fid_get = mlxsw_sp_rif_fid_fid_get,
	.fdb_del = mlxsw_sp_rif_fid_fdb_del,
};

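/* VLAN RIFs serve VLAN-aware bridges, one RIF per {bridge, VID}. The two
 * ASIC generations differ in how the egress FID is conveyed: Spectrum-1
 * leaves it to the hardware (eFID of 0 below), while Spectrum-2 and later
 * program the 802.1Q FID index explicitly.
 */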
static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
			  const struct mlxsw_sp_rif_params *params,
			  struct netlink_ext_ack *extack)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct net_device *br_dev;

	if (WARN_ON(!params->vid))
		return ERR_PTR(-EINVAL);

	if (is_vlan_dev(dev)) {
		br_dev = vlan_dev_real_dev(dev);
		if (WARN_ON(!netif_is_bridge_master(br_dev)))
			return ERR_PTR(-EINVAL);
	}

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, params->vid);
}

static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct switchdev_notifier_fdb_info info = {};
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct net_device *br_dev;
	struct net_device *dev;

	br_dev = is_vlan_dev(rif_dev) ? vlan_dev_real_dev(rif_dev) : rif_dev;
	dev = br_fdb_find_port(br_dev, mac, vid);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = vid;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
				bool enable)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
				    dev->mtu, dev->dev_addr,
				    rif->mac_profile_id, vid, efid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
				       struct netlink_ext_ack *extack)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
	if (err)
		goto err_rif_vlan_fid_op;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_macvlan_replay(rif, extack);
	if (err)
		goto err_macvlan_replay;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
	if (err)
		goto err_fid_rif_set;

	return 0;

err_fid_rif_set:
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
	mlxsw_sp_rif_macvlan_flush(rif);
err_macvlan_replay:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
err_rif_vlan_fid_op:
	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_fid_rif_unset(rif->fid);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
					struct netlink_ext_ack *extack)
{
	return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
	.type = MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size = sizeof(struct mlxsw_sp_rif),
	.configure = mlxsw_sp1_rif_vlan_configure,
	.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
	.fid_get = mlxsw_sp_rif_vlan_fid_get,
	.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};

static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
					struct netlink_ext_ack *extack)
{
	u16 efid = mlxsw_sp_fid_index(rif->fid);

	return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
	.type = MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size = sizeof(struct mlxsw_sp_rif),
	.configure = mlxsw_sp2_rif_vlan_configure,
	.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
	.fid_get = mlxsw_sp_rif_vlan_fid_get,
	.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};

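/* IPIP loopback RIFs anchor tunnel decapsulation in the underlay. On
 * Spectrum-1 the loopback is bound directly to the underlay virtual router
 * (ul_vr_id); on Spectrum-2 and later it references a dedicated underlay
 * RIF (ul_rif_id) instead, created further below.
 */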
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

static int
mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, extack);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	lb_rif->ul_rif_id = 0;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}

static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
	.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup = mlxsw_sp_rif_ipip_lb_setup,
	.configure = mlxsw_sp1_rif_ipip_lb_configure,
	.deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};

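/* Underlay (UL) RIFs are generic loopback RIFs kept per virtual router and
 * shared by all IPIP loopbacks decapsulating in that VR. They are
 * reference-counted through the VR's ul_rif_refcnt and typically have no
 * backing netdevice (a NULL CRIF).
 */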
static int
mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
					     MLXSW_REG_RITR_LOOPBACK_GENERIC);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
		       struct mlxsw_sp_crif *ul_crif,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *ul_rif;
	u8 rif_entries = 1;
	u16 rif_index;
	int err;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		return ERR_PTR(err);
	}

	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id,
				    ul_crif);
	if (!ul_rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	mlxsw_sp->router->rifs[rif_index] = ul_rif;
	ul_rif->mlxsw_sp = mlxsw_sp;
	ul_rif->rif_entries = rif_entries;
	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
	if (err)
		goto ul_rif_op_err;

	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
	return ul_rif;

ul_rif_op_err:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	mlxsw_sp_rif_free(ul_rif);
err_rif_alloc:
	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	u8 rif_entries = ul_rif->rif_entries;
	u16 rif_index = ul_rif->rif_index;

	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
	mlxsw_sp_rif_free(ul_rif);
	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
		    struct mlxsw_sp_crif *ul_crif,
		    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
		return vr->ul_rif;

	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, ul_crif, extack);
	if (IS_ERR(vr->ul_rif)) {
		err = PTR_ERR(vr->ul_rif);
		goto err_ul_rif_create;
	}

	vr->rif_count++;
	refcount_set(&vr->ul_rif_refcnt, 1);

	return vr->ul_rif;

err_ul_rif_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	struct mlxsw_sp_vr *vr;

	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];

	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
		return;

	vr->rif_count--;
	mlxsw_sp_ul_rif_destroy(ul_rif);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
			       u16 *ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, NULL);
	if (IS_ERR(ul_rif)) {
		err = PTR_ERR(ul_rif);
		goto out;
	}
	*ul_rif_index = ul_rif->rif_index;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
	if (WARN_ON(!ul_rif))
		goto out;

	mlxsw_sp_ul_rif_put(ul_rif);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int
mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;
	int err;

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, extack);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = 0;
	lb_rif->ul_rif_id = ul_rif->rif_index;

	return 0;

err_loopback_op:
	mlxsw_sp_ul_rif_put(ul_rif);
	return err;
}

static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;

	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
	mlxsw_sp_ul_rif_put(ul_rif);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
	.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup = mlxsw_sp_rif_ipip_lb_setup,
	.configure = mlxsw_sp2_rif_ipip_lb_configure,
	.deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};

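/* RIF indexes are allocated from a gen_pool rather than a plain bitmap,
 * since a RIF may occupy several consecutive entries (see rif_entries).
 * The pool is offset by MLXSW_SP_ROUTER_GENALLOC_OFFSET because
 * gen_pool_alloc() returns 0 on failure, which would otherwise be
 * indistinguishable from a valid index of 0.
 */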
static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
{
	struct gen_pool *rifs_table;
	int err;

	rifs_table = gen_pool_create(0, -1);
	if (!rifs_table)
		return -ENOMEM;

	gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align,
			  NULL);

	err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
			   MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
	if (err)
		goto err_gen_pool_add;

	mlxsw_sp->router->rifs_table = rifs_table;

	return 0;

err_gen_pool_add:
	gen_pool_destroy(rifs_table);
	return err;
}

static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
{
	gen_pool_destroy(mlxsw_sp->router->rifs_table);
}

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_core *core = mlxsw_sp->core;
	int err;

	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
		return -EIO;
	mlxsw_sp->router->max_rif_mac_profile =
		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	err = mlxsw_sp_rifs_table_init(mlxsw_sp);
	if (err)
		goto err_rifs_table_init;

	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
	atomic_set(&mlxsw_sp->router->rifs_count, 0);
	devl_resource_occ_get_register(devlink,
				       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
				       mlxsw_sp_rif_mac_profiles_occ_get,
				       mlxsw_sp);
	devl_resource_occ_get_register(devlink,
				       MLXSW_SP_RESOURCE_RIFS,
				       mlxsw_sp_rifs_occ_get,
				       mlxsw_sp);

	return 0;

err_rifs_table_init:
	kfree(mlxsw_sp->router->rifs);
	return err;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
	for (i = 0; i < max_rifs; i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
	devl_resource_occ_get_unregister(devlink,
					 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
	mlxsw_sp_rifs_table_fini(mlxsw_sp);
	kfree(mlxsw_sp->router->rifs);
}

static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);

	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
	if (err)
		return err;

	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
	return mlxsw_sp_ipips_init(mlxsw_sp);
}

static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
	return mlxsw_sp_ipips_init(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
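/* Multipath hash configuration mirrors the kernel's
 * fib_multipath_hash_policy sysctls for IPv4 and IPv6: the headers/fields
 * bitmaps below are translated into the RECR2 register so the ASIC hashes
 * on the same packet fields the kernel would. Hashing on inner headers
 * requires increasing the parsing depth.
 */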
struct mlxsw_sp_mp_hash_config {
	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
	bool inc_parsing_depth;
};

#define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)

#define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)

#define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)

static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
}

static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
}

static void
mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
			      u32 hash_fields)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
	/* L4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;
	u32 hash_fields;

	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
	case 0:
		mlxsw_sp_mp4_hash_outer_addr(config);
		break;
	case 1:
		mlxsw_sp_mp4_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2:
		/* Outer */
		mlxsw_sp_mp4_hash_outer_addr(config);
		/* Inner */
		mlxsw_sp_mp_hash_inner_l3(config);
		break;
	case 3:
		hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		break;
	}
}

static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
}

static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
	case 0:
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		break;
	case 1:
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2:
		/* Outer */
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		/* Inner */
		mlxsw_sp_mp_hash_inner_l3(config);
		config->inc_parsing_depth = true;
		break;
	case 3:
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
			config->inc_parsing_depth = true;
		break;
	}
}

static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
						 bool old_inc_parsing_depth,
						 bool new_inc_parsing_depth)
{
	int err;

	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
		if (err)
			return err;
		mlxsw_sp->router->inc_parsing_depth = true;
	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
		mlxsw_sp->router->inc_parsing_depth = false;
	}

	return 0;
}

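/* Program RECR2 with the combined IPv4/IPv6 configuration. The hash seed
 * is derived from the base MAC, so different switches in a topology are
 * likely to use different seeds and thus avoid hashing all flows onto the
 * same next hops.
 */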
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	bool old_inc_parsing_depth, new_inc_parsing_depth;
	struct mlxsw_sp_mp_hash_config config = {};
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	unsigned long bit;
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);

	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
	new_inc_parsing_depth = config.inc_parsing_depth;
	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
						    old_inc_parsing_depth,
						    new_inc_parsing_depth);
	if (err)
		return err;

	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
	if (err)
		goto err_reg_write;

	return 0;

err_reg_write:
	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
					      old_inc_parsing_depth);
	return err;
}

static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
{
	bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;

	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
					      false);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}

static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
{
}
#endif

static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW determines switch priority based on the DSCP bits, but the
	 * kernel still does so based on the full ToS. Since the bit layouts
	 * differ, translate each DSCP value to the priority the ToS would
	 * observe, skipping the two least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	bool usp;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_rif *lb_rif;
	int err;

	router->lb_crif = mlxsw_sp_crif_alloc(NULL);
	if (!router->lb_crif)
		return -ENOMEM;

	/* Create a generic loopback RIF associated with the main table
	 * (default VRF). Any table can be used, but the main table exists
	 * anyway, so we do not waste resources. Loopback RIFs are usually
	 * created with a NULL CRIF, but this RIF is used as a fallback RIF
	 * for blackhole nexthops, and nexthops expect to have a valid CRIF.
	 */
	lb_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN, router->lb_crif,
				     extack);
	if (IS_ERR(lb_rif)) {
		err = PTR_ERR(lb_rif);
		goto err_ul_rif_get;
	}

	return 0;

err_ul_rif_get:
	mlxsw_sp_crif_free(router->lb_crif);
	return err;
}

static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_ul_rif_put(mlxsw_sp->router->lb_crif->rif);
	mlxsw_sp_crif_free(mlxsw_sp->router->lb_crif);
}

static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
	.init = mlxsw_sp1_router_init,
	.ipips_init = mlxsw_sp1_ipips_init,
};

static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
	.init = mlxsw_sp2_router_init,
	.ipips_init = mlxsw_sp2_ipips_init,
};

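/* Top-level router initialization. Internal data structures are set up
 * first and the various notifiers are registered only at the end, once the
 * router is ready to handle the events they deliver; the error path
 * unwinds everything in strict reverse order.
 */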
mlxsw_sp_router_init(struct mlxsw_sp * mlxsw_sp,struct netlink_ext_ack * extack)11357 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
11358 struct netlink_ext_ack *extack)
11359 {
11360 struct mlxsw_sp_router *router;
11361 struct notifier_block *nb;
11362 int err;
11363
11364 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
11365 if (!router)
11366 return -ENOMEM;
11367 mutex_init(&router->lock);
11368 mlxsw_sp->router = router;
11369 router->mlxsw_sp = mlxsw_sp;
11370
11371 err = mlxsw_sp->router_ops->init(mlxsw_sp);
11372 if (err)
11373 goto err_router_ops_init;
11374
11375 INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
11376 INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
11377 mlxsw_sp_nh_grp_activity_work);
11378 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
11379 err = __mlxsw_sp_router_init(mlxsw_sp);
11380 if (err)
11381 goto err_router_init;
11382
11383 err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
11384 if (err)
11385 goto err_ipips_init;
11386
11387 err = rhashtable_init(&mlxsw_sp->router->crif_ht,
11388 &mlxsw_sp_crif_ht_params);
11389 if (err)
11390 goto err_crif_ht_init;
11391
11392 err = mlxsw_sp_rifs_init(mlxsw_sp);
11393 if (err)
11394 goto err_rifs_init;
11395
11396 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
11397 &mlxsw_sp_nexthop_ht_params);
11398 if (err)
11399 goto err_nexthop_ht_init;
11400
11401 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
11402 &mlxsw_sp_nexthop_group_ht_params);
11403 if (err)
11404 goto err_nexthop_group_ht_init;
11405
11406 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
11407 err = mlxsw_sp_lpm_init(mlxsw_sp);
11408 if (err)
11409 goto err_lpm_init;
11410
11411 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
11412 if (err)
11413 goto err_mr_init;
11414
11415 err = mlxsw_sp_vrs_init(mlxsw_sp);
11416 if (err)
11417 goto err_vrs_init;
11418
11419 err = mlxsw_sp_lb_rif_init(mlxsw_sp, extack);
11420 if (err)
11421 goto err_lb_rif_init;
11422
11423 err = mlxsw_sp_neigh_init(mlxsw_sp);
11424 if (err)
11425 goto err_neigh_init;
11426
11427 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
11428 if (err)
11429 goto err_mp_hash_init;
11430
11431 err = mlxsw_sp_dscp_init(mlxsw_sp);
11432 if (err)
11433 goto err_dscp_init;
11434
	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	router->inetaddr_valid_nb.notifier_call = mlxsw_sp_inetaddr_valid_event;
	err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
	if (err)
		goto err_register_inetaddr_valid_notifier;

	nb = &router->inet6addr_valid_nb;
	nb->notifier_call = mlxsw_sp_inet6addr_valid_event;
	err = register_inet6addr_validator_notifier(nb);
	if (err)
		goto err_register_inet6addr_valid_notifier;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	mlxsw_sp->router->netdevice_nb.notifier_call =
		mlxsw_sp_router_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->router->netdevice_nb);
	if (err)
		goto err_register_netdev_notifier;

	mlxsw_sp->router->nexthop_nb.notifier_call =
		mlxsw_sp_nexthop_obj_event;
	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
					&mlxsw_sp->router->nexthop_nb,
					extack);
	if (err)
		goto err_register_nexthop_notifier;

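	/* Registering the FIB notifier also replays the routes already
	 * present in the kernel FIB to the driver, which is why it is the
	 * very last step; mlxsw_sp_router_fib_dump_flush is the callback
	 * the FIB notifier core uses to flush pending work should that
	 * initial dump need to be redone.
	 */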
	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	return 0;

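	/* Error unwinding: each label below undoes exactly the steps that
	 * succeeded before the failure, in reverse order of initialization.
	 */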
err_register_fib_notifier:
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &router->netdevice_nb);
err_register_netdev_notifier:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
err_register_inet6addr_valid_notifier:
	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_valid_notifier:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	mlxsw_core_flush_owq();
err_dscp_init:
	mlxsw_sp_mp_hash_fini(mlxsw_sp);
err_mp_hash_init:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
err_lb_rif_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	rhashtable_destroy(&mlxsw_sp->router->crif_ht);
err_crif_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
err_router_ops_init:
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
	return err;
}

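/* Tear the router down in the exact reverse order of mlxsw_sp_router_init():
 * notifiers are unregistered first, so no new events can reach the driver
 * while the data structures underneath them are being freed.
 */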
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;

	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &router->nexthop_nb);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &router->netdevice_nb);
	unregister_netevent_notifier(&router->netevent_nb);
	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
	unregister_inet6addr_notifier(&router->inet6addr_nb);
	unregister_inetaddr_notifier(&router->inetaddr_nb);
	mlxsw_core_flush_owq();
	mlxsw_sp_mp_hash_fini(mlxsw_sp);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&router->nexthop_group_ht);
	rhashtable_destroy(&router->nexthop_ht);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->crif_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	cancel_delayed_work_sync(&router->nh_grp_activity_dw);
	mutex_destroy(&router->lock);
	kfree(router);
}