// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		/* One tree for each protocol: IPv4 and IPv6 */
		struct mlxsw_sp_lpm_tree *proto_trees[2];
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	struct notifier_block inetaddr_nb;
	struct notifier_block inet6addr_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
	u32 adj_discard_index;
	bool adj_discard_index_valid;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id;	/* Reserved for Spectrum-2. */
	u16 ul_rif_id;	/* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
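/* The ops above form a small vtable that each RIF flavor fills in. As an
 * illustrative sketch only (the instance and its fields are hypothetical,
 * not a real Spectrum configuration), a flavor would look roughly like:
 *
 *	static const struct mlxsw_sp_rif_ops example_rif_ops = {
 *		.type		= MLXSW_SP_RIF_TYPE_SUBPORT,
 *		.rif_size	= sizeof(struct mlxsw_sp_rif_subport),
 *		.setup		= ...,
 *		.configure	= ...,
 *		.deconfigure	= ...,
 *		.fid_get	= ...,
 *	};
 *
 * and is looked up through router->rif_ops_arr[type] when a RIF of that
 * type is created.
 */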
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
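/* A minimal sketch of the counter API's intended life cycle; the caller and
 * error handling here are hypothetical, for illustration only:
 *
 *	u64 packets;
 *
 *	if (!mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
 *					MLXSW_SP_RIF_COUNTER_INGRESS)) {
 *		mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *					       MLXSW_SP_RIF_COUNTER_INGRESS,
 *					       &packets);
 *		mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
 *					  MLXSW_SP_RIF_COUNTER_INGRESS);
 *	}
 *
 * Allocation clears the hardware counter (RICNT) and binds it to the RIF
 * (RITR) before the "valid" flag is set, so value_get() only ever reads a
 * counter that was cleared and bound first.
 */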
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
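/* The bitmap holds one bit per possible prefix length, 0..128 inclusive,
 * hence the "+ 1" in MLXSW_SP_PREFIX_COUNT. As a hypothetical example, a
 * table holding 10.0.0.0/8 and 10.1.0.0/16 routes would be summarized as:
 *
 *	struct mlxsw_sp_prefix_usage usage = {};
 *
 *	mlxsw_sp_prefix_usage_set(&usage, 8);
 *	mlxsw_sp_prefix_usage_set(&usage, 16);
 *	mlxsw_sp_prefix_usage_for_each(prefix, &usage)
 *		...;	// visits prefix = 8, then 16
 *
 * This summary is what decides which LPM tree layout a FIB needs.
 */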
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
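/* LPM trees are a scarce hardware resource, so they are shared: _get() first
 * looks for an existing tree with the same protocol and prefix-usage
 * signature and takes a reference on it, and only creates a new tree when no
 * match exists. A minimal sketch of the pairing (the caller is hypothetical):
 *
 *	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &usage, proto);
 *	if (IS_ERR(lpm_tree))
 *		return PTR_ERR(lpm_tree);
 *	...
 *	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
 *
 * The last put frees the tree back to the device (RALTA).
 */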
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}
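/* The squashing above means that routes from the kernel's "local", "main"
 * and "default" tables all land in the same virtual router, while a VRF's
 * table ID passes through unchanged:
 *
 *	mlxsw_sp_fix_tb_id(RT_TABLE_LOCAL)   == RT_TABLE_MAIN
 *	mlxsw_sp_fix_tb_id(RT_TABLE_DEFAULT) == RT_TABLE_MAIN
 *	mlxsw_sp_fix_tb_id(100)              == 100	// e.g. a VRF table
 */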
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return -ESRCH;
	*vr_id = vr->id;

	return 0;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}
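/* Note that a VR carries no explicit reference count; it is "used" as long
 * as any of its FIBs or MR tables has content, or a RIF points at it. A
 * sketch of the expected get/put discipline (the caller is hypothetical):
 *
 *	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
 *	if (IS_ERR(vr))
 *		return PTR_ERR(vr);
 *	... install routes / RIFs against vr ...
 *	mlxsw_sp_vr_put(mlxsw_sp, vr);	// destroys vr only if now empty
 */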
static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != type)
		return NULL;

	return fib_entry;
}
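/* Promotion and demotion toggle a local host route between two states.
 * Roughly, per the entry types above and the callers below:
 *
 *	TRAP        -- tunnel up, matching local /32 found -->  IPIP_DECAP
 *	IPIP_DECAP  -- tunnel down/demoted, or update fails -->  TRAP
 *
 * In the TRAP state packets for that address hit the CPU; in IPIP_DECAP
 * they are decapsulated in hardware using the allocated tunnel adjacency
 * index.
 */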
/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}
static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	enum mlxsw_sp_ipip_type ipipt;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}
static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);

/**
 * Update the offload related to an IPIP entry. This always updates decap, and
 * in addition to that it also:
 * @recreate_loopback: recreates the associated loopback RIF
 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
 *	relevant when recreate_loopback is true.
 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
 *	is only relevant when recreate_loopback is false.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}
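/* How the callers below combine the (recreate_loopback, keep_encap,
 * update_nexthops) flags, per the event they handle:
 *
 *	overlay moved to VRF	-> (true,  false, false)  new VR, drop encap
 *	underlay moved to VRF	-> (true,  true,  false)  new VR, keep encap
 *	underlay up or down	-> (false, false, true)   just refresh nexthops
 */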
static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return 0;

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     bool *demote_this,
				     struct netlink_ext_ack *extack)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* Moving underlay to a different VRF might cause local address
	 * conflict, and the conflicting tunnels need to be demoted.
	 */
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		*demote_this = true;
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}

void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}

static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *ul_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
	}
}

int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *ol_dev,
				     unsigned long event,
				     struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_REGISTER:
		return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								    ol_dev,
								    extack);
		return 0;
	case NETDEV_CHANGE:
		extack = info->extack;
		return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							       ol_dev, extack);
	case NETDEV_CHANGEMTU:
		return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
	}
	return 0;
}
static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   bool *demote_this,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    demote_this,
								    extack);
		break;

	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}
	return 0;
}

int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err;

	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		struct mlxsw_sp_ipip_entry *prev;
		bool demote_this = false;

		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, &demote_this,
							 event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			return err;
		}

		if (demote_this) {
			if (list_is_first(&ipip_entry->ipip_list_node,
					  &mlxsw_sp->router->ipip_list))
				prev = NULL;
			else
				/* This can't be cached from previous iteration,
				 * because that entry could be gone now.
				 */
				prev = list_prev_entry(ipip_entry,
						       ipip_list_node);
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			ipip_entry = prev;
		}
	}

	return 0;
}
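/* The loop above resumes the search from the last visited entry, which makes
 * deleting entries while walking the list safe: when an entry must be
 * demoted, the iterator is first rewound to its predecessor (or NULL at the
 * list head) before the entry is destroyed, and find_by_ul_dev() continues
 * from there on the next pass.
 */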
list_head rif_list_node; 1914 struct rhash_head ht_node; 1915 struct mlxsw_sp_neigh_key key; 1916 u16 rif; 1917 bool connected; 1918 unsigned char ha[ETH_ALEN]; 1919 struct list_head nexthop_list; /* list of nexthops using 1920 * this neigh entry 1921 */ 1922 struct list_head nexthop_neighs_list_node; 1923 unsigned int counter_index; 1924 bool counter_valid; 1925 }; 1926 1927 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = { 1928 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key), 1929 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node), 1930 .key_len = sizeof(struct mlxsw_sp_neigh_key), 1931 }; 1932 1933 struct mlxsw_sp_neigh_entry * 1934 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif, 1935 struct mlxsw_sp_neigh_entry *neigh_entry) 1936 { 1937 if (!neigh_entry) { 1938 if (list_empty(&rif->neigh_list)) 1939 return NULL; 1940 else 1941 return list_first_entry(&rif->neigh_list, 1942 typeof(*neigh_entry), 1943 rif_list_node); 1944 } 1945 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list)) 1946 return NULL; 1947 return list_next_entry(neigh_entry, rif_list_node); 1948 } 1949 1950 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry) 1951 { 1952 return neigh_entry->key.n->tbl->family; 1953 } 1954 1955 unsigned char * 1956 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry) 1957 { 1958 return neigh_entry->ha; 1959 } 1960 1961 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry) 1962 { 1963 struct neighbour *n; 1964 1965 n = neigh_entry->key.n; 1966 return ntohl(*((__be32 *) n->primary_key)); 1967 } 1968 1969 struct in6_addr * 1970 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry) 1971 { 1972 struct neighbour *n; 1973 1974 n = neigh_entry->key.n; 1975 return (struct in6_addr *) &n->primary_key; 1976 } 1977 1978 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp, 1979 struct mlxsw_sp_neigh_entry *neigh_entry, 1980 u64 *p_counter) 1981 { 1982 if (!neigh_entry->counter_valid) 1983 return -EINVAL; 1984 1985 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index, 1986 p_counter, NULL); 1987 } 1988 1989 static struct mlxsw_sp_neigh_entry * 1990 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n, 1991 u16 rif) 1992 { 1993 struct mlxsw_sp_neigh_entry *neigh_entry; 1994 1995 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL); 1996 if (!neigh_entry) 1997 return NULL; 1998 1999 neigh_entry->key.n = n; 2000 neigh_entry->rif = rif; 2001 INIT_LIST_HEAD(&neigh_entry->nexthop_list); 2002 2003 return neigh_entry; 2004 } 2005 2006 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry) 2007 { 2008 kfree(neigh_entry); 2009 } 2010 2011 static int 2012 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp, 2013 struct mlxsw_sp_neigh_entry *neigh_entry) 2014 { 2015 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht, 2016 &neigh_entry->ht_node, 2017 mlxsw_sp_neigh_ht_params); 2018 } 2019 2020 static void 2021 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, 2022 struct mlxsw_sp_neigh_entry *neigh_entry) 2023 { 2024 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht, 2025 &neigh_entry->ht_node, 2026 mlxsw_sp_neigh_ht_params); 2027 } 2028 2029 static bool 2030 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp, 2031 struct mlxsw_sp_neigh_entry *neigh_entry) 2032 { 2033 struct devlink *devlink; 2034 const char *table_name; 2035 2036 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) { 2037 case AF_INET: 2038 
table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4; 2039 break; 2040 case AF_INET6: 2041 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6; 2042 break; 2043 default: 2044 WARN_ON(1); 2045 return false; 2046 } 2047 2048 devlink = priv_to_devlink(mlxsw_sp->core); 2049 return devlink_dpipe_table_counter_enabled(devlink, table_name); 2050 } 2051 2052 static void 2053 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp, 2054 struct mlxsw_sp_neigh_entry *neigh_entry) 2055 { 2056 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry)) 2057 return; 2058 2059 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index)) 2060 return; 2061 2062 neigh_entry->counter_valid = true; 2063 } 2064 2065 static void 2066 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp, 2067 struct mlxsw_sp_neigh_entry *neigh_entry) 2068 { 2069 if (!neigh_entry->counter_valid) 2070 return; 2071 mlxsw_sp_flow_counter_free(mlxsw_sp, 2072 neigh_entry->counter_index); 2073 neigh_entry->counter_valid = false; 2074 } 2075 2076 static struct mlxsw_sp_neigh_entry * 2077 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) 2078 { 2079 struct mlxsw_sp_neigh_entry *neigh_entry; 2080 struct mlxsw_sp_rif *rif; 2081 int err; 2082 2083 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); 2084 if (!rif) 2085 return ERR_PTR(-EINVAL); 2086 2087 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index); 2088 if (!neigh_entry) 2089 return ERR_PTR(-ENOMEM); 2090 2091 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); 2092 if (err) 2093 goto err_neigh_entry_insert; 2094 2095 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry); 2096 list_add(&neigh_entry->rif_list_node, &rif->neigh_list); 2097 2098 return neigh_entry; 2099 2100 err_neigh_entry_insert: 2101 mlxsw_sp_neigh_entry_free(neigh_entry); 2102 return ERR_PTR(err); 2103 } 2104 2105 static void 2106 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp, 2107 struct mlxsw_sp_neigh_entry *neigh_entry) 2108 { 2109 list_del(&neigh_entry->rif_list_node); 2110 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry); 2111 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); 2112 mlxsw_sp_neigh_entry_free(neigh_entry); 2113 } 2114 2115 static struct mlxsw_sp_neigh_entry * 2116 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) 2117 { 2118 struct mlxsw_sp_neigh_key key; 2119 2120 key.n = n; 2121 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht, 2122 &key, mlxsw_sp_neigh_ht_params); 2123 } 2124 2125 static void 2126 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp) 2127 { 2128 unsigned long interval; 2129 2130 #if IS_ENABLED(CONFIG_IPV6) 2131 interval = min_t(unsigned long, 2132 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME), 2133 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME)); 2134 #else 2135 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); 2136 #endif 2137 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval); 2138 } 2139 2140 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, 2141 char *rauhtd_pl, 2142 int ent_index) 2143 { 2144 struct net_device *dev; 2145 struct neighbour *n; 2146 __be32 dipn; 2147 u32 dip; 2148 u16 rif; 2149 2150 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip); 2151 2152 if (!mlxsw_sp->router->rifs[rif]) { 2153 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n"); 2154 return; 2155 } 2156 2157 dipn = htonl(dip); 2158 dev = mlxsw_sp->router->rifs[rif]->dev; 2159 n = 
neigh_lookup(&arp_tbl, &dipn, dev); 2160 if (!n) 2161 return; 2162 2163 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip); 2164 neigh_event_send(n, NULL); 2165 neigh_release(n); 2166 } 2167 2168 #if IS_ENABLED(CONFIG_IPV6) 2169 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, 2170 char *rauhtd_pl, 2171 int rec_index) 2172 { 2173 struct net_device *dev; 2174 struct neighbour *n; 2175 struct in6_addr dip; 2176 u16 rif; 2177 2178 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif, 2179 (char *) &dip); 2180 2181 if (!mlxsw_sp->router->rifs[rif]) { 2182 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n"); 2183 return; 2184 } 2185 2186 dev = mlxsw_sp->router->rifs[rif]->dev; 2187 n = neigh_lookup(&nd_tbl, &dip, dev); 2188 if (!n) 2189 return; 2190 2191 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); 2192 neigh_event_send(n, NULL); 2193 neigh_release(n); 2194 } 2195 #else 2196 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, 2197 char *rauhtd_pl, 2198 int rec_index) 2199 { 2200 } 2201 #endif 2202 2203 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, 2204 char *rauhtd_pl, 2205 int rec_index) 2206 { 2207 u8 num_entries; 2208 int i; 2209 2210 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, 2211 rec_index); 2212 /* Hardware starts counting at 0, so add 1. */ 2213 num_entries++; 2214 2215 /* Each record consists of several neighbour entries. */ 2216 for (i = 0; i < num_entries; i++) { 2217 int ent_index; 2218 2219 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i; 2220 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl, 2221 ent_index); 2222 } 2223 2224 } 2225 2226 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp, 2227 char *rauhtd_pl, 2228 int rec_index) 2229 { 2230 /* One record contains one entry. */ 2231 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl, 2232 rec_index); 2233 } 2234 2235 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, 2236 char *rauhtd_pl, int rec_index) 2237 { 2238 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) { 2239 case MLXSW_REG_RAUHTD_TYPE_IPV4: 2240 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl, 2241 rec_index); 2242 break; 2243 case MLXSW_REG_RAUHTD_TYPE_IPV6: 2244 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl, 2245 rec_index); 2246 break; 2247 } 2248 } 2249 2250 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl) 2251 { 2252 u8 num_rec, last_rec_index, num_entries; 2253 2254 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); 2255 last_rec_index = num_rec - 1; 2256 2257 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM) 2258 return false; 2259 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) == 2260 MLXSW_REG_RAUHTD_TYPE_IPV6) 2261 return true; 2262 2263 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, 2264 last_rec_index); 2265 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC) 2266 return true; 2267 return false; 2268 } 2269 2270 static int 2271 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp, 2272 char *rauhtd_pl, 2273 enum mlxsw_reg_rauhtd_type type) 2274 { 2275 int i, num_rec; 2276 int err; 2277 2278 /* Make sure the neighbour's netdev isn't removed in the 2279 * process. 
2280 */
2281 rtnl_lock();
2282 do {
2283 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2284 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2285 rauhtd_pl);
2286 if (err) {
2287 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2288 break;
2289 }
2290 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2291 for (i = 0; i < num_rec; i++)
2292 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2293 i);
2294 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2295 rtnl_unlock();
2296
2297 return err;
2298 }
2299
2300 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2301 {
2302 enum mlxsw_reg_rauhtd_type type;
2303 char *rauhtd_pl;
2304 int err;
2305
2306 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2307 if (!rauhtd_pl)
2308 return -ENOMEM;
2309
2310 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2311 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2312 if (err)
2313 goto out;
2314
2315 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2316 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2317 out:
2318 kfree(rauhtd_pl);
2319 return err;
2320 }
2321
2322 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2323 {
2324 struct mlxsw_sp_neigh_entry *neigh_entry;
2325
2326 /* Take RTNL mutex here to prevent the lists from being changed */
2327 rtnl_lock();
2328 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2329 nexthop_neighs_list_node)
2330 /* If this neigh has nexthops, make the kernel think it is
2331 * active regardless of the traffic.
2332 */
2333 neigh_event_send(neigh_entry->key.n, NULL);
2334 rtnl_unlock();
2335 }
2336
2337 static void
2338 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2339 {
2340 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2341
2342 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2343 msecs_to_jiffies(interval));
2344 }
2345
2346 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2347 {
2348 struct mlxsw_sp_router *router;
2349 int err;
2350
2351 router = container_of(work, struct mlxsw_sp_router,
2352 neighs_update.dw.work);
2353 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2354 if (err)
2355 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2356
2357 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2358
2359 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2360 }
2361
2362 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2363 {
2364 struct mlxsw_sp_neigh_entry *neigh_entry;
2365 struct mlxsw_sp_router *router;
2366
2367 router = container_of(work, struct mlxsw_sp_router,
2368 nexthop_probe_dw.work);
2369 /* Iterate over nexthop neighbours, find those that are unresolved and
2370 * send ARP on them. This solves the chicken-and-egg problem where a
2371 * nexthop would not get offloaded until its neighbour is resolved, but
2372 * the neighbour would never get resolved if traffic is flowing in HW
2373 * using a different nexthop.
2374 *
2375 * Take RTNL mutex here to prevent the lists from being changed.
2376 */ 2377 rtnl_lock(); 2378 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list, 2379 nexthop_neighs_list_node) 2380 if (!neigh_entry->connected) 2381 neigh_event_send(neigh_entry->key.n, NULL); 2382 rtnl_unlock(); 2383 2384 mlxsw_core_schedule_dw(&router->nexthop_probe_dw, 2385 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL); 2386 } 2387 2388 static void 2389 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, 2390 struct mlxsw_sp_neigh_entry *neigh_entry, 2391 bool removing, bool dead); 2392 2393 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding) 2394 { 2395 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD : 2396 MLXSW_REG_RAUHT_OP_WRITE_DELETE; 2397 } 2398 2399 static int 2400 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp, 2401 struct mlxsw_sp_neigh_entry *neigh_entry, 2402 enum mlxsw_reg_rauht_op op) 2403 { 2404 struct neighbour *n = neigh_entry->key.n; 2405 u32 dip = ntohl(*((__be32 *) n->primary_key)); 2406 char rauht_pl[MLXSW_REG_RAUHT_LEN]; 2407 2408 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha, 2409 dip); 2410 if (neigh_entry->counter_valid) 2411 mlxsw_reg_rauht_pack_counter(rauht_pl, 2412 neigh_entry->counter_index); 2413 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); 2414 } 2415 2416 static int 2417 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp, 2418 struct mlxsw_sp_neigh_entry *neigh_entry, 2419 enum mlxsw_reg_rauht_op op) 2420 { 2421 struct neighbour *n = neigh_entry->key.n; 2422 char rauht_pl[MLXSW_REG_RAUHT_LEN]; 2423 const char *dip = n->primary_key; 2424 2425 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha, 2426 dip); 2427 if (neigh_entry->counter_valid) 2428 mlxsw_reg_rauht_pack_counter(rauht_pl, 2429 neigh_entry->counter_index); 2430 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); 2431 } 2432 2433 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry) 2434 { 2435 struct neighbour *n = neigh_entry->key.n; 2436 2437 /* Packets with a link-local destination address are trapped 2438 * after LPM lookup and never reach the neighbour table, so 2439 * there is no need to program such neighbours to the device. 
2440 */ 2441 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) & 2442 IPV6_ADDR_LINKLOCAL) 2443 return true; 2444 return false; 2445 } 2446 2447 static void 2448 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, 2449 struct mlxsw_sp_neigh_entry *neigh_entry, 2450 bool adding) 2451 { 2452 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding); 2453 int err; 2454 2455 if (!adding && !neigh_entry->connected) 2456 return; 2457 neigh_entry->connected = adding; 2458 if (neigh_entry->key.n->tbl->family == AF_INET) { 2459 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry, 2460 op); 2461 if (err) 2462 return; 2463 } else if (neigh_entry->key.n->tbl->family == AF_INET6) { 2464 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry)) 2465 return; 2466 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry, 2467 op); 2468 if (err) 2469 return; 2470 } else { 2471 WARN_ON_ONCE(1); 2472 return; 2473 } 2474 2475 if (adding) 2476 neigh_entry->key.n->flags |= NTF_OFFLOADED; 2477 else 2478 neigh_entry->key.n->flags &= ~NTF_OFFLOADED; 2479 } 2480 2481 void 2482 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp, 2483 struct mlxsw_sp_neigh_entry *neigh_entry, 2484 bool adding) 2485 { 2486 if (adding) 2487 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry); 2488 else 2489 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry); 2490 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true); 2491 } 2492 2493 struct mlxsw_sp_netevent_work { 2494 struct work_struct work; 2495 struct mlxsw_sp *mlxsw_sp; 2496 struct neighbour *n; 2497 }; 2498 2499 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) 2500 { 2501 struct mlxsw_sp_netevent_work *net_work = 2502 container_of(work, struct mlxsw_sp_netevent_work, work); 2503 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp; 2504 struct mlxsw_sp_neigh_entry *neigh_entry; 2505 struct neighbour *n = net_work->n; 2506 unsigned char ha[ETH_ALEN]; 2507 bool entry_connected; 2508 u8 nud_state, dead; 2509 2510 /* If these parameters are changed after we release the lock, 2511 * then we are guaranteed to receive another event letting us 2512 * know about it. 
2513 */ 2514 read_lock_bh(&n->lock); 2515 memcpy(ha, n->ha, ETH_ALEN); 2516 nud_state = n->nud_state; 2517 dead = n->dead; 2518 read_unlock_bh(&n->lock); 2519 2520 rtnl_lock(); 2521 mlxsw_sp_span_respin(mlxsw_sp); 2522 2523 entry_connected = nud_state & NUD_VALID && !dead; 2524 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); 2525 if (!entry_connected && !neigh_entry) 2526 goto out; 2527 if (!neigh_entry) { 2528 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); 2529 if (IS_ERR(neigh_entry)) 2530 goto out; 2531 } 2532 2533 memcpy(neigh_entry->ha, ha, ETH_ALEN); 2534 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected); 2535 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected, 2536 dead); 2537 2538 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) 2539 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); 2540 2541 out: 2542 rtnl_unlock(); 2543 neigh_release(n); 2544 kfree(net_work); 2545 } 2546 2547 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp); 2548 2549 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work) 2550 { 2551 struct mlxsw_sp_netevent_work *net_work = 2552 container_of(work, struct mlxsw_sp_netevent_work, work); 2553 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp; 2554 2555 mlxsw_sp_mp_hash_init(mlxsw_sp); 2556 kfree(net_work); 2557 } 2558 2559 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); 2560 2561 static void mlxsw_sp_router_update_priority_work(struct work_struct *work) 2562 { 2563 struct mlxsw_sp_netevent_work *net_work = 2564 container_of(work, struct mlxsw_sp_netevent_work, work); 2565 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp; 2566 2567 __mlxsw_sp_router_init(mlxsw_sp); 2568 kfree(net_work); 2569 } 2570 2571 static int mlxsw_sp_router_schedule_work(struct net *net, 2572 struct notifier_block *nb, 2573 void (*cb)(struct work_struct *)) 2574 { 2575 struct mlxsw_sp_netevent_work *net_work; 2576 struct mlxsw_sp_router *router; 2577 2578 router = container_of(nb, struct mlxsw_sp_router, netevent_nb); 2579 if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp))) 2580 return NOTIFY_DONE; 2581 2582 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); 2583 if (!net_work) 2584 return NOTIFY_BAD; 2585 2586 INIT_WORK(&net_work->work, cb); 2587 net_work->mlxsw_sp = router->mlxsw_sp; 2588 mlxsw_core_schedule_work(&net_work->work); 2589 return NOTIFY_DONE; 2590 } 2591 2592 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, 2593 unsigned long event, void *ptr) 2594 { 2595 struct mlxsw_sp_netevent_work *net_work; 2596 struct mlxsw_sp_port *mlxsw_sp_port; 2597 struct mlxsw_sp *mlxsw_sp; 2598 unsigned long interval; 2599 struct neigh_parms *p; 2600 struct neighbour *n; 2601 2602 switch (event) { 2603 case NETEVENT_DELAY_PROBE_TIME_UPDATE: 2604 p = ptr; 2605 2606 /* We don't care about changes in the default table. */ 2607 if (!p->dev || (p->tbl->family != AF_INET && 2608 p->tbl->family != AF_INET6)) 2609 return NOTIFY_DONE; 2610 2611 /* We are in atomic context and can't take RTNL mutex, 2612 * so use RCU variant to walk the device chain. 
2613 */
2614 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2615 if (!mlxsw_sp_port)
2616 return NOTIFY_DONE;
2617
2618 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2619 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2620 mlxsw_sp->router->neighs_update.interval = interval;
2621
2622 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2623 break;
2624 case NETEVENT_NEIGH_UPDATE:
2625 n = ptr;
2626
2627 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2628 return NOTIFY_DONE;
2629
2630 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2631 if (!mlxsw_sp_port)
2632 return NOTIFY_DONE;
2633
2634 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2635 if (!net_work) {
2636 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2637 return NOTIFY_BAD;
2638 }
2639
2640 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2641 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2642 net_work->n = n;
2643
2644 /* Take a reference to ensure the neighbour won't be
2645 * destroyed until we drop the reference in the delayed
2646 * work.
2647 */
2648 neigh_clone(n);
2649 mlxsw_core_schedule_work(&net_work->work);
2650 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2651 break;
2652 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2653 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2654 return mlxsw_sp_router_schedule_work(ptr, nb,
2655 mlxsw_sp_router_mp_hash_event_work);
2656
2657 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2658 return mlxsw_sp_router_schedule_work(ptr, nb,
2659 mlxsw_sp_router_update_priority_work);
2660 }
2661
2662 return NOTIFY_DONE;
2663 }
2664
2665 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2666 {
2667 int err;
2668
2669 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2670 &mlxsw_sp_neigh_ht_params);
2671 if (err)
2672 return err;
2673
2674 /* Initialize the polling interval according to the default
2675 * table.
2676 */
2677 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2678
2679 /* Create the delayed works for neighbour activity update and nexthop probing */
2680 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2681 mlxsw_sp_router_neighs_update_work);
2682 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2683 mlxsw_sp_router_probe_unresolved_nexthops);
2684 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2685 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2686 return 0;
2687 }
2688
2689 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2690 {
2691 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2692 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2693 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2694 }
2695
2696 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2697 struct mlxsw_sp_rif *rif)
2698 {
2699 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2700
2701 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2702 rif_list_node) {
2703 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2704 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2705 }
2706 }
2707
2708 enum mlxsw_sp_nexthop_type {
2709 MLXSW_SP_NEXTHOP_TYPE_ETH,
2710 MLXSW_SP_NEXTHOP_TYPE_IPIP,
2711 };
2712
2713 struct mlxsw_sp_nexthop_key {
2714 struct fib_nh *fib_nh;
2715 };
2716
2717 struct mlxsw_sp_nexthop {
2718 struct list_head neigh_list_node; /* member of neigh entry list */
2719 struct list_head rif_list_node;
2720 struct list_head router_list_node;
2721 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2722 * this belongs to
2723 */
2724 struct rhash_head ht_node;
2725 struct mlxsw_sp_nexthop_key key;
2726 unsigned char gw_addr[sizeof(struct in6_addr)];
2727 int ifindex;
2728 int nh_weight;
2729 int norm_nh_weight;
2730 int num_adj_entries;
2731 struct mlxsw_sp_rif *rif;
2732 u8 should_offload:1, /* set indicates this neigh is connected and
2733 * should be put to KVD linear area of this group.
2734 */
2735 offloaded:1, /* set in case the neigh is actually put into
2736 * KVD linear area of this group.
2737 */ 2738 update:1; /* set indicates that MAC of this neigh should be 2739 * updated in HW 2740 */ 2741 enum mlxsw_sp_nexthop_type type; 2742 union { 2743 struct mlxsw_sp_neigh_entry *neigh_entry; 2744 struct mlxsw_sp_ipip_entry *ipip_entry; 2745 }; 2746 unsigned int counter_index; 2747 bool counter_valid; 2748 }; 2749 2750 struct mlxsw_sp_nexthop_group { 2751 void *priv; 2752 struct rhash_head ht_node; 2753 struct list_head fib_list; /* list of fib entries that use this group */ 2754 struct neigh_table *neigh_tbl; 2755 u8 adj_index_valid:1, 2756 gateway:1; /* routes using the group use a gateway */ 2757 u32 adj_index; 2758 u16 ecmp_size; 2759 u16 count; 2760 int sum_norm_weight; 2761 struct mlxsw_sp_nexthop nexthops[0]; 2762 #define nh_rif nexthops[0].rif 2763 }; 2764 2765 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, 2766 struct mlxsw_sp_nexthop *nh) 2767 { 2768 struct devlink *devlink; 2769 2770 devlink = priv_to_devlink(mlxsw_sp->core); 2771 if (!devlink_dpipe_table_counter_enabled(devlink, 2772 MLXSW_SP_DPIPE_TABLE_NAME_ADJ)) 2773 return; 2774 2775 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index)) 2776 return; 2777 2778 nh->counter_valid = true; 2779 } 2780 2781 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp, 2782 struct mlxsw_sp_nexthop *nh) 2783 { 2784 if (!nh->counter_valid) 2785 return; 2786 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index); 2787 nh->counter_valid = false; 2788 } 2789 2790 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp, 2791 struct mlxsw_sp_nexthop *nh, u64 *p_counter) 2792 { 2793 if (!nh->counter_valid) 2794 return -EINVAL; 2795 2796 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index, 2797 p_counter, NULL); 2798 } 2799 2800 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, 2801 struct mlxsw_sp_nexthop *nh) 2802 { 2803 if (!nh) { 2804 if (list_empty(&router->nexthop_list)) 2805 return NULL; 2806 else 2807 return list_first_entry(&router->nexthop_list, 2808 typeof(*nh), router_list_node); 2809 } 2810 if (list_is_last(&nh->router_list_node, &router->nexthop_list)) 2811 return NULL; 2812 return list_next_entry(nh, router_list_node); 2813 } 2814 2815 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh) 2816 { 2817 return nh->offloaded; 2818 } 2819 2820 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh) 2821 { 2822 if (!nh->offloaded) 2823 return NULL; 2824 return nh->neigh_entry->ha; 2825 } 2826 2827 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, 2828 u32 *p_adj_size, u32 *p_adj_hash_index) 2829 { 2830 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; 2831 u32 adj_hash_index = 0; 2832 int i; 2833 2834 if (!nh->offloaded || !nh_grp->adj_index_valid) 2835 return -EINVAL; 2836 2837 *p_adj_index = nh_grp->adj_index; 2838 *p_adj_size = nh_grp->ecmp_size; 2839 2840 for (i = 0; i < nh_grp->count; i++) { 2841 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i]; 2842 2843 if (nh_iter == nh) 2844 break; 2845 if (nh_iter->offloaded) 2846 adj_hash_index += nh_iter->num_adj_entries; 2847 } 2848 2849 *p_adj_hash_index = adj_hash_index; 2850 return 0; 2851 } 2852 2853 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh) 2854 { 2855 return nh->rif; 2856 } 2857 2858 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh) 2859 { 2860 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; 2861 int i; 2862 2863 for (i = 0; i < nh_grp->count; i++) { 2864 struct mlxsw_sp_nexthop *nh_iter = 
&nh_grp->nexthops[i]; 2865 2866 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP) 2867 return true; 2868 } 2869 return false; 2870 } 2871 2872 static struct fib_info * 2873 mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp) 2874 { 2875 return nh_grp->priv; 2876 } 2877 2878 struct mlxsw_sp_nexthop_group_cmp_arg { 2879 enum mlxsw_sp_l3proto proto; 2880 union { 2881 struct fib_info *fi; 2882 struct mlxsw_sp_fib6_entry *fib6_entry; 2883 }; 2884 }; 2885 2886 static bool 2887 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp, 2888 const struct in6_addr *gw, int ifindex, 2889 int weight) 2890 { 2891 int i; 2892 2893 for (i = 0; i < nh_grp->count; i++) { 2894 const struct mlxsw_sp_nexthop *nh; 2895 2896 nh = &nh_grp->nexthops[i]; 2897 if (nh->ifindex == ifindex && nh->nh_weight == weight && 2898 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr)) 2899 return true; 2900 } 2901 2902 return false; 2903 } 2904 2905 static bool 2906 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp, 2907 const struct mlxsw_sp_fib6_entry *fib6_entry) 2908 { 2909 struct mlxsw_sp_rt6 *mlxsw_sp_rt6; 2910 2911 if (nh_grp->count != fib6_entry->nrt6) 2912 return false; 2913 2914 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { 2915 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh; 2916 struct in6_addr *gw; 2917 int ifindex, weight; 2918 2919 ifindex = fib6_nh->fib_nh_dev->ifindex; 2920 weight = fib6_nh->fib_nh_weight; 2921 gw = &fib6_nh->fib_nh_gw6; 2922 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex, 2923 weight)) 2924 return false; 2925 } 2926 2927 return true; 2928 } 2929 2930 static int 2931 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr) 2932 { 2933 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key; 2934 const struct mlxsw_sp_nexthop_group *nh_grp = ptr; 2935 2936 switch (cmp_arg->proto) { 2937 case MLXSW_SP_L3_PROTO_IPV4: 2938 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp); 2939 case MLXSW_SP_L3_PROTO_IPV6: 2940 return !mlxsw_sp_nexthop6_group_cmp(nh_grp, 2941 cmp_arg->fib6_entry); 2942 default: 2943 WARN_ON(1); 2944 return 1; 2945 } 2946 } 2947 2948 static int 2949 mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp) 2950 { 2951 return nh_grp->neigh_tbl->family; 2952 } 2953 2954 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed) 2955 { 2956 const struct mlxsw_sp_nexthop_group *nh_grp = data; 2957 const struct mlxsw_sp_nexthop *nh; 2958 struct fib_info *fi; 2959 unsigned int val; 2960 int i; 2961 2962 switch (mlxsw_sp_nexthop_group_type(nh_grp)) { 2963 case AF_INET: 2964 fi = mlxsw_sp_nexthop4_group_fi(nh_grp); 2965 return jhash(&fi, sizeof(fi), seed); 2966 case AF_INET6: 2967 val = nh_grp->count; 2968 for (i = 0; i < nh_grp->count; i++) { 2969 nh = &nh_grp->nexthops[i]; 2970 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed); 2971 } 2972 return jhash(&val, sizeof(val), seed); 2973 default: 2974 WARN_ON(1); 2975 return 0; 2976 } 2977 } 2978 2979 static u32 2980 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed) 2981 { 2982 unsigned int val = fib6_entry->nrt6; 2983 struct mlxsw_sp_rt6 *mlxsw_sp_rt6; 2984 struct net_device *dev; 2985 2986 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { 2987 dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev; 2988 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed); 2989 } 2990 2991 return jhash(&val, sizeof(val), seed); 2992 } 2993 2994 
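/* Hash the lookup key for the nexthop group table: IPv4 groups are keyed
 * by their fib_info pointer, while IPv6 groups fold the route count and
 * the ifindex of each nexthop, mirroring mlxsw_sp_nexthop_group_hash_obj()
 * above so that key and object hash to the same bucket.
 */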
static u32 2995 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed) 2996 { 2997 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data; 2998 2999 switch (cmp_arg->proto) { 3000 case MLXSW_SP_L3_PROTO_IPV4: 3001 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed); 3002 case MLXSW_SP_L3_PROTO_IPV6: 3003 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed); 3004 default: 3005 WARN_ON(1); 3006 return 0; 3007 } 3008 } 3009 3010 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { 3011 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node), 3012 .hashfn = mlxsw_sp_nexthop_group_hash, 3013 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj, 3014 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp, 3015 }; 3016 3017 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, 3018 struct mlxsw_sp_nexthop_group *nh_grp) 3019 { 3020 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 && 3021 !nh_grp->gateway) 3022 return 0; 3023 3024 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht, 3025 &nh_grp->ht_node, 3026 mlxsw_sp_nexthop_group_ht_params); 3027 } 3028 3029 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp, 3030 struct mlxsw_sp_nexthop_group *nh_grp) 3031 { 3032 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 && 3033 !nh_grp->gateway) 3034 return; 3035 3036 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht, 3037 &nh_grp->ht_node, 3038 mlxsw_sp_nexthop_group_ht_params); 3039 } 3040 3041 static struct mlxsw_sp_nexthop_group * 3042 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp, 3043 struct fib_info *fi) 3044 { 3045 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; 3046 3047 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4; 3048 cmp_arg.fi = fi; 3049 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, 3050 &cmp_arg, 3051 mlxsw_sp_nexthop_group_ht_params); 3052 } 3053 3054 static struct mlxsw_sp_nexthop_group * 3055 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp, 3056 struct mlxsw_sp_fib6_entry *fib6_entry) 3057 { 3058 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; 3059 3060 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6; 3061 cmp_arg.fib6_entry = fib6_entry; 3062 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, 3063 &cmp_arg, 3064 mlxsw_sp_nexthop_group_ht_params); 3065 } 3066 3067 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = { 3068 .key_offset = offsetof(struct mlxsw_sp_nexthop, key), 3069 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node), 3070 .key_len = sizeof(struct mlxsw_sp_nexthop_key), 3071 }; 3072 3073 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp, 3074 struct mlxsw_sp_nexthop *nh) 3075 { 3076 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht, 3077 &nh->ht_node, mlxsw_sp_nexthop_ht_params); 3078 } 3079 3080 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp, 3081 struct mlxsw_sp_nexthop *nh) 3082 { 3083 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node, 3084 mlxsw_sp_nexthop_ht_params); 3085 } 3086 3087 static struct mlxsw_sp_nexthop * 3088 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp, 3089 struct mlxsw_sp_nexthop_key key) 3090 { 3091 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key, 3092 mlxsw_sp_nexthop_ht_params); 3093 } 3094 3095 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, 3096 const struct mlxsw_sp_fib *fib, 3097 u32 adj_index, u16 ecmp_size, 3098 u32 new_adj_index, 3099 u16 new_ecmp_size) 3100 { 3101 char 
raleu_pl[MLXSW_REG_RALEU_LEN]; 3102 3103 mlxsw_reg_raleu_pack(raleu_pl, 3104 (enum mlxsw_reg_ralxx_protocol) fib->proto, 3105 fib->vr->id, adj_index, ecmp_size, new_adj_index, 3106 new_ecmp_size); 3107 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl); 3108 } 3109 3110 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, 3111 struct mlxsw_sp_nexthop_group *nh_grp, 3112 u32 old_adj_index, u16 old_ecmp_size) 3113 { 3114 struct mlxsw_sp_fib_entry *fib_entry; 3115 struct mlxsw_sp_fib *fib = NULL; 3116 int err; 3117 3118 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { 3119 if (fib == fib_entry->fib_node->fib) 3120 continue; 3121 fib = fib_entry->fib_node->fib; 3122 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib, 3123 old_adj_index, 3124 old_ecmp_size, 3125 nh_grp->adj_index, 3126 nh_grp->ecmp_size); 3127 if (err) 3128 return err; 3129 } 3130 return 0; 3131 } 3132 3133 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, 3134 struct mlxsw_sp_nexthop *nh) 3135 { 3136 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; 3137 char ratr_pl[MLXSW_REG_RATR_LEN]; 3138 3139 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, 3140 true, MLXSW_REG_RATR_TYPE_ETHERNET, 3141 adj_index, neigh_entry->rif); 3142 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); 3143 if (nh->counter_valid) 3144 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true); 3145 else 3146 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false); 3147 3148 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); 3149 } 3150 3151 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, 3152 struct mlxsw_sp_nexthop *nh) 3153 { 3154 int i; 3155 3156 for (i = 0; i < nh->num_adj_entries; i++) { 3157 int err; 3158 3159 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh); 3160 if (err) 3161 return err; 3162 } 3163 3164 return 0; 3165 } 3166 3167 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, 3168 u32 adj_index, 3169 struct mlxsw_sp_nexthop *nh) 3170 { 3171 const struct mlxsw_sp_ipip_ops *ipip_ops; 3172 3173 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt]; 3174 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry); 3175 } 3176 3177 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, 3178 u32 adj_index, 3179 struct mlxsw_sp_nexthop *nh) 3180 { 3181 int i; 3182 3183 for (i = 0; i < nh->num_adj_entries; i++) { 3184 int err; 3185 3186 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i, 3187 nh); 3188 if (err) 3189 return err; 3190 } 3191 3192 return 0; 3193 } 3194 3195 static int 3196 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, 3197 struct mlxsw_sp_nexthop_group *nh_grp, 3198 bool reallocate) 3199 { 3200 u32 adj_index = nh_grp->adj_index; /* base */ 3201 struct mlxsw_sp_nexthop *nh; 3202 int i; 3203 int err; 3204 3205 for (i = 0; i < nh_grp->count; i++) { 3206 nh = &nh_grp->nexthops[i]; 3207 3208 if (!nh->should_offload) { 3209 nh->offloaded = 0; 3210 continue; 3211 } 3212 3213 if (nh->update || reallocate) { 3214 switch (nh->type) { 3215 case MLXSW_SP_NEXTHOP_TYPE_ETH: 3216 err = mlxsw_sp_nexthop_update 3217 (mlxsw_sp, adj_index, nh); 3218 break; 3219 case MLXSW_SP_NEXTHOP_TYPE_IPIP: 3220 err = mlxsw_sp_nexthop_ipip_update 3221 (mlxsw_sp, adj_index, nh); 3222 break; 3223 } 3224 if (err) 3225 return err; 3226 nh->update = 0; 3227 nh->offloaded = 1; 3228 } 3229 adj_index += nh->num_adj_entries; 3230 } 3231 return 0; 
3232 }
3233
3234 static bool
3235 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3236 const struct mlxsw_sp_fib_entry *fib_entry);
3237
3238 static int
3239 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3240 struct mlxsw_sp_nexthop_group *nh_grp)
3241 {
3242 struct mlxsw_sp_fib_entry *fib_entry;
3243 int err;
3244
3245 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3246 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3247 fib_entry))
3248 continue;
3249 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3250 if (err)
3251 return err;
3252 }
3253 return 0;
3254 }
3255
3256 static void
3257 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3258 enum mlxsw_reg_ralue_op op, int err);
3259
3260 static void
3261 mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3262 {
3263 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3264 struct mlxsw_sp_fib_entry *fib_entry;
3265
3266 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3267 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3268 fib_entry))
3269 continue;
3270 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3271 }
3272 }
3273
3274 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3275 {
3276 /* Valid sizes for an adjacency group are:
3277 * 1-64, 512, 1024, 2048 and 4096.
3278 */
3279 if (*p_adj_grp_size <= 64)
3280 return;
3281 else if (*p_adj_grp_size <= 512)
3282 *p_adj_grp_size = 512;
3283 else if (*p_adj_grp_size <= 1024)
3284 *p_adj_grp_size = 1024;
3285 else if (*p_adj_grp_size <= 2048)
3286 *p_adj_grp_size = 2048;
3287 else
3288 *p_adj_grp_size = 4096;
3289 }
3290
3291 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3292 unsigned int alloc_size)
3293 {
3294 if (alloc_size >= 4096)
3295 *p_adj_grp_size = 4096;
3296 else if (alloc_size >= 2048)
3297 *p_adj_grp_size = 2048;
3298 else if (alloc_size >= 1024)
3299 *p_adj_grp_size = 1024;
3300 else if (alloc_size >= 512)
3301 *p_adj_grp_size = 512;
3302 }
3303
3304 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3305 u16 *p_adj_grp_size)
3306 {
3307 unsigned int alloc_size;
3308 int err;
3309
3310 /* Round up the requested group size to the next size supported
3311 * by the device and make sure the request can be satisfied.
3312 */
3313 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3314 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3315 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3316 *p_adj_grp_size, &alloc_size);
3317 if (err)
3318 return err;
3319 /* It is possible the allocation results in more allocated
3320 * entries than requested. Try to use as many of them as
3321 * possible.
3322 */ 3323 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size); 3324 3325 return 0; 3326 } 3327 3328 static void 3329 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp) 3330 { 3331 int i, g = 0, sum_norm_weight = 0; 3332 struct mlxsw_sp_nexthop *nh; 3333 3334 for (i = 0; i < nh_grp->count; i++) { 3335 nh = &nh_grp->nexthops[i]; 3336 3337 if (!nh->should_offload) 3338 continue; 3339 if (g > 0) 3340 g = gcd(nh->nh_weight, g); 3341 else 3342 g = nh->nh_weight; 3343 } 3344 3345 for (i = 0; i < nh_grp->count; i++) { 3346 nh = &nh_grp->nexthops[i]; 3347 3348 if (!nh->should_offload) 3349 continue; 3350 nh->norm_nh_weight = nh->nh_weight / g; 3351 sum_norm_weight += nh->norm_nh_weight; 3352 } 3353 3354 nh_grp->sum_norm_weight = sum_norm_weight; 3355 } 3356 3357 static void 3358 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp) 3359 { 3360 int total = nh_grp->sum_norm_weight; 3361 u16 ecmp_size = nh_grp->ecmp_size; 3362 int i, weight = 0, lower_bound = 0; 3363 3364 for (i = 0; i < nh_grp->count; i++) { 3365 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; 3366 int upper_bound; 3367 3368 if (!nh->should_offload) 3369 continue; 3370 weight += nh->norm_nh_weight; 3371 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total); 3372 nh->num_adj_entries = upper_bound - lower_bound; 3373 lower_bound = upper_bound; 3374 } 3375 } 3376 3377 static void 3378 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, 3379 struct mlxsw_sp_nexthop_group *nh_grp) 3380 { 3381 u16 ecmp_size, old_ecmp_size; 3382 struct mlxsw_sp_nexthop *nh; 3383 bool offload_change = false; 3384 u32 adj_index; 3385 bool old_adj_index_valid; 3386 u32 old_adj_index; 3387 int i; 3388 int err; 3389 3390 if (!nh_grp->gateway) { 3391 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); 3392 return; 3393 } 3394 3395 for (i = 0; i < nh_grp->count; i++) { 3396 nh = &nh_grp->nexthops[i]; 3397 3398 if (nh->should_offload != nh->offloaded) { 3399 offload_change = true; 3400 if (nh->should_offload) 3401 nh->update = 1; 3402 } 3403 } 3404 if (!offload_change) { 3405 /* Nothing was added or removed, so no need to reallocate. Just 3406 * update MAC on existing adjacency indexes. 3407 */ 3408 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false); 3409 if (err) { 3410 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); 3411 goto set_trap; 3412 } 3413 return; 3414 } 3415 mlxsw_sp_nexthop_group_normalize(nh_grp); 3416 if (!nh_grp->sum_norm_weight) 3417 /* No neigh of this group is connected so we just set 3418 * the trap and let everthing flow through kernel. 3419 */ 3420 goto set_trap; 3421 3422 ecmp_size = nh_grp->sum_norm_weight; 3423 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size); 3424 if (err) 3425 /* No valid allocation size available. */ 3426 goto set_trap; 3427 3428 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 3429 ecmp_size, &adj_index); 3430 if (err) { 3431 /* We ran out of KVD linear space, just set the 3432 * trap and let everything flow through kernel. 
3433 */ 3434 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n"); 3435 goto set_trap; 3436 } 3437 old_adj_index_valid = nh_grp->adj_index_valid; 3438 old_adj_index = nh_grp->adj_index; 3439 old_ecmp_size = nh_grp->ecmp_size; 3440 nh_grp->adj_index_valid = 1; 3441 nh_grp->adj_index = adj_index; 3442 nh_grp->ecmp_size = ecmp_size; 3443 mlxsw_sp_nexthop_group_rebalance(nh_grp); 3444 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true); 3445 if (err) { 3446 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); 3447 goto set_trap; 3448 } 3449 3450 if (!old_adj_index_valid) { 3451 /* The trap was set for fib entries, so we have to call 3452 * fib entry update to unset it and use adjacency index. 3453 */ 3454 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); 3455 if (err) { 3456 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n"); 3457 goto set_trap; 3458 } 3459 return; 3460 } 3461 3462 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp, 3463 old_adj_index, old_ecmp_size); 3464 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 3465 old_ecmp_size, old_adj_index); 3466 if (err) { 3467 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); 3468 goto set_trap; 3469 } 3470 3471 /* Offload state within the group changed, so update the flags. */ 3472 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp); 3473 3474 return; 3475 3476 set_trap: 3477 old_adj_index_valid = nh_grp->adj_index_valid; 3478 nh_grp->adj_index_valid = 0; 3479 for (i = 0; i < nh_grp->count; i++) { 3480 nh = &nh_grp->nexthops[i]; 3481 nh->offloaded = 0; 3482 } 3483 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); 3484 if (err) 3485 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n"); 3486 if (old_adj_index_valid) 3487 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 3488 nh_grp->ecmp_size, nh_grp->adj_index); 3489 } 3490 3491 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, 3492 bool removing) 3493 { 3494 if (!removing) 3495 nh->should_offload = 1; 3496 else 3497 nh->should_offload = 0; 3498 nh->update = 1; 3499 } 3500 3501 static int 3502 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp, 3503 struct mlxsw_sp_neigh_entry *neigh_entry) 3504 { 3505 struct neighbour *n, *old_n = neigh_entry->key.n; 3506 struct mlxsw_sp_nexthop *nh; 3507 bool entry_connected; 3508 u8 nud_state, dead; 3509 int err; 3510 3511 nh = list_first_entry(&neigh_entry->nexthop_list, 3512 struct mlxsw_sp_nexthop, neigh_list_node); 3513 3514 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); 3515 if (!n) { 3516 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, 3517 nh->rif->dev); 3518 if (IS_ERR(n)) 3519 return PTR_ERR(n); 3520 neigh_event_send(n, NULL); 3521 } 3522 3523 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); 3524 neigh_entry->key.n = n; 3525 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); 3526 if (err) 3527 goto err_neigh_entry_insert; 3528 3529 read_lock_bh(&n->lock); 3530 nud_state = n->nud_state; 3531 dead = n->dead; 3532 read_unlock_bh(&n->lock); 3533 entry_connected = nud_state & NUD_VALID && !dead; 3534 3535 list_for_each_entry(nh, &neigh_entry->nexthop_list, 3536 neigh_list_node) { 3537 neigh_release(old_n); 3538 neigh_clone(n); 3539 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected); 3540 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 3541 } 3542 3543 
neigh_release(n);
3544
3545 return 0;
3546
3547 err_neigh_entry_insert:
3548 neigh_entry->key.n = old_n;
3549 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3550 neigh_release(n);
3551 return err;
3552 }
3553
3554 static void
3555 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3556 struct mlxsw_sp_neigh_entry *neigh_entry,
3557 bool removing, bool dead)
3558 {
3559 struct mlxsw_sp_nexthop *nh;
3560
3561 if (list_empty(&neigh_entry->nexthop_list))
3562 return;
3563
3564 if (dead) {
3565 int err;
3566
3567 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
3568 neigh_entry);
3569 if (err)
3570 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
3571 return;
3572 }
3573
3574 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3575 neigh_list_node) {
3576 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3577 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3578 }
3579 }
3580
3581 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3582 struct mlxsw_sp_rif *rif)
3583 {
3584 if (nh->rif)
3585 return;
3586
3587 nh->rif = rif;
3588 list_add(&nh->rif_list_node, &rif->nexthop_list);
3589 }
3590
3591 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3592 {
3593 if (!nh->rif)
3594 return;
3595
3596 list_del(&nh->rif_list_node);
3597 nh->rif = NULL;
3598 }
3599
3600 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3601 struct mlxsw_sp_nexthop *nh)
3602 {
3603 struct mlxsw_sp_neigh_entry *neigh_entry;
3604 struct neighbour *n;
3605 u8 nud_state, dead;
3606 int err;
3607
3608 if (!nh->nh_grp->gateway || nh->neigh_entry)
3609 return 0;
3610
3611 /* Take a reference on the neigh here to ensure it is not
3612 * destroyed before the nexthop entry is finished with it.
3613 * The reference is taken either by neigh_lookup() or
3614 * by neigh_create() in case n is not found.
3615 */ 3616 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); 3617 if (!n) { 3618 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, 3619 nh->rif->dev); 3620 if (IS_ERR(n)) 3621 return PTR_ERR(n); 3622 neigh_event_send(n, NULL); 3623 } 3624 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); 3625 if (!neigh_entry) { 3626 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); 3627 if (IS_ERR(neigh_entry)) { 3628 err = -EINVAL; 3629 goto err_neigh_entry_create; 3630 } 3631 } 3632 3633 /* If that is the first nexthop connected to that neigh, add to 3634 * nexthop_neighs_list 3635 */ 3636 if (list_empty(&neigh_entry->nexthop_list)) 3637 list_add_tail(&neigh_entry->nexthop_neighs_list_node, 3638 &mlxsw_sp->router->nexthop_neighs_list); 3639 3640 nh->neigh_entry = neigh_entry; 3641 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list); 3642 read_lock_bh(&n->lock); 3643 nud_state = n->nud_state; 3644 dead = n->dead; 3645 read_unlock_bh(&n->lock); 3646 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead)); 3647 3648 return 0; 3649 3650 err_neigh_entry_create: 3651 neigh_release(n); 3652 return err; 3653 } 3654 3655 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, 3656 struct mlxsw_sp_nexthop *nh) 3657 { 3658 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; 3659 struct neighbour *n; 3660 3661 if (!neigh_entry) 3662 return; 3663 n = neigh_entry->key.n; 3664 3665 __mlxsw_sp_nexthop_neigh_update(nh, true); 3666 list_del(&nh->neigh_list_node); 3667 nh->neigh_entry = NULL; 3668 3669 /* If that is the last nexthop connected to that neigh, remove from 3670 * nexthop_neighs_list 3671 */ 3672 if (list_empty(&neigh_entry->nexthop_list)) 3673 list_del(&neigh_entry->nexthop_neighs_list_node); 3674 3675 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) 3676 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); 3677 3678 neigh_release(n); 3679 } 3680 3681 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev) 3682 { 3683 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); 3684 3685 return ul_dev ? 
(ul_dev->flags & IFF_UP) : true; 3686 } 3687 3688 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, 3689 struct mlxsw_sp_nexthop *nh, 3690 struct mlxsw_sp_ipip_entry *ipip_entry) 3691 { 3692 bool removing; 3693 3694 if (!nh->nh_grp->gateway || nh->ipip_entry) 3695 return; 3696 3697 nh->ipip_entry = ipip_entry; 3698 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev); 3699 __mlxsw_sp_nexthop_neigh_update(nh, removing); 3700 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common); 3701 } 3702 3703 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp, 3704 struct mlxsw_sp_nexthop *nh) 3705 { 3706 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry; 3707 3708 if (!ipip_entry) 3709 return; 3710 3711 __mlxsw_sp_nexthop_neigh_update(nh, true); 3712 nh->ipip_entry = NULL; 3713 } 3714 3715 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp, 3716 const struct fib_nh *fib_nh, 3717 enum mlxsw_sp_ipip_type *p_ipipt) 3718 { 3719 struct net_device *dev = fib_nh->fib_nh_dev; 3720 3721 return dev && 3722 fib_nh->nh_parent->fib_type == RTN_UNICAST && 3723 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt); 3724 } 3725 3726 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp, 3727 struct mlxsw_sp_nexthop *nh) 3728 { 3729 switch (nh->type) { 3730 case MLXSW_SP_NEXTHOP_TYPE_ETH: 3731 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); 3732 mlxsw_sp_nexthop_rif_fini(nh); 3733 break; 3734 case MLXSW_SP_NEXTHOP_TYPE_IPIP: 3735 mlxsw_sp_nexthop_rif_fini(nh); 3736 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); 3737 break; 3738 } 3739 } 3740 3741 static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp, 3742 struct mlxsw_sp_nexthop *nh, 3743 struct fib_nh *fib_nh) 3744 { 3745 const struct mlxsw_sp_ipip_ops *ipip_ops; 3746 struct net_device *dev = fib_nh->fib_nh_dev; 3747 struct mlxsw_sp_ipip_entry *ipip_entry; 3748 struct mlxsw_sp_rif *rif; 3749 int err; 3750 3751 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev); 3752 if (ipip_entry) { 3753 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; 3754 if (ipip_ops->can_offload(mlxsw_sp, dev, 3755 MLXSW_SP_L3_PROTO_IPV4)) { 3756 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; 3757 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); 3758 return 0; 3759 } 3760 } 3761 3762 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; 3763 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 3764 if (!rif) 3765 return 0; 3766 3767 mlxsw_sp_nexthop_rif_init(nh, rif); 3768 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); 3769 if (err) 3770 goto err_neigh_init; 3771 3772 return 0; 3773 3774 err_neigh_init: 3775 mlxsw_sp_nexthop_rif_fini(nh); 3776 return err; 3777 } 3778 3779 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp, 3780 struct mlxsw_sp_nexthop *nh) 3781 { 3782 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); 3783 } 3784 3785 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, 3786 struct mlxsw_sp_nexthop_group *nh_grp, 3787 struct mlxsw_sp_nexthop *nh, 3788 struct fib_nh *fib_nh) 3789 { 3790 struct net_device *dev = fib_nh->fib_nh_dev; 3791 struct in_device *in_dev; 3792 int err; 3793 3794 nh->nh_grp = nh_grp; 3795 nh->key.fib_nh = fib_nh; 3796 #ifdef CONFIG_IP_ROUTE_MULTIPATH 3797 nh->nh_weight = fib_nh->fib_nh_weight; 3798 #else 3799 nh->nh_weight = 1; 3800 #endif 3801 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4)); 3802 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); 3803 if (err) 3804 return err; 3805 3806 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); 3807 
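/* Thread the nexthop onto the router-wide list so it can be walked
 * with mlxsw_sp_nexthop_next(), e.g. when dumping adjacency entries.
 */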
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); 3808 3809 if (!dev) 3810 return 0; 3811 3812 in_dev = __in_dev_get_rtnl(dev); 3813 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && 3814 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) 3815 return 0; 3816 3817 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); 3818 if (err) 3819 goto err_nexthop_neigh_init; 3820 3821 return 0; 3822 3823 err_nexthop_neigh_init: 3824 mlxsw_sp_nexthop_remove(mlxsw_sp, nh); 3825 return err; 3826 } 3827 3828 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp, 3829 struct mlxsw_sp_nexthop *nh) 3830 { 3831 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); 3832 list_del(&nh->router_list_node); 3833 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); 3834 mlxsw_sp_nexthop_remove(mlxsw_sp, nh); 3835 } 3836 3837 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp, 3838 unsigned long event, struct fib_nh *fib_nh) 3839 { 3840 struct mlxsw_sp_nexthop_key key; 3841 struct mlxsw_sp_nexthop *nh; 3842 3843 if (mlxsw_sp->router->aborted) 3844 return; 3845 3846 key.fib_nh = fib_nh; 3847 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key); 3848 if (WARN_ON_ONCE(!nh)) 3849 return; 3850 3851 switch (event) { 3852 case FIB_EVENT_NH_ADD: 3853 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); 3854 break; 3855 case FIB_EVENT_NH_DEL: 3856 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); 3857 break; 3858 } 3859 3860 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 3861 } 3862 3863 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, 3864 struct mlxsw_sp_rif *rif) 3865 { 3866 struct mlxsw_sp_nexthop *nh; 3867 bool removing; 3868 3869 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) { 3870 switch (nh->type) { 3871 case MLXSW_SP_NEXTHOP_TYPE_ETH: 3872 removing = false; 3873 break; 3874 case MLXSW_SP_NEXTHOP_TYPE_IPIP: 3875 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev); 3876 break; 3877 default: 3878 WARN_ON(1); 3879 continue; 3880 } 3881 3882 __mlxsw_sp_nexthop_neigh_update(nh, removing); 3883 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 3884 } 3885 } 3886 3887 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp, 3888 struct mlxsw_sp_rif *old_rif, 3889 struct mlxsw_sp_rif *new_rif) 3890 { 3891 struct mlxsw_sp_nexthop *nh; 3892 3893 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list); 3894 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node) 3895 nh->rif = new_rif; 3896 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif); 3897 } 3898 3899 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, 3900 struct mlxsw_sp_rif *rif) 3901 { 3902 struct mlxsw_sp_nexthop *nh, *tmp; 3903 3904 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) { 3905 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); 3906 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 3907 } 3908 } 3909 3910 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, 3911 struct fib_info *fi) 3912 { 3913 const struct fib_nh *nh = fib_info_nh(fi, 0); 3914 3915 return nh->fib_nh_scope == RT_SCOPE_LINK || 3916 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL); 3917 } 3918 3919 static struct mlxsw_sp_nexthop_group * 3920 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) 3921 { 3922 unsigned int nhs = fib_info_num_path(fi); 3923 struct mlxsw_sp_nexthop_group *nh_grp; 3924 struct mlxsw_sp_nexthop *nh; 3925 struct fib_nh *fib_nh; 3926 int i; 3927 int err; 3928 3929 nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL); 3930 
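/* The struct_size() above sizes the allocation to cover the flexible
 * nexthops[] array at the end of struct mlxsw_sp_nexthop_group.
 */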
if (!nh_grp) 3931 return ERR_PTR(-ENOMEM); 3932 nh_grp->priv = fi; 3933 INIT_LIST_HEAD(&nh_grp->fib_list); 3934 nh_grp->neigh_tbl = &arp_tbl; 3935 3936 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi); 3937 nh_grp->count = nhs; 3938 fib_info_hold(fi); 3939 for (i = 0; i < nh_grp->count; i++) { 3940 nh = &nh_grp->nexthops[i]; 3941 fib_nh = fib_info_nh(fi, i); 3942 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh); 3943 if (err) 3944 goto err_nexthop4_init; 3945 } 3946 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); 3947 if (err) 3948 goto err_nexthop_group_insert; 3949 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); 3950 return nh_grp; 3951 3952 err_nexthop_group_insert: 3953 err_nexthop4_init: 3954 for (i--; i >= 0; i--) { 3955 nh = &nh_grp->nexthops[i]; 3956 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); 3957 } 3958 fib_info_put(fi); 3959 kfree(nh_grp); 3960 return ERR_PTR(err); 3961 } 3962 3963 static void 3964 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp, 3965 struct mlxsw_sp_nexthop_group *nh_grp) 3966 { 3967 struct mlxsw_sp_nexthop *nh; 3968 int i; 3969 3970 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); 3971 for (i = 0; i < nh_grp->count; i++) { 3972 nh = &nh_grp->nexthops[i]; 3973 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); 3974 } 3975 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); 3976 WARN_ON_ONCE(nh_grp->adj_index_valid); 3977 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp)); 3978 kfree(nh_grp); 3979 } 3980 3981 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp, 3982 struct mlxsw_sp_fib_entry *fib_entry, 3983 struct fib_info *fi) 3984 { 3985 struct mlxsw_sp_nexthop_group *nh_grp; 3986 3987 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi); 3988 if (!nh_grp) { 3989 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi); 3990 if (IS_ERR(nh_grp)) 3991 return PTR_ERR(nh_grp); 3992 } 3993 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list); 3994 fib_entry->nh_group = nh_grp; 3995 return 0; 3996 } 3997 3998 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp, 3999 struct mlxsw_sp_fib_entry *fib_entry) 4000 { 4001 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; 4002 4003 list_del(&fib_entry->nexthop_group_node); 4004 if (!list_empty(&nh_grp->fib_list)) 4005 return; 4006 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp); 4007 } 4008 4009 static bool 4010 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) 4011 { 4012 struct mlxsw_sp_fib4_entry *fib4_entry; 4013 4014 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry, 4015 common); 4016 return !fib4_entry->tos; 4017 } 4018 4019 static bool 4020 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) 4021 { 4022 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group; 4023 4024 switch (fib_entry->fib_node->fib->proto) { 4025 case MLXSW_SP_L3_PROTO_IPV4: 4026 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry)) 4027 return false; 4028 break; 4029 case MLXSW_SP_L3_PROTO_IPV6: 4030 break; 4031 } 4032 4033 switch (fib_entry->type) { 4034 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: 4035 return !!nh_group->adj_index_valid; 4036 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: 4037 return !!nh_group->nh_rif; 4038 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE: 4039 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: 4040 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP: 4041 return true; 4042 default: 4043 return false; 4044 } 4045 } 4046 4047 static struct mlxsw_sp_nexthop * 4048 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp, 4049 
const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->fib6_nh->fib_nh_gw6))
			return nh;
	}

	return NULL;
}

static void
mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
		nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		if (nh->offloaded)
			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
		else
			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	if (!list_is_singular(&nh_grp->fib_list))
		return;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
		list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				 list)->rt->fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
		struct mlxsw_sp_nexthop *nh;

		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
		if (nh && nh->offloaded)
			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
		else
			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		rt->fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_offload_set(fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_offload_set(fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
4165 { 4166 switch (fib_entry->fib_node->fib->proto) { 4167 case MLXSW_SP_L3_PROTO_IPV4: 4168 mlxsw_sp_fib4_entry_offload_unset(fib_entry); 4169 break; 4170 case MLXSW_SP_L3_PROTO_IPV6: 4171 mlxsw_sp_fib6_entry_offload_unset(fib_entry); 4172 break; 4173 } 4174 } 4175 4176 static void 4177 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, 4178 enum mlxsw_reg_ralue_op op, int err) 4179 { 4180 switch (op) { 4181 case MLXSW_REG_RALUE_OP_WRITE_DELETE: 4182 return mlxsw_sp_fib_entry_offload_unset(fib_entry); 4183 case MLXSW_REG_RALUE_OP_WRITE_WRITE: 4184 if (err) 4185 return; 4186 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) 4187 mlxsw_sp_fib_entry_offload_set(fib_entry); 4188 else 4189 mlxsw_sp_fib_entry_offload_unset(fib_entry); 4190 return; 4191 default: 4192 return; 4193 } 4194 } 4195 4196 static void 4197 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl, 4198 const struct mlxsw_sp_fib_entry *fib_entry, 4199 enum mlxsw_reg_ralue_op op) 4200 { 4201 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; 4202 enum mlxsw_reg_ralxx_protocol proto; 4203 u32 *p_dip; 4204 4205 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto; 4206 4207 switch (fib->proto) { 4208 case MLXSW_SP_L3_PROTO_IPV4: 4209 p_dip = (u32 *) fib_entry->fib_node->key.addr; 4210 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id, 4211 fib_entry->fib_node->key.prefix_len, 4212 *p_dip); 4213 break; 4214 case MLXSW_SP_L3_PROTO_IPV6: 4215 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id, 4216 fib_entry->fib_node->key.prefix_len, 4217 fib_entry->fib_node->key.addr); 4218 break; 4219 } 4220 } 4221 4222 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index) 4223 { 4224 enum mlxsw_reg_ratr_trap_action trap_action; 4225 char ratr_pl[MLXSW_REG_RATR_LEN]; 4226 int err; 4227 4228 if (mlxsw_sp->router->adj_discard_index_valid) 4229 return 0; 4230 4231 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, 4232 &mlxsw_sp->router->adj_discard_index); 4233 if (err) 4234 return err; 4235 4236 trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS; 4237 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true, 4238 MLXSW_REG_RATR_TYPE_ETHERNET, 4239 mlxsw_sp->router->adj_discard_index, rif_index); 4240 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action); 4241 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); 4242 if (err) 4243 goto err_ratr_write; 4244 4245 mlxsw_sp->router->adj_discard_index_valid = true; 4246 4247 return 0; 4248 4249 err_ratr_write: 4250 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, 4251 mlxsw_sp->router->adj_discard_index); 4252 return err; 4253 } 4254 4255 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, 4256 struct mlxsw_sp_fib_entry *fib_entry, 4257 enum mlxsw_reg_ralue_op op) 4258 { 4259 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group; 4260 char ralue_pl[MLXSW_REG_RALUE_LEN]; 4261 enum mlxsw_reg_ralue_trap_action trap_action; 4262 u16 trap_id = 0; 4263 u32 adjacency_index = 0; 4264 u16 ecmp_size = 0; 4265 int err; 4266 4267 /* In case the nexthop group adjacency index is valid, use it 4268 * with provided ECMP size. Otherwise, setup trap and pass 4269 * traffic to kernel. 
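 * A third case sits in between: the group has a RIF but no valid
 * adjacency index (e.g. all nexthops are currently unresolved).
 * Such entries point at a single discard adjacency so packets are
 * dropped in hardware instead of being trapped. Roughly:
 *   should_offload             -> ECMP adjacency group
 *   !adj_index_valid && nh_rif -> discard adjacency, ecmp_size 1
 *   otherwise                  -> trap to CPU (RTR_INGRESS0)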
4270 */ 4271 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) { 4272 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; 4273 adjacency_index = fib_entry->nh_group->adj_index; 4274 ecmp_size = fib_entry->nh_group->ecmp_size; 4275 } else if (!nh_group->adj_index_valid && nh_group->count && 4276 nh_group->nh_rif) { 4277 err = mlxsw_sp_adj_discard_write(mlxsw_sp, 4278 nh_group->nh_rif->rif_index); 4279 if (err) 4280 return err; 4281 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; 4282 adjacency_index = mlxsw_sp->router->adj_discard_index; 4283 ecmp_size = 1; 4284 } else { 4285 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; 4286 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; 4287 } 4288 4289 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); 4290 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, 4291 adjacency_index, ecmp_size); 4292 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 4293 } 4294 4295 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp, 4296 struct mlxsw_sp_fib_entry *fib_entry, 4297 enum mlxsw_reg_ralue_op op) 4298 { 4299 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif; 4300 enum mlxsw_reg_ralue_trap_action trap_action; 4301 char ralue_pl[MLXSW_REG_RALUE_LEN]; 4302 u16 trap_id = 0; 4303 u16 rif_index = 0; 4304 4305 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) { 4306 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; 4307 rif_index = rif->rif_index; 4308 } else { 4309 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; 4310 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; 4311 } 4312 4313 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); 4314 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 4315 rif_index); 4316 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 4317 } 4318 4319 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp, 4320 struct mlxsw_sp_fib_entry *fib_entry, 4321 enum mlxsw_reg_ralue_op op) 4322 { 4323 char ralue_pl[MLXSW_REG_RALUE_LEN]; 4324 4325 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); 4326 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); 4327 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 4328 } 4329 4330 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp, 4331 struct mlxsw_sp_fib_entry *fib_entry, 4332 enum mlxsw_reg_ralue_op op) 4333 { 4334 enum mlxsw_reg_ralue_trap_action trap_action; 4335 char ralue_pl[MLXSW_REG_RALUE_LEN]; 4336 4337 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR; 4338 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); 4339 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0); 4340 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 4341 } 4342 4343 static int 4344 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp, 4345 struct mlxsw_sp_fib_entry *fib_entry, 4346 enum mlxsw_reg_ralue_op op) 4347 { 4348 enum mlxsw_reg_ralue_trap_action trap_action; 4349 char ralue_pl[MLXSW_REG_RALUE_LEN]; 4350 u16 trap_id; 4351 4352 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; 4353 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1; 4354 4355 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); 4356 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0); 4357 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 4358 } 4359 4360 static int 4361 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp, 4362 struct mlxsw_sp_fib_entry *fib_entry, 4363 enum mlxsw_reg_ralue_op op) 4364 { 4365 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry; 4366 const 
struct mlxsw_sp_ipip_ops *ipip_ops; 4367 4368 if (WARN_ON(!ipip_entry)) 4369 return -EINVAL; 4370 4371 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; 4372 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op, 4373 fib_entry->decap.tunnel_index); 4374 } 4375 4376 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp, 4377 struct mlxsw_sp_fib_entry *fib_entry, 4378 enum mlxsw_reg_ralue_op op) 4379 { 4380 char ralue_pl[MLXSW_REG_RALUE_LEN]; 4381 4382 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); 4383 mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl, 4384 fib_entry->decap.tunnel_index); 4385 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 4386 } 4387 4388 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, 4389 struct mlxsw_sp_fib_entry *fib_entry, 4390 enum mlxsw_reg_ralue_op op) 4391 { 4392 switch (fib_entry->type) { 4393 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: 4394 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op); 4395 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: 4396 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op); 4397 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP: 4398 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op); 4399 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE: 4400 return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op); 4401 case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE: 4402 return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry, 4403 op); 4404 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: 4405 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, 4406 fib_entry, op); 4407 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP: 4408 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op); 4409 } 4410 return -EINVAL; 4411 } 4412 4413 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, 4414 struct mlxsw_sp_fib_entry *fib_entry, 4415 enum mlxsw_reg_ralue_op op) 4416 { 4417 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op); 4418 4419 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err); 4420 4421 return err; 4422 } 4423 4424 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, 4425 struct mlxsw_sp_fib_entry *fib_entry) 4426 { 4427 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, 4428 MLXSW_REG_RALUE_OP_WRITE_WRITE); 4429 } 4430 4431 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp, 4432 struct mlxsw_sp_fib_entry *fib_entry) 4433 { 4434 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, 4435 MLXSW_REG_RALUE_OP_WRITE_DELETE); 4436 } 4437 4438 static int 4439 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, 4440 const struct fib_entry_notifier_info *fen_info, 4441 struct mlxsw_sp_fib_entry *fib_entry) 4442 { 4443 struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev; 4444 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) }; 4445 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id); 4446 struct mlxsw_sp_ipip_entry *ipip_entry; 4447 struct fib_info *fi = fen_info->fi; 4448 4449 switch (fen_info->type) { 4450 case RTN_LOCAL: 4451 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev, 4452 MLXSW_SP_L3_PROTO_IPV4, dip); 4453 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) { 4454 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP; 4455 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, 4456 fib_entry, 4457 ipip_entry); 4458 } 4459 if (mlxsw_sp_nve_ipv4_route_is_decap(mlxsw_sp, tb_id, 4460 dip.addr4)) { 4461 u32 t_index; 4462 4463 t_index = mlxsw_sp_nve_decap_tunnel_index_get(mlxsw_sp); 4464 fib_entry->decap.tunnel_index = t_index; 4465 fib_entry->type = 
MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP; 4466 return 0; 4467 } 4468 /* fall through */ 4469 case RTN_BROADCAST: 4470 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; 4471 return 0; 4472 case RTN_BLACKHOLE: 4473 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; 4474 return 0; 4475 case RTN_UNREACHABLE: /* fall through */ 4476 case RTN_PROHIBIT: 4477 /* Packets hitting these routes need to be trapped, but 4478 * can do so with a lower priority than packets directed 4479 * at the host, so use action type local instead of trap. 4480 */ 4481 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE; 4482 return 0; 4483 case RTN_UNICAST: 4484 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi)) 4485 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; 4486 else 4487 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; 4488 return 0; 4489 default: 4490 return -EINVAL; 4491 } 4492 } 4493 4494 static struct mlxsw_sp_fib4_entry * 4495 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, 4496 struct mlxsw_sp_fib_node *fib_node, 4497 const struct fib_entry_notifier_info *fen_info) 4498 { 4499 struct mlxsw_sp_fib4_entry *fib4_entry; 4500 struct mlxsw_sp_fib_entry *fib_entry; 4501 int err; 4502 4503 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL); 4504 if (!fib4_entry) 4505 return ERR_PTR(-ENOMEM); 4506 fib_entry = &fib4_entry->common; 4507 4508 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry); 4509 if (err) 4510 goto err_fib4_entry_type_set; 4511 4512 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi); 4513 if (err) 4514 goto err_nexthop4_group_get; 4515 4516 fib4_entry->prio = fen_info->fi->fib_priority; 4517 fib4_entry->tb_id = fen_info->tb_id; 4518 fib4_entry->type = fen_info->type; 4519 fib4_entry->tos = fen_info->tos; 4520 4521 fib_entry->fib_node = fib_node; 4522 4523 return fib4_entry; 4524 4525 err_nexthop4_group_get: 4526 err_fib4_entry_type_set: 4527 kfree(fib4_entry); 4528 return ERR_PTR(err); 4529 } 4530 4531 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, 4532 struct mlxsw_sp_fib4_entry *fib4_entry) 4533 { 4534 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common); 4535 kfree(fib4_entry); 4536 } 4537 4538 static struct mlxsw_sp_fib4_entry * 4539 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, 4540 const struct fib_entry_notifier_info *fen_info) 4541 { 4542 struct mlxsw_sp_fib4_entry *fib4_entry; 4543 struct mlxsw_sp_fib_node *fib_node; 4544 struct mlxsw_sp_fib *fib; 4545 struct mlxsw_sp_vr *vr; 4546 4547 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id); 4548 if (!vr) 4549 return NULL; 4550 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4); 4551 4552 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst, 4553 sizeof(fen_info->dst), 4554 fen_info->dst_len); 4555 if (!fib_node) 4556 return NULL; 4557 4558 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) { 4559 if (fib4_entry->tb_id == fen_info->tb_id && 4560 fib4_entry->tos == fen_info->tos && 4561 fib4_entry->type == fen_info->type && 4562 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) == 4563 fen_info->fi) { 4564 return fib4_entry; 4565 } 4566 } 4567 4568 return NULL; 4569 } 4570 4571 static const struct rhashtable_params mlxsw_sp_fib_ht_params = { 4572 .key_offset = offsetof(struct mlxsw_sp_fib_node, key), 4573 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node), 4574 .key_len = sizeof(struct mlxsw_sp_fib_key), 4575 .automatic_shrinking = true, 4576 }; 4577 4578 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib, 4579 struct mlxsw_sp_fib_node 
*fib_node) 4580 { 4581 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node, 4582 mlxsw_sp_fib_ht_params); 4583 } 4584 4585 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib, 4586 struct mlxsw_sp_fib_node *fib_node) 4587 { 4588 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node, 4589 mlxsw_sp_fib_ht_params); 4590 } 4591 4592 static struct mlxsw_sp_fib_node * 4593 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr, 4594 size_t addr_len, unsigned char prefix_len) 4595 { 4596 struct mlxsw_sp_fib_key key; 4597 4598 memset(&key, 0, sizeof(key)); 4599 memcpy(key.addr, addr, addr_len); 4600 key.prefix_len = prefix_len; 4601 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params); 4602 } 4603 4604 static struct mlxsw_sp_fib_node * 4605 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr, 4606 size_t addr_len, unsigned char prefix_len) 4607 { 4608 struct mlxsw_sp_fib_node *fib_node; 4609 4610 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL); 4611 if (!fib_node) 4612 return NULL; 4613 4614 INIT_LIST_HEAD(&fib_node->entry_list); 4615 list_add(&fib_node->list, &fib->node_list); 4616 memcpy(fib_node->key.addr, addr, addr_len); 4617 fib_node->key.prefix_len = prefix_len; 4618 4619 return fib_node; 4620 } 4621 4622 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node) 4623 { 4624 list_del(&fib_node->list); 4625 WARN_ON(!list_empty(&fib_node->entry_list)); 4626 kfree(fib_node); 4627 } 4628 4629 static bool 4630 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, 4631 const struct mlxsw_sp_fib_entry *fib_entry) 4632 { 4633 return list_first_entry(&fib_node->entry_list, 4634 struct mlxsw_sp_fib_entry, list) == fib_entry; 4635 } 4636 4637 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp, 4638 struct mlxsw_sp_fib_node *fib_node) 4639 { 4640 struct mlxsw_sp_prefix_usage req_prefix_usage; 4641 struct mlxsw_sp_fib *fib = fib_node->fib; 4642 struct mlxsw_sp_lpm_tree *lpm_tree; 4643 int err; 4644 4645 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto]; 4646 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0) 4647 goto out; 4648 4649 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage); 4650 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len); 4651 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, 4652 fib->proto); 4653 if (IS_ERR(lpm_tree)) 4654 return PTR_ERR(lpm_tree); 4655 4656 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree); 4657 if (err) 4658 goto err_lpm_tree_replace; 4659 4660 out: 4661 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++; 4662 return 0; 4663 4664 err_lpm_tree_replace: 4665 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); 4666 return err; 4667 } 4668 4669 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp, 4670 struct mlxsw_sp_fib_node *fib_node) 4671 { 4672 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree; 4673 struct mlxsw_sp_prefix_usage req_prefix_usage; 4674 struct mlxsw_sp_fib *fib = fib_node->fib; 4675 int err; 4676 4677 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0) 4678 return; 4679 /* Try to construct a new LPM tree from the current prefix usage 4680 * minus the unused one. If we fail, continue using the old one. 
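 * The rebuild is purely an optimization: the old tree still
 * contains all the required prefix lengths, it merely keeps one
 * that no longer has any routes using it.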
4681 */ 4682 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage); 4683 mlxsw_sp_prefix_usage_clear(&req_prefix_usage, 4684 fib_node->key.prefix_len); 4685 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, 4686 fib->proto); 4687 if (IS_ERR(lpm_tree)) 4688 return; 4689 4690 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree); 4691 if (err) 4692 goto err_lpm_tree_replace; 4693 4694 return; 4695 4696 err_lpm_tree_replace: 4697 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); 4698 } 4699 4700 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp, 4701 struct mlxsw_sp_fib_node *fib_node, 4702 struct mlxsw_sp_fib *fib) 4703 { 4704 int err; 4705 4706 err = mlxsw_sp_fib_node_insert(fib, fib_node); 4707 if (err) 4708 return err; 4709 fib_node->fib = fib; 4710 4711 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node); 4712 if (err) 4713 goto err_fib_lpm_tree_link; 4714 4715 return 0; 4716 4717 err_fib_lpm_tree_link: 4718 fib_node->fib = NULL; 4719 mlxsw_sp_fib_node_remove(fib, fib_node); 4720 return err; 4721 } 4722 4723 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp, 4724 struct mlxsw_sp_fib_node *fib_node) 4725 { 4726 struct mlxsw_sp_fib *fib = fib_node->fib; 4727 4728 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node); 4729 fib_node->fib = NULL; 4730 mlxsw_sp_fib_node_remove(fib, fib_node); 4731 } 4732 4733 static struct mlxsw_sp_fib_node * 4734 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr, 4735 size_t addr_len, unsigned char prefix_len, 4736 enum mlxsw_sp_l3proto proto) 4737 { 4738 struct mlxsw_sp_fib_node *fib_node; 4739 struct mlxsw_sp_fib *fib; 4740 struct mlxsw_sp_vr *vr; 4741 int err; 4742 4743 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL); 4744 if (IS_ERR(vr)) 4745 return ERR_CAST(vr); 4746 fib = mlxsw_sp_vr_fib(vr, proto); 4747 4748 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len); 4749 if (fib_node) 4750 return fib_node; 4751 4752 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len); 4753 if (!fib_node) { 4754 err = -ENOMEM; 4755 goto err_fib_node_create; 4756 } 4757 4758 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib); 4759 if (err) 4760 goto err_fib_node_init; 4761 4762 return fib_node; 4763 4764 err_fib_node_init: 4765 mlxsw_sp_fib_node_destroy(fib_node); 4766 err_fib_node_create: 4767 mlxsw_sp_vr_put(mlxsw_sp, vr); 4768 return ERR_PTR(err); 4769 } 4770 4771 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp, 4772 struct mlxsw_sp_fib_node *fib_node) 4773 { 4774 struct mlxsw_sp_vr *vr = fib_node->fib->vr; 4775 4776 if (!list_empty(&fib_node->entry_list)) 4777 return; 4778 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node); 4779 mlxsw_sp_fib_node_destroy(fib_node); 4780 mlxsw_sp_vr_put(mlxsw_sp, vr); 4781 } 4782 4783 static struct mlxsw_sp_fib4_entry * 4784 mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, 4785 const struct mlxsw_sp_fib4_entry *new4_entry) 4786 { 4787 struct mlxsw_sp_fib4_entry *fib4_entry; 4788 4789 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) { 4790 if (fib4_entry->tb_id > new4_entry->tb_id) 4791 continue; 4792 if (fib4_entry->tb_id != new4_entry->tb_id) 4793 break; 4794 if (fib4_entry->tos > new4_entry->tos) 4795 continue; 4796 if (fib4_entry->prio >= new4_entry->prio || 4797 fib4_entry->tos < new4_entry->tos) 4798 return fib4_entry; 4799 } 4800 4801 return NULL; 4802 } 4803 4804 static int 4805 mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry, 4806 struct mlxsw_sp_fib4_entry 
*new4_entry) 4807 { 4808 struct mlxsw_sp_fib_node *fib_node; 4809 4810 if (WARN_ON(!fib4_entry)) 4811 return -EINVAL; 4812 4813 fib_node = fib4_entry->common.fib_node; 4814 list_for_each_entry_from(fib4_entry, &fib_node->entry_list, 4815 common.list) { 4816 if (fib4_entry->tb_id != new4_entry->tb_id || 4817 fib4_entry->tos != new4_entry->tos || 4818 fib4_entry->prio != new4_entry->prio) 4819 break; 4820 } 4821 4822 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list); 4823 return 0; 4824 } 4825 4826 static int 4827 mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry, 4828 bool replace, bool append) 4829 { 4830 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node; 4831 struct mlxsw_sp_fib4_entry *fib4_entry; 4832 4833 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry); 4834 4835 if (append) 4836 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry); 4837 if (replace && WARN_ON(!fib4_entry)) 4838 return -EINVAL; 4839 4840 /* Insert new entry before replaced one, so that we can later 4841 * remove the second. 4842 */ 4843 if (fib4_entry) { 4844 list_add_tail(&new4_entry->common.list, 4845 &fib4_entry->common.list); 4846 } else { 4847 struct mlxsw_sp_fib4_entry *last; 4848 4849 list_for_each_entry(last, &fib_node->entry_list, common.list) { 4850 if (new4_entry->tb_id > last->tb_id) 4851 break; 4852 fib4_entry = last; 4853 } 4854 4855 if (fib4_entry) 4856 list_add(&new4_entry->common.list, 4857 &fib4_entry->common.list); 4858 else 4859 list_add(&new4_entry->common.list, 4860 &fib_node->entry_list); 4861 } 4862 4863 return 0; 4864 } 4865 4866 static void 4867 mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry) 4868 { 4869 list_del(&fib4_entry->common.list); 4870 } 4871 4872 static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp, 4873 struct mlxsw_sp_fib_entry *fib_entry) 4874 { 4875 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; 4876 4877 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) 4878 return 0; 4879 4880 /* To prevent packet loss, overwrite the previously offloaded 4881 * entry. 
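 * The write below reuses the same LPM key, so the hardware lookup
 * switches from the old entry to the new one without a window in
 * which the prefix is missing; only the stale entry's offload
 * indication has to be cleared here.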
4882 */ 4883 if (!list_is_singular(&fib_node->entry_list)) { 4884 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE; 4885 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list); 4886 4887 mlxsw_sp_fib_entry_offload_refresh(n, op, 0); 4888 } 4889 4890 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); 4891 } 4892 4893 static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp, 4894 struct mlxsw_sp_fib_entry *fib_entry) 4895 { 4896 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; 4897 4898 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) 4899 return; 4900 4901 /* Promote the next entry by overwriting the deleted entry */ 4902 if (!list_is_singular(&fib_node->entry_list)) { 4903 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list); 4904 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE; 4905 4906 mlxsw_sp_fib_entry_update(mlxsw_sp, n); 4907 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0); 4908 return; 4909 } 4910 4911 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); 4912 } 4913 4914 static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, 4915 struct mlxsw_sp_fib4_entry *fib4_entry, 4916 bool replace, bool append) 4917 { 4918 int err; 4919 4920 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append); 4921 if (err) 4922 return err; 4923 4924 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common); 4925 if (err) 4926 goto err_fib_node_entry_add; 4927 4928 return 0; 4929 4930 err_fib_node_entry_add: 4931 mlxsw_sp_fib4_node_list_remove(fib4_entry); 4932 return err; 4933 } 4934 4935 static void 4936 mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, 4937 struct mlxsw_sp_fib4_entry *fib4_entry) 4938 { 4939 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common); 4940 mlxsw_sp_fib4_node_list_remove(fib4_entry); 4941 4942 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) 4943 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common); 4944 } 4945 4946 static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp, 4947 struct mlxsw_sp_fib4_entry *fib4_entry, 4948 bool replace) 4949 { 4950 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; 4951 struct mlxsw_sp_fib4_entry *replaced; 4952 4953 if (!replace) 4954 return; 4955 4956 /* We inserted the new entry before replaced one */ 4957 replaced = list_next_entry(fib4_entry, common.list); 4958 4959 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced); 4960 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced); 4961 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 4962 } 4963 4964 static int 4965 mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, 4966 const struct fib_entry_notifier_info *fen_info, 4967 bool replace, bool append) 4968 { 4969 struct mlxsw_sp_fib4_entry *fib4_entry; 4970 struct mlxsw_sp_fib_node *fib_node; 4971 int err; 4972 4973 if (mlxsw_sp->router->aborted) 4974 return 0; 4975 4976 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id, 4977 &fen_info->dst, sizeof(fen_info->dst), 4978 fen_info->dst_len, 4979 MLXSW_SP_L3_PROTO_IPV4); 4980 if (IS_ERR(fib_node)) { 4981 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n"); 4982 return PTR_ERR(fib_node); 4983 } 4984 4985 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info); 4986 if (IS_ERR(fib4_entry)) { 4987 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n"); 4988 err = PTR_ERR(fib4_entry); 4989 goto err_fib4_entry_create; 4990 } 4991 4992 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace, 4993 
append); 4994 if (err) { 4995 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n"); 4996 goto err_fib4_node_entry_link; 4997 } 4998 4999 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace); 5000 5001 return 0; 5002 5003 err_fib4_node_entry_link: 5004 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); 5005 err_fib4_entry_create: 5006 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5007 return err; 5008 } 5009 5010 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, 5011 struct fib_entry_notifier_info *fen_info) 5012 { 5013 struct mlxsw_sp_fib4_entry *fib4_entry; 5014 struct mlxsw_sp_fib_node *fib_node; 5015 5016 if (mlxsw_sp->router->aborted) 5017 return; 5018 5019 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); 5020 if (WARN_ON(!fib4_entry)) 5021 return; 5022 fib_node = fib4_entry->common.fib_node; 5023 5024 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry); 5025 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); 5026 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5027 } 5028 5029 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt) 5030 { 5031 /* Packets with link-local destination IP arriving to the router 5032 * are trapped to the CPU, so no need to program specific routes 5033 * for them. 5034 */ 5035 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) 5036 return true; 5037 5038 /* Multicast routes aren't supported, so ignore them. Neighbour 5039 * Discovery packets are specifically trapped. 5040 */ 5041 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST) 5042 return true; 5043 5044 /* Cloned routes are irrelevant in the forwarding path. */ 5045 if (rt->fib6_flags & RTF_CACHE) 5046 return true; 5047 5048 return false; 5049 } 5050 5051 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt) 5052 { 5053 struct mlxsw_sp_rt6 *mlxsw_sp_rt6; 5054 5055 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL); 5056 if (!mlxsw_sp_rt6) 5057 return ERR_PTR(-ENOMEM); 5058 5059 /* In case of route replace, replaced route is deleted with 5060 * no notification. Take reference to prevent accessing freed 5061 * memory. 
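 * The reference is dropped via mlxsw_sp_rt6_release() when this
 * tracking structure is destroyed.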
5062 */ 5063 mlxsw_sp_rt6->rt = rt; 5064 fib6_info_hold(rt); 5065 5066 return mlxsw_sp_rt6; 5067 } 5068 5069 #if IS_ENABLED(CONFIG_IPV6) 5070 static void mlxsw_sp_rt6_release(struct fib6_info *rt) 5071 { 5072 fib6_info_release(rt); 5073 } 5074 #else 5075 static void mlxsw_sp_rt6_release(struct fib6_info *rt) 5076 { 5077 } 5078 #endif 5079 5080 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) 5081 { 5082 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt); 5083 kfree(mlxsw_sp_rt6); 5084 } 5085 5086 static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt) 5087 { 5088 /* RTF_CACHE routes are ignored */ 5089 return !(rt->fib6_flags & RTF_ADDRCONF) && 5090 rt->fib6_nh->fib_nh_gw_family; 5091 } 5092 5093 static struct fib6_info * 5094 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) 5095 { 5096 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, 5097 list)->rt; 5098 } 5099 5100 static struct mlxsw_sp_fib6_entry * 5101 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, 5102 const struct fib6_info *nrt, bool replace) 5103 { 5104 struct mlxsw_sp_fib6_entry *fib6_entry; 5105 5106 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace) 5107 return NULL; 5108 5109 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { 5110 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); 5111 5112 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same 5113 * virtual router. 5114 */ 5115 if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id) 5116 continue; 5117 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) 5118 break; 5119 if (rt->fib6_metric < nrt->fib6_metric) 5120 continue; 5121 if (rt->fib6_metric == nrt->fib6_metric && 5122 mlxsw_sp_fib6_rt_can_mp(rt)) 5123 return fib6_entry; 5124 if (rt->fib6_metric > nrt->fib6_metric) 5125 break; 5126 } 5127 5128 return NULL; 5129 } 5130 5131 static struct mlxsw_sp_rt6 * 5132 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry, 5133 const struct fib6_info *rt) 5134 { 5135 struct mlxsw_sp_rt6 *mlxsw_sp_rt6; 5136 5137 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { 5138 if (mlxsw_sp_rt6->rt == rt) 5139 return mlxsw_sp_rt6; 5140 } 5141 5142 return NULL; 5143 } 5144 5145 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp, 5146 const struct fib6_info *rt, 5147 enum mlxsw_sp_ipip_type *ret) 5148 { 5149 return rt->fib6_nh->fib_nh_dev && 5150 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret); 5151 } 5152 5153 static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, 5154 struct mlxsw_sp_nexthop_group *nh_grp, 5155 struct mlxsw_sp_nexthop *nh, 5156 const struct fib6_info *rt) 5157 { 5158 const struct mlxsw_sp_ipip_ops *ipip_ops; 5159 struct mlxsw_sp_ipip_entry *ipip_entry; 5160 struct net_device *dev = rt->fib6_nh->fib_nh_dev; 5161 struct mlxsw_sp_rif *rif; 5162 int err; 5163 5164 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev); 5165 if (ipip_entry) { 5166 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; 5167 if (ipip_ops->can_offload(mlxsw_sp, dev, 5168 MLXSW_SP_L3_PROTO_IPV6)) { 5169 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; 5170 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); 5171 return 0; 5172 } 5173 } 5174 5175 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; 5176 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 5177 if (!rif) 5178 return 0; 5179 mlxsw_sp_nexthop_rif_init(nh, rif); 5180 5181 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); 5182 if (err) 5183 goto err_nexthop_neigh_init; 
5184 5185 return 0; 5186 5187 err_nexthop_neigh_init: 5188 mlxsw_sp_nexthop_rif_fini(nh); 5189 return err; 5190 } 5191 5192 static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp, 5193 struct mlxsw_sp_nexthop *nh) 5194 { 5195 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); 5196 } 5197 5198 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, 5199 struct mlxsw_sp_nexthop_group *nh_grp, 5200 struct mlxsw_sp_nexthop *nh, 5201 const struct fib6_info *rt) 5202 { 5203 struct net_device *dev = rt->fib6_nh->fib_nh_dev; 5204 5205 nh->nh_grp = nh_grp; 5206 nh->nh_weight = rt->fib6_nh->fib_nh_weight; 5207 memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr)); 5208 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); 5209 5210 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); 5211 5212 if (!dev) 5213 return 0; 5214 nh->ifindex = dev->ifindex; 5215 5216 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt); 5217 } 5218 5219 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, 5220 struct mlxsw_sp_nexthop *nh) 5221 { 5222 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh); 5223 list_del(&nh->router_list_node); 5224 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); 5225 } 5226 5227 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp, 5228 const struct fib6_info *rt) 5229 { 5230 return rt->fib6_nh->fib_nh_gw_family || 5231 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL); 5232 } 5233 5234 static struct mlxsw_sp_nexthop_group * 5235 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, 5236 struct mlxsw_sp_fib6_entry *fib6_entry) 5237 { 5238 struct mlxsw_sp_nexthop_group *nh_grp; 5239 struct mlxsw_sp_rt6 *mlxsw_sp_rt6; 5240 struct mlxsw_sp_nexthop *nh; 5241 int i = 0; 5242 int err; 5243 5244 nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6), 5245 GFP_KERNEL); 5246 if (!nh_grp) 5247 return ERR_PTR(-ENOMEM); 5248 INIT_LIST_HEAD(&nh_grp->fib_list); 5249 #if IS_ENABLED(CONFIG_IPV6) 5250 nh_grp->neigh_tbl = &nd_tbl; 5251 #endif 5252 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list, 5253 struct mlxsw_sp_rt6, list); 5254 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt); 5255 nh_grp->count = fib6_entry->nrt6; 5256 for (i = 0; i < nh_grp->count; i++) { 5257 struct fib6_info *rt = mlxsw_sp_rt6->rt; 5258 5259 nh = &nh_grp->nexthops[i]; 5260 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt); 5261 if (err) 5262 goto err_nexthop6_init; 5263 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list); 5264 } 5265 5266 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); 5267 if (err) 5268 goto err_nexthop_group_insert; 5269 5270 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); 5271 return nh_grp; 5272 5273 err_nexthop_group_insert: 5274 err_nexthop6_init: 5275 for (i--; i >= 0; i--) { 5276 nh = &nh_grp->nexthops[i]; 5277 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); 5278 } 5279 kfree(nh_grp); 5280 return ERR_PTR(err); 5281 } 5282 5283 static void 5284 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp, 5285 struct mlxsw_sp_nexthop_group *nh_grp) 5286 { 5287 struct mlxsw_sp_nexthop *nh; 5288 int i = nh_grp->count; 5289 5290 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); 5291 for (i--; i >= 0; i--) { 5292 nh = &nh_grp->nexthops[i]; 5293 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); 5294 } 5295 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); 5296 WARN_ON(nh_grp->adj_index_valid); 5297 kfree(nh_grp); 5298 } 5299 5300 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp, 5301 struct mlxsw_sp_fib6_entry *fib6_entry) 
5302 { 5303 struct mlxsw_sp_nexthop_group *nh_grp; 5304 5305 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry); 5306 if (!nh_grp) { 5307 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry); 5308 if (IS_ERR(nh_grp)) 5309 return PTR_ERR(nh_grp); 5310 } 5311 5312 list_add_tail(&fib6_entry->common.nexthop_group_node, 5313 &nh_grp->fib_list); 5314 fib6_entry->common.nh_group = nh_grp; 5315 5316 return 0; 5317 } 5318 5319 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp, 5320 struct mlxsw_sp_fib_entry *fib_entry) 5321 { 5322 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; 5323 5324 list_del(&fib_entry->nexthop_group_node); 5325 if (!list_empty(&nh_grp->fib_list)) 5326 return; 5327 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp); 5328 } 5329 5330 static int 5331 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp, 5332 struct mlxsw_sp_fib6_entry *fib6_entry) 5333 { 5334 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group; 5335 int err; 5336 5337 fib6_entry->common.nh_group = NULL; 5338 list_del(&fib6_entry->common.nexthop_group_node); 5339 5340 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry); 5341 if (err) 5342 goto err_nexthop6_group_get; 5343 5344 /* In case this entry is offloaded, then the adjacency index 5345 * currently associated with it in the device's table is that 5346 * of the old group. Start using the new one instead. 5347 */ 5348 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common); 5349 if (err) 5350 goto err_fib_node_entry_add; 5351 5352 if (list_empty(&old_nh_grp->fib_list)) 5353 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp); 5354 5355 return 0; 5356 5357 err_fib_node_entry_add: 5358 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); 5359 err_nexthop6_group_get: 5360 list_add_tail(&fib6_entry->common.nexthop_group_node, 5361 &old_nh_grp->fib_list); 5362 fib6_entry->common.nh_group = old_nh_grp; 5363 return err; 5364 } 5365 5366 static int 5367 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp, 5368 struct mlxsw_sp_fib6_entry *fib6_entry, 5369 struct fib6_info **rt_arr, unsigned int nrt6) 5370 { 5371 struct mlxsw_sp_rt6 *mlxsw_sp_rt6; 5372 int err, i; 5373 5374 for (i = 0; i < nrt6; i++) { 5375 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]); 5376 if (IS_ERR(mlxsw_sp_rt6)) { 5377 err = PTR_ERR(mlxsw_sp_rt6); 5378 goto err_rt6_create; 5379 } 5380 5381 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list); 5382 fib6_entry->nrt6++; 5383 } 5384 5385 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry); 5386 if (err) 5387 goto err_nexthop6_group_update; 5388 5389 return 0; 5390 5391 err_nexthop6_group_update: 5392 i = nrt6; 5393 err_rt6_create: 5394 for (i--; i >= 0; i--) { 5395 fib6_entry->nrt6--; 5396 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list, 5397 struct mlxsw_sp_rt6, list); 5398 list_del(&mlxsw_sp_rt6->list); 5399 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); 5400 } 5401 return err; 5402 } 5403 5404 static void 5405 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, 5406 struct mlxsw_sp_fib6_entry *fib6_entry, 5407 struct fib6_info **rt_arr, unsigned int nrt6) 5408 { 5409 struct mlxsw_sp_rt6 *mlxsw_sp_rt6; 5410 int i; 5411 5412 for (i = 0; i < nrt6; i++) { 5413 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, 5414 rt_arr[i]); 5415 if (WARN_ON_ONCE(!mlxsw_sp_rt6)) 5416 continue; 5417 5418 fib6_entry->nrt6--; 5419 list_del(&mlxsw_sp_rt6->list); 5420 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); 5421 } 5422 5423 
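	/* Rebuild the group so that it matches the reduced fib6_info
	 * list. The update is best-effort: on failure the entry keeps
	 * its current nexthop group.
	 */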
mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry); 5424 } 5425 5426 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, 5427 struct mlxsw_sp_fib_entry *fib_entry, 5428 const struct fib6_info *rt) 5429 { 5430 /* Packets hitting RTF_REJECT routes need to be discarded by the 5431 * stack. We can rely on their destination device not having a 5432 * RIF (it's the loopback device) and can thus use action type 5433 * local, which will cause them to be trapped with a lower 5434 * priority than packets that need to be locally received. 5435 */ 5436 if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) 5437 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; 5438 else if (rt->fib6_type == RTN_BLACKHOLE) 5439 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; 5440 else if (rt->fib6_flags & RTF_REJECT) 5441 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE; 5442 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt)) 5443 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; 5444 else 5445 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; 5446 } 5447 5448 static void 5449 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry) 5450 { 5451 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp; 5452 5453 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list, 5454 list) { 5455 fib6_entry->nrt6--; 5456 list_del(&mlxsw_sp_rt6->list); 5457 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); 5458 } 5459 } 5460 5461 static struct mlxsw_sp_fib6_entry * 5462 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, 5463 struct mlxsw_sp_fib_node *fib_node, 5464 struct fib6_info **rt_arr, unsigned int nrt6) 5465 { 5466 struct mlxsw_sp_fib6_entry *fib6_entry; 5467 struct mlxsw_sp_fib_entry *fib_entry; 5468 struct mlxsw_sp_rt6 *mlxsw_sp_rt6; 5469 int err, i; 5470 5471 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL); 5472 if (!fib6_entry) 5473 return ERR_PTR(-ENOMEM); 5474 fib_entry = &fib6_entry->common; 5475 5476 INIT_LIST_HEAD(&fib6_entry->rt6_list); 5477 5478 for (i = 0; i < nrt6; i++) { 5479 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]); 5480 if (IS_ERR(mlxsw_sp_rt6)) { 5481 err = PTR_ERR(mlxsw_sp_rt6); 5482 goto err_rt6_create; 5483 } 5484 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list); 5485 fib6_entry->nrt6++; 5486 } 5487 5488 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]); 5489 5490 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry); 5491 if (err) 5492 goto err_nexthop6_group_get; 5493 5494 fib_entry->fib_node = fib_node; 5495 5496 return fib6_entry; 5497 5498 err_nexthop6_group_get: 5499 i = nrt6; 5500 err_rt6_create: 5501 for (i--; i >= 0; i--) { 5502 fib6_entry->nrt6--; 5503 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list, 5504 struct mlxsw_sp_rt6, list); 5505 list_del(&mlxsw_sp_rt6->list); 5506 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); 5507 } 5508 kfree(fib6_entry); 5509 return ERR_PTR(err); 5510 } 5511 5512 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, 5513 struct mlxsw_sp_fib6_entry *fib6_entry) 5514 { 5515 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); 5516 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry); 5517 WARN_ON(fib6_entry->nrt6); 5518 kfree(fib6_entry); 5519 } 5520 5521 static struct mlxsw_sp_fib6_entry * 5522 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, 5523 const struct fib6_info *nrt, bool replace) 5524 { 5525 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL; 5526 5527 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { 5528 struct fib6_info *rt = 
mlxsw_sp_fib6_entry_rt(fib6_entry); 5529 5530 if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id) 5531 continue; 5532 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) 5533 break; 5534 if (replace && rt->fib6_metric == nrt->fib6_metric) { 5535 if (mlxsw_sp_fib6_rt_can_mp(rt) == 5536 mlxsw_sp_fib6_rt_can_mp(nrt)) 5537 return fib6_entry; 5538 if (mlxsw_sp_fib6_rt_can_mp(nrt)) 5539 fallback = fallback ?: fib6_entry; 5540 } 5541 if (rt->fib6_metric > nrt->fib6_metric) 5542 return fallback ?: fib6_entry; 5543 } 5544 5545 return fallback; 5546 } 5547 5548 static int 5549 mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry, 5550 bool *p_replace) 5551 { 5552 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node; 5553 struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry); 5554 struct mlxsw_sp_fib6_entry *fib6_entry; 5555 5556 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, *p_replace); 5557 5558 if (*p_replace && !fib6_entry) 5559 *p_replace = false; 5560 5561 if (fib6_entry) { 5562 list_add_tail(&new6_entry->common.list, 5563 &fib6_entry->common.list); 5564 } else { 5565 struct mlxsw_sp_fib6_entry *last; 5566 5567 list_for_each_entry(last, &fib_node->entry_list, common.list) { 5568 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last); 5569 5570 if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id) 5571 break; 5572 fib6_entry = last; 5573 } 5574 5575 if (fib6_entry) 5576 list_add(&new6_entry->common.list, 5577 &fib6_entry->common.list); 5578 else 5579 list_add(&new6_entry->common.list, 5580 &fib_node->entry_list); 5581 } 5582 5583 return 0; 5584 } 5585 5586 static void 5587 mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry) 5588 { 5589 list_del(&fib6_entry->common.list); 5590 } 5591 5592 static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp, 5593 struct mlxsw_sp_fib6_entry *fib6_entry, 5594 bool *p_replace) 5595 { 5596 int err; 5597 5598 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, p_replace); 5599 if (err) 5600 return err; 5601 5602 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common); 5603 if (err) 5604 goto err_fib_node_entry_add; 5605 5606 return 0; 5607 5608 err_fib_node_entry_add: 5609 mlxsw_sp_fib6_node_list_remove(fib6_entry); 5610 return err; 5611 } 5612 5613 static void 5614 mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, 5615 struct mlxsw_sp_fib6_entry *fib6_entry) 5616 { 5617 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common); 5618 mlxsw_sp_fib6_node_list_remove(fib6_entry); 5619 } 5620 5621 static struct mlxsw_sp_fib6_entry * 5622 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp, 5623 const struct fib6_info *rt) 5624 { 5625 struct mlxsw_sp_fib6_entry *fib6_entry; 5626 struct mlxsw_sp_fib_node *fib_node; 5627 struct mlxsw_sp_fib *fib; 5628 struct mlxsw_sp_vr *vr; 5629 5630 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id); 5631 if (!vr) 5632 return NULL; 5633 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6); 5634 5635 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr, 5636 sizeof(rt->fib6_dst.addr), 5637 rt->fib6_dst.plen); 5638 if (!fib_node) 5639 return NULL; 5640 5641 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { 5642 struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry); 5643 5644 if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id && 5645 rt->fib6_metric == iter_rt->fib6_metric && 5646 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt)) 5647 return fib6_entry; 5648 } 5649 5650 return NULL; 5651 } 5652 5653 static 
void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp, 5654 struct mlxsw_sp_fib6_entry *fib6_entry, 5655 bool replace) 5656 { 5657 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; 5658 struct mlxsw_sp_fib6_entry *replaced; 5659 5660 if (!replace) 5661 return; 5662 5663 replaced = list_next_entry(fib6_entry, common.list); 5664 5665 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced); 5666 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced); 5667 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5668 } 5669 5670 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, 5671 struct fib6_info **rt_arr, 5672 unsigned int nrt6, bool replace) 5673 { 5674 struct mlxsw_sp_fib6_entry *fib6_entry; 5675 struct mlxsw_sp_fib_node *fib_node; 5676 struct fib6_info *rt = rt_arr[0]; 5677 int err; 5678 5679 if (mlxsw_sp->router->aborted) 5680 return 0; 5681 5682 if (rt->fib6_src.plen) 5683 return -EINVAL; 5684 5685 if (mlxsw_sp_fib6_rt_should_ignore(rt)) 5686 return 0; 5687 5688 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id, 5689 &rt->fib6_dst.addr, 5690 sizeof(rt->fib6_dst.addr), 5691 rt->fib6_dst.plen, 5692 MLXSW_SP_L3_PROTO_IPV6); 5693 if (IS_ERR(fib_node)) 5694 return PTR_ERR(fib_node); 5695 5696 /* Before creating a new entry, try to append route to an existing 5697 * multipath entry. 5698 */ 5699 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace); 5700 if (fib6_entry) { 5701 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, 5702 rt_arr, nrt6); 5703 if (err) 5704 goto err_fib6_entry_nexthop_add; 5705 return 0; 5706 } 5707 5708 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr, 5709 nrt6); 5710 if (IS_ERR(fib6_entry)) { 5711 err = PTR_ERR(fib6_entry); 5712 goto err_fib6_entry_create; 5713 } 5714 5715 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, &replace); 5716 if (err) 5717 goto err_fib6_node_entry_link; 5718 5719 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace); 5720 5721 return 0; 5722 5723 err_fib6_node_entry_link: 5724 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); 5725 err_fib6_entry_create: 5726 err_fib6_entry_nexthop_add: 5727 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5728 return err; 5729 } 5730 5731 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, 5732 struct fib6_info **rt_arr, 5733 unsigned int nrt6) 5734 { 5735 struct mlxsw_sp_fib6_entry *fib6_entry; 5736 struct mlxsw_sp_fib_node *fib_node; 5737 struct fib6_info *rt = rt_arr[0]; 5738 5739 if (mlxsw_sp->router->aborted) 5740 return; 5741 5742 if (mlxsw_sp_fib6_rt_should_ignore(rt)) 5743 return; 5744 5745 /* Multipath routes are first added to the FIB trie and only then 5746 * notified. If we vetoed the addition, we will get a delete 5747 * notification for a route we do not have. Therefore, do not warn if 5748 * route was not found. 5749 */ 5750 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt); 5751 if (!fib6_entry) 5752 return; 5753 5754 /* If not all the nexthops are deleted, then only reduce the nexthop 5755 * group. 
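 * The FIB entry itself is kept in that case, since it still
 * represents the remaining paths of the multipath route.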
5756 */ 5757 if (nrt6 != fib6_entry->nrt6) { 5758 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr, 5759 nrt6); 5760 return; 5761 } 5762 5763 fib_node = fib6_entry->common.fib_node; 5764 5765 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry); 5766 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); 5767 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5768 } 5769 5770 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp, 5771 enum mlxsw_reg_ralxx_protocol proto, 5772 u8 tree_id) 5773 { 5774 char ralta_pl[MLXSW_REG_RALTA_LEN]; 5775 char ralst_pl[MLXSW_REG_RALST_LEN]; 5776 int i, err; 5777 5778 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id); 5779 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); 5780 if (err) 5781 return err; 5782 5783 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id); 5784 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl); 5785 if (err) 5786 return err; 5787 5788 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { 5789 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i]; 5790 char raltb_pl[MLXSW_REG_RALTB_LEN]; 5791 char ralue_pl[MLXSW_REG_RALUE_LEN]; 5792 5793 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id); 5794 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), 5795 raltb_pl); 5796 if (err) 5797 return err; 5798 5799 mlxsw_reg_ralue_pack(ralue_pl, proto, 5800 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0); 5801 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); 5802 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), 5803 ralue_pl); 5804 if (err) 5805 return err; 5806 } 5807 5808 return 0; 5809 } 5810 5811 static struct mlxsw_sp_mr_table * 5812 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family) 5813 { 5814 if (family == RTNL_FAMILY_IPMR) 5815 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]; 5816 else 5817 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]; 5818 } 5819 5820 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp, 5821 struct mfc_entry_notifier_info *men_info, 5822 bool replace) 5823 { 5824 struct mlxsw_sp_mr_table *mrt; 5825 struct mlxsw_sp_vr *vr; 5826 5827 if (mlxsw_sp->router->aborted) 5828 return 0; 5829 5830 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL); 5831 if (IS_ERR(vr)) 5832 return PTR_ERR(vr); 5833 5834 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family); 5835 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace); 5836 } 5837 5838 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp, 5839 struct mfc_entry_notifier_info *men_info) 5840 { 5841 struct mlxsw_sp_mr_table *mrt; 5842 struct mlxsw_sp_vr *vr; 5843 5844 if (mlxsw_sp->router->aborted) 5845 return; 5846 5847 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id); 5848 if (WARN_ON(!vr)) 5849 return; 5850 5851 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family); 5852 mlxsw_sp_mr_route_del(mrt, men_info->mfc); 5853 mlxsw_sp_vr_put(mlxsw_sp, vr); 5854 } 5855 5856 static int 5857 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp, 5858 struct vif_entry_notifier_info *ven_info) 5859 { 5860 struct mlxsw_sp_mr_table *mrt; 5861 struct mlxsw_sp_rif *rif; 5862 struct mlxsw_sp_vr *vr; 5863 5864 if (mlxsw_sp->router->aborted) 5865 return 0; 5866 5867 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL); 5868 if (IS_ERR(vr)) 5869 return PTR_ERR(vr); 5870 5871 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family); 5872 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev); 5873 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev, 5874 
ven_info->vif_index, 5875 ven_info->vif_flags, rif); 5876 } 5877 5878 static void 5879 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp, 5880 struct vif_entry_notifier_info *ven_info) 5881 { 5882 struct mlxsw_sp_mr_table *mrt; 5883 struct mlxsw_sp_vr *vr; 5884 5885 if (mlxsw_sp->router->aborted) 5886 return; 5887 5888 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id); 5889 if (WARN_ON(!vr)) 5890 return; 5891 5892 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family); 5893 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index); 5894 mlxsw_sp_vr_put(mlxsw_sp, vr); 5895 } 5896 5897 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) 5898 { 5899 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4; 5900 int err; 5901 5902 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, 5903 MLXSW_SP_LPM_TREE_MIN); 5904 if (err) 5905 return err; 5906 5907 /* The multicast router code does not need an abort trap as by default, 5908 * packets that don't match any routes are trapped to the CPU. 5909 */ 5910 5911 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6; 5912 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, 5913 MLXSW_SP_LPM_TREE_MIN + 1); 5914 } 5915 5916 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, 5917 struct mlxsw_sp_fib_node *fib_node) 5918 { 5919 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp; 5920 5921 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list, 5922 common.list) { 5923 bool do_break = &tmp->common.list == &fib_node->entry_list; 5924 5925 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry); 5926 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); 5927 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5928 /* Break when entry list is empty and node was freed. 5929 * Otherwise, we'll access freed memory in the next 5930 * iteration. 
5931 */ 5932 if (do_break) 5933 break; 5934 } 5935 } 5936 5937 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp, 5938 struct mlxsw_sp_fib_node *fib_node) 5939 { 5940 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp; 5941 5942 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list, 5943 common.list) { 5944 bool do_break = &tmp->common.list == &fib_node->entry_list; 5945 5946 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry); 5947 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); 5948 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5949 if (do_break) 5950 break; 5951 } 5952 } 5953 5954 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, 5955 struct mlxsw_sp_fib_node *fib_node) 5956 { 5957 switch (fib_node->fib->proto) { 5958 case MLXSW_SP_L3_PROTO_IPV4: 5959 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node); 5960 break; 5961 case MLXSW_SP_L3_PROTO_IPV6: 5962 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node); 5963 break; 5964 } 5965 } 5966 5967 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp, 5968 struct mlxsw_sp_vr *vr, 5969 enum mlxsw_sp_l3proto proto) 5970 { 5971 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto); 5972 struct mlxsw_sp_fib_node *fib_node, *tmp; 5973 5974 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) { 5975 bool do_break = &tmp->list == &fib->node_list; 5976 5977 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node); 5978 if (do_break) 5979 break; 5980 } 5981 } 5982 5983 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) 5984 { 5985 int i, j; 5986 5987 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { 5988 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i]; 5989 5990 if (!mlxsw_sp_vr_is_used(vr)) 5991 continue; 5992 5993 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++) 5994 mlxsw_sp_mr_table_flush(vr->mr_table[j]); 5995 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); 5996 5997 /* If virtual router was only used for IPv4, then it's no 5998 * longer used. 5999 */ 6000 if (!mlxsw_sp_vr_is_used(vr)) 6001 continue; 6002 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); 6003 } 6004 6005 /* After flushing all the routes, it is not possible that anyone is 6006 * still using the adjacency index that is discarding packets, so free 6007 * it in case it was allocated. 6008 */ 6009 if (!mlxsw_sp->router->adj_discard_index_valid) 6010 return; 6011 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, 6012 mlxsw_sp->router->adj_discard_index); 6013 mlxsw_sp->router->adj_discard_index_valid = false; 6014 } 6015 6016 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp) 6017 { 6018 int err; 6019 6020 if (mlxsw_sp->router->aborted) 6021 return; 6022 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
6023 mlxsw_sp_router_fib_flush(mlxsw_sp); 6024 mlxsw_sp->router->aborted = true; 6025 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); 6026 if (err) 6027 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n"); 6028 } 6029 6030 struct mlxsw_sp_fib6_event_work { 6031 struct fib6_info **rt_arr; 6032 unsigned int nrt6; 6033 }; 6034 6035 struct mlxsw_sp_fib_event_work { 6036 struct work_struct work; 6037 union { 6038 struct mlxsw_sp_fib6_event_work fib6_work; 6039 struct fib_entry_notifier_info fen_info; 6040 struct fib_rule_notifier_info fr_info; 6041 struct fib_nh_notifier_info fnh_info; 6042 struct mfc_entry_notifier_info men_info; 6043 struct vif_entry_notifier_info ven_info; 6044 }; 6045 struct mlxsw_sp *mlxsw_sp; 6046 unsigned long event; 6047 }; 6048 6049 static int 6050 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work, 6051 struct fib6_entry_notifier_info *fen6_info) 6052 { 6053 struct fib6_info *rt = fen6_info->rt; 6054 struct fib6_info **rt_arr; 6055 struct fib6_info *iter; 6056 unsigned int nrt6; 6057 int i = 0; 6058 6059 nrt6 = fen6_info->nsiblings + 1; 6060 6061 rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC); 6062 if (!rt_arr) 6063 return -ENOMEM; 6064 6065 fib6_work->rt_arr = rt_arr; 6066 fib6_work->nrt6 = nrt6; 6067 6068 rt_arr[0] = rt; 6069 fib6_info_hold(rt); 6070 6071 if (!fen6_info->nsiblings) 6072 return 0; 6073 6074 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) { 6075 if (i == fen6_info->nsiblings) 6076 break; 6077 6078 rt_arr[i + 1] = iter; 6079 fib6_info_hold(iter); 6080 i++; 6081 } 6082 WARN_ON_ONCE(i != fen6_info->nsiblings); 6083 6084 return 0; 6085 } 6086 6087 static void 6088 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work) 6089 { 6090 int i; 6091 6092 for (i = 0; i < fib6_work->nrt6; i++) 6093 mlxsw_sp_rt6_release(fib6_work->rt_arr[i]); 6094 kfree(fib6_work->rt_arr); 6095 } 6096 6097 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) 6098 { 6099 struct mlxsw_sp_fib_event_work *fib_work = 6100 container_of(work, struct mlxsw_sp_fib_event_work, work); 6101 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; 6102 bool replace, append; 6103 int err; 6104 6105 /* Protect internal structures from changes */ 6106 rtnl_lock(); 6107 mlxsw_sp_span_respin(mlxsw_sp); 6108 6109 switch (fib_work->event) { 6110 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6111 case FIB_EVENT_ENTRY_APPEND: /* fall through */ 6112 case FIB_EVENT_ENTRY_ADD: 6113 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; 6114 append = fib_work->event == FIB_EVENT_ENTRY_APPEND; 6115 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info, 6116 replace, append); 6117 if (err) 6118 mlxsw_sp_router_fib_abort(mlxsw_sp); 6119 fib_info_put(fib_work->fen_info.fi); 6120 break; 6121 case FIB_EVENT_ENTRY_DEL: 6122 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info); 6123 fib_info_put(fib_work->fen_info.fi); 6124 break; 6125 case FIB_EVENT_NH_ADD: /* fall through */ 6126 case FIB_EVENT_NH_DEL: 6127 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event, 6128 fib_work->fnh_info.fib_nh); 6129 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent); 6130 break; 6131 } 6132 rtnl_unlock(); 6133 kfree(fib_work); 6134 } 6135 6136 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) 6137 { 6138 struct mlxsw_sp_fib_event_work *fib_work = 6139 container_of(work, struct mlxsw_sp_fib_event_work, work); 6140 struct mlxsw_sp *mlxsw_sp = 
fib_work->mlxsw_sp; 6141 bool replace; 6142 int err; 6143 6144 rtnl_lock(); 6145 mlxsw_sp_span_respin(mlxsw_sp); 6146 6147 switch (fib_work->event) { 6148 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6149 case FIB_EVENT_ENTRY_ADD: 6150 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; 6151 err = mlxsw_sp_router_fib6_add(mlxsw_sp, 6152 fib_work->fib6_work.rt_arr, 6153 fib_work->fib6_work.nrt6, 6154 replace); 6155 if (err) 6156 mlxsw_sp_router_fib_abort(mlxsw_sp); 6157 mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work); 6158 break; 6159 case FIB_EVENT_ENTRY_DEL: 6160 mlxsw_sp_router_fib6_del(mlxsw_sp, 6161 fib_work->fib6_work.rt_arr, 6162 fib_work->fib6_work.nrt6); 6163 mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work); 6164 break; 6165 } 6166 rtnl_unlock(); 6167 kfree(fib_work); 6168 } 6169 6170 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) 6171 { 6172 struct mlxsw_sp_fib_event_work *fib_work = 6173 container_of(work, struct mlxsw_sp_fib_event_work, work); 6174 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; 6175 bool replace; 6176 int err; 6177 6178 rtnl_lock(); 6179 switch (fib_work->event) { 6180 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6181 case FIB_EVENT_ENTRY_ADD: 6182 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; 6183 6184 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info, 6185 replace); 6186 if (err) 6187 mlxsw_sp_router_fib_abort(mlxsw_sp); 6188 mr_cache_put(fib_work->men_info.mfc); 6189 break; 6190 case FIB_EVENT_ENTRY_DEL: 6191 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info); 6192 mr_cache_put(fib_work->men_info.mfc); 6193 break; 6194 case FIB_EVENT_VIF_ADD: 6195 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp, 6196 &fib_work->ven_info); 6197 if (err) 6198 mlxsw_sp_router_fib_abort(mlxsw_sp); 6199 dev_put(fib_work->ven_info.dev); 6200 break; 6201 case FIB_EVENT_VIF_DEL: 6202 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, 6203 &fib_work->ven_info); 6204 dev_put(fib_work->ven_info.dev); 6205 break; 6206 } 6207 rtnl_unlock(); 6208 kfree(fib_work); 6209 } 6210 6211 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, 6212 struct fib_notifier_info *info) 6213 { 6214 struct fib_entry_notifier_info *fen_info; 6215 struct fib_nh_notifier_info *fnh_info; 6216 6217 switch (fib_work->event) { 6218 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6219 case FIB_EVENT_ENTRY_APPEND: /* fall through */ 6220 case FIB_EVENT_ENTRY_ADD: /* fall through */ 6221 case FIB_EVENT_ENTRY_DEL: 6222 fen_info = container_of(info, struct fib_entry_notifier_info, 6223 info); 6224 fib_work->fen_info = *fen_info; 6225 /* Take reference on fib_info to prevent it from being 6226 * freed while work is queued. Release it afterwards. 
6227 */ 6228 fib_info_hold(fib_work->fen_info.fi); 6229 break; 6230 case FIB_EVENT_NH_ADD: /* fall through */ 6231 case FIB_EVENT_NH_DEL: 6232 fnh_info = container_of(info, struct fib_nh_notifier_info, 6233 info); 6234 fib_work->fnh_info = *fnh_info; 6235 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); 6236 break; 6237 } 6238 } 6239 6240 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, 6241 struct fib_notifier_info *info) 6242 { 6243 struct fib6_entry_notifier_info *fen6_info; 6244 int err; 6245 6246 switch (fib_work->event) { 6247 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6248 case FIB_EVENT_ENTRY_ADD: /* fall through */ 6249 case FIB_EVENT_ENTRY_DEL: 6250 fen6_info = container_of(info, struct fib6_entry_notifier_info, 6251 info); 6252 err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work, 6253 fen6_info); 6254 if (err) 6255 return err; 6256 break; 6257 } 6258 6259 return 0; 6260 } 6261 6262 static void 6263 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work, 6264 struct fib_notifier_info *info) 6265 { 6266 switch (fib_work->event) { 6267 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6268 case FIB_EVENT_ENTRY_ADD: /* fall through */ 6269 case FIB_EVENT_ENTRY_DEL: 6270 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info)); 6271 mr_cache_hold(fib_work->men_info.mfc); 6272 break; 6273 case FIB_EVENT_VIF_ADD: /* fall through */ 6274 case FIB_EVENT_VIF_DEL: 6275 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info)); 6276 dev_hold(fib_work->ven_info.dev); 6277 break; 6278 } 6279 } 6280 6281 static int mlxsw_sp_router_fib_rule_event(unsigned long event, 6282 struct fib_notifier_info *info, 6283 struct mlxsw_sp *mlxsw_sp) 6284 { 6285 struct netlink_ext_ack *extack = info->extack; 6286 struct fib_rule_notifier_info *fr_info; 6287 struct fib_rule *rule; 6288 int err = 0; 6289 6290 /* nothing to do at the moment */ 6291 if (event == FIB_EVENT_RULE_DEL) 6292 return 0; 6293 6294 if (mlxsw_sp->router->aborted) 6295 return 0; 6296 6297 fr_info = container_of(info, struct fib_rule_notifier_info, info); 6298 rule = fr_info->rule; 6299 6300 /* Rule only affects locally generated traffic */ 6301 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex) 6302 return 0; 6303 6304 switch (info->family) { 6305 case AF_INET: 6306 if (!fib4_rule_default(rule) && !rule->l3mdev) 6307 err = -EOPNOTSUPP; 6308 break; 6309 case AF_INET6: 6310 if (!fib6_rule_default(rule) && !rule->l3mdev) 6311 err = -EOPNOTSUPP; 6312 break; 6313 case RTNL_FAMILY_IPMR: 6314 if (!ipmr_rule_default(rule) && !rule->l3mdev) 6315 err = -EOPNOTSUPP; 6316 break; 6317 case RTNL_FAMILY_IP6MR: 6318 if (!ip6mr_rule_default(rule) && !rule->l3mdev) 6319 err = -EOPNOTSUPP; 6320 break; 6321 } 6322 6323 if (err < 0) 6324 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported"); 6325 6326 return err; 6327 } 6328 6329 /* Called with rcu_read_lock() */ 6330 static int mlxsw_sp_router_fib_event(struct notifier_block *nb, 6331 unsigned long event, void *ptr) 6332 { 6333 struct mlxsw_sp_fib_event_work *fib_work; 6334 struct fib_notifier_info *info = ptr; 6335 struct mlxsw_sp_router *router; 6336 int err; 6337 6338 if ((info->family != AF_INET && info->family != AF_INET6 && 6339 info->family != RTNL_FAMILY_IPMR && 6340 info->family != RTNL_FAMILY_IP6MR)) 6341 return NOTIFY_DONE; 6342 6343 router = container_of(nb, struct mlxsw_sp_router, fib_nb); 6344 6345 switch (event) { 6346 case FIB_EVENT_RULE_ADD: /* fall through */ 6347 case FIB_EVENT_RULE_DEL: 6348 err = 
mlxsw_sp_router_fib_rule_event(event, info, 6349 router->mlxsw_sp); 6350 return notifier_from_errno(err); 6351 case FIB_EVENT_ENTRY_ADD: 6352 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6353 case FIB_EVENT_ENTRY_APPEND: /* fall through */ 6354 if (router->aborted) { 6355 NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route"); 6356 return notifier_from_errno(-EINVAL); 6357 } 6358 if (info->family == AF_INET) { 6359 struct fib_entry_notifier_info *fen_info = ptr; 6360 6361 if (fen_info->fi->fib_nh_is_v6) { 6362 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported"); 6363 return notifier_from_errno(-EINVAL); 6364 } 6365 if (fen_info->fi->nh) { 6366 NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported"); 6367 return notifier_from_errno(-EINVAL); 6368 } 6369 } else if (info->family == AF_INET6) { 6370 struct fib6_entry_notifier_info *fen6_info; 6371 6372 fen6_info = container_of(info, 6373 struct fib6_entry_notifier_info, 6374 info); 6375 if (fen6_info->rt->nh) { 6376 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported"); 6377 return notifier_from_errno(-EINVAL); 6378 } 6379 } 6380 break; 6381 } 6382 6383 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); 6384 if (WARN_ON(!fib_work)) 6385 return NOTIFY_BAD; 6386 6387 fib_work->mlxsw_sp = router->mlxsw_sp; 6388 fib_work->event = event; 6389 6390 switch (info->family) { 6391 case AF_INET: 6392 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work); 6393 mlxsw_sp_router_fib4_event(fib_work, info); 6394 break; 6395 case AF_INET6: 6396 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work); 6397 err = mlxsw_sp_router_fib6_event(fib_work, info); 6398 if (err) 6399 goto err_fib_event; 6400 break; 6401 case RTNL_FAMILY_IP6MR: 6402 case RTNL_FAMILY_IPMR: 6403 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work); 6404 mlxsw_sp_router_fibmr_event(fib_work, info); 6405 break; 6406 } 6407 6408 mlxsw_core_schedule_work(&fib_work->work); 6409 6410 return NOTIFY_DONE; 6411 6412 err_fib_event: 6413 kfree(fib_work); 6414 return NOTIFY_BAD; 6415 } 6416 6417 struct mlxsw_sp_rif * 6418 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, 6419 const struct net_device *dev) 6420 { 6421 int i; 6422 6423 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) 6424 if (mlxsw_sp->router->rifs[i] && 6425 mlxsw_sp->router->rifs[i]->dev == dev) 6426 return mlxsw_sp->router->rifs[i]; 6427 6428 return NULL; 6429 } 6430 6431 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif) 6432 { 6433 char ritr_pl[MLXSW_REG_RITR_LEN]; 6434 int err; 6435 6436 mlxsw_reg_ritr_rif_pack(ritr_pl, rif); 6437 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 6438 if (err) 6439 return err; 6440 6441 mlxsw_reg_ritr_enable_set(ritr_pl, false); 6442 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 6443 } 6444 6445 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, 6446 struct mlxsw_sp_rif *rif) 6447 { 6448 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index); 6449 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif); 6450 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif); 6451 } 6452 6453 static bool 6454 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev, 6455 unsigned long event) 6456 { 6457 struct inet6_dev *inet6_dev; 6458 bool addr_list_empty = true; 6459 struct in_device *idev; 6460 6461 switch (event) { 6462 case NETDEV_UP: 6463 return rif == NULL; 6464 case 
NETDEV_DOWN: 6465 idev = __in_dev_get_rtnl(dev); 6466 if (idev && idev->ifa_list) 6467 addr_list_empty = false; 6468 6469 inet6_dev = __in6_dev_get(dev); 6470 if (addr_list_empty && inet6_dev && 6471 !list_empty(&inet6_dev->addr_list)) 6472 addr_list_empty = false; 6473 6474 /* macvlans do not have a RIF, but rather piggy back on the 6475 * RIF of their lower device. 6476 */ 6477 if (netif_is_macvlan(dev) && addr_list_empty) 6478 return true; 6479 6480 if (rif && addr_list_empty && 6481 !netif_is_l3_slave(rif->dev)) 6482 return true; 6483 /* It is possible we already removed the RIF ourselves 6484 * if it was assigned to a netdev that is now a bridge 6485 * or LAG slave. 6486 */ 6487 return false; 6488 } 6489 6490 return false; 6491 } 6492 6493 static enum mlxsw_sp_rif_type 6494 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp, 6495 const struct net_device *dev) 6496 { 6497 enum mlxsw_sp_fid_type type; 6498 6499 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL)) 6500 return MLXSW_SP_RIF_TYPE_IPIP_LB; 6501 6502 /* Otherwise RIF type is derived from the type of the underlying FID. */ 6503 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev))) 6504 type = MLXSW_SP_FID_TYPE_8021Q; 6505 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev)) 6506 type = MLXSW_SP_FID_TYPE_8021Q; 6507 else if (netif_is_bridge_master(dev)) 6508 type = MLXSW_SP_FID_TYPE_8021D; 6509 else 6510 type = MLXSW_SP_FID_TYPE_RFID; 6511 6512 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type); 6513 } 6514 6515 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index) 6516 { 6517 int i; 6518 6519 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { 6520 if (!mlxsw_sp->router->rifs[i]) { 6521 *p_rif_index = i; 6522 return 0; 6523 } 6524 } 6525 6526 return -ENOBUFS; 6527 } 6528 6529 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index, 6530 u16 vr_id, 6531 struct net_device *l3_dev) 6532 { 6533 struct mlxsw_sp_rif *rif; 6534 6535 rif = kzalloc(rif_size, GFP_KERNEL); 6536 if (!rif) 6537 return NULL; 6538 6539 INIT_LIST_HEAD(&rif->nexthop_list); 6540 INIT_LIST_HEAD(&rif->neigh_list); 6541 if (l3_dev) { 6542 ether_addr_copy(rif->addr, l3_dev->dev_addr); 6543 rif->mtu = l3_dev->mtu; 6544 rif->dev = l3_dev; 6545 } 6546 rif->vr_id = vr_id; 6547 rif->rif_index = rif_index; 6548 6549 return rif; 6550 } 6551 6552 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, 6553 u16 rif_index) 6554 { 6555 return mlxsw_sp->router->rifs[rif_index]; 6556 } 6557 6558 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif) 6559 { 6560 return rif->rif_index; 6561 } 6562 6563 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif) 6564 { 6565 return lb_rif->common.rif_index; 6566 } 6567 6568 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif) 6569 { 6570 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev); 6571 struct mlxsw_sp_vr *ul_vr; 6572 6573 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL); 6574 if (WARN_ON(IS_ERR(ul_vr))) 6575 return 0; 6576 6577 return ul_vr->id; 6578 } 6579 6580 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif) 6581 { 6582 return lb_rif->ul_rif_id; 6583 } 6584 6585 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif) 6586 { 6587 return rif->dev->ifindex; 6588 } 6589 6590 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif) 6591 { 6592 return rif->dev; 6593 } 6594 6595 struct mlxsw_sp_fid 
*mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif) 6596 { 6597 return rif->fid; 6598 } 6599 6600 static struct mlxsw_sp_rif * 6601 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, 6602 const struct mlxsw_sp_rif_params *params, 6603 struct netlink_ext_ack *extack) 6604 { 6605 u32 tb_id = l3mdev_fib_table(params->dev); 6606 const struct mlxsw_sp_rif_ops *ops; 6607 struct mlxsw_sp_fid *fid = NULL; 6608 enum mlxsw_sp_rif_type type; 6609 struct mlxsw_sp_rif *rif; 6610 struct mlxsw_sp_vr *vr; 6611 u16 rif_index; 6612 int i, err; 6613 6614 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev); 6615 ops = mlxsw_sp->rif_ops_arr[type]; 6616 6617 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack); 6618 if (IS_ERR(vr)) 6619 return ERR_CAST(vr); 6620 vr->rif_count++; 6621 6622 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); 6623 if (err) { 6624 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces"); 6625 goto err_rif_index_alloc; 6626 } 6627 6628 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev); 6629 if (!rif) { 6630 err = -ENOMEM; 6631 goto err_rif_alloc; 6632 } 6633 dev_hold(rif->dev); 6634 mlxsw_sp->router->rifs[rif_index] = rif; 6635 rif->mlxsw_sp = mlxsw_sp; 6636 rif->ops = ops; 6637 6638 if (ops->fid_get) { 6639 fid = ops->fid_get(rif, extack); 6640 if (IS_ERR(fid)) { 6641 err = PTR_ERR(fid); 6642 goto err_fid_get; 6643 } 6644 rif->fid = fid; 6645 } 6646 6647 if (ops->setup) 6648 ops->setup(rif, params); 6649 6650 err = ops->configure(rif); 6651 if (err) 6652 goto err_configure; 6653 6654 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) { 6655 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif); 6656 if (err) 6657 goto err_mr_rif_add; 6658 } 6659 6660 mlxsw_sp_rif_counters_alloc(rif); 6661 6662 return rif; 6663 6664 err_mr_rif_add: 6665 for (i--; i >= 0; i--) 6666 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); 6667 ops->deconfigure(rif); 6668 err_configure: 6669 if (fid) 6670 mlxsw_sp_fid_put(fid); 6671 err_fid_get: 6672 mlxsw_sp->router->rifs[rif_index] = NULL; 6673 dev_put(rif->dev); 6674 kfree(rif); 6675 err_rif_alloc: 6676 err_rif_index_alloc: 6677 vr->rif_count--; 6678 mlxsw_sp_vr_put(mlxsw_sp, vr); 6679 return ERR_PTR(err); 6680 } 6681 6682 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) 6683 { 6684 const struct mlxsw_sp_rif_ops *ops = rif->ops; 6685 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 6686 struct mlxsw_sp_fid *fid = rif->fid; 6687 struct mlxsw_sp_vr *vr; 6688 int i; 6689 6690 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); 6691 vr = &mlxsw_sp->router->vrs[rif->vr_id]; 6692 6693 mlxsw_sp_rif_counters_free(rif); 6694 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) 6695 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); 6696 ops->deconfigure(rif); 6697 if (fid) 6698 /* Loopback RIFs are not associated with a FID. 
*/ 6699 mlxsw_sp_fid_put(fid); 6700 mlxsw_sp->router->rifs[rif->rif_index] = NULL; 6701 dev_put(rif->dev); 6702 kfree(rif); 6703 vr->rif_count--; 6704 mlxsw_sp_vr_put(mlxsw_sp, vr); 6705 } 6706 6707 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, 6708 struct net_device *dev) 6709 { 6710 struct mlxsw_sp_rif *rif; 6711 6712 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 6713 if (!rif) 6714 return; 6715 mlxsw_sp_rif_destroy(rif); 6716 } 6717 6718 static void 6719 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, 6720 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 6721 { 6722 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 6723 6724 params->vid = mlxsw_sp_port_vlan->vid; 6725 params->lag = mlxsw_sp_port->lagged; 6726 if (params->lag) 6727 params->lag_id = mlxsw_sp_port->lag_id; 6728 else 6729 params->system_port = mlxsw_sp_port->local_port; 6730 } 6731 6732 static struct mlxsw_sp_rif_subport * 6733 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif) 6734 { 6735 return container_of(rif, struct mlxsw_sp_rif_subport, common); 6736 } 6737 6738 static struct mlxsw_sp_rif * 6739 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp, 6740 const struct mlxsw_sp_rif_params *params, 6741 struct netlink_ext_ack *extack) 6742 { 6743 struct mlxsw_sp_rif_subport *rif_subport; 6744 struct mlxsw_sp_rif *rif; 6745 6746 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev); 6747 if (!rif) 6748 return mlxsw_sp_rif_create(mlxsw_sp, params, extack); 6749 6750 rif_subport = mlxsw_sp_rif_subport_rif(rif); 6751 refcount_inc(&rif_subport->ref_count); 6752 return rif; 6753 } 6754 6755 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif) 6756 { 6757 struct mlxsw_sp_rif_subport *rif_subport; 6758 6759 rif_subport = mlxsw_sp_rif_subport_rif(rif); 6760 if (!refcount_dec_and_test(&rif_subport->ref_count)) 6761 return; 6762 6763 mlxsw_sp_rif_destroy(rif); 6764 } 6765 6766 static int 6767 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, 6768 struct net_device *l3_dev, 6769 struct netlink_ext_ack *extack) 6770 { 6771 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 6772 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6773 struct mlxsw_sp_rif_params params = { 6774 .dev = l3_dev, 6775 }; 6776 u16 vid = mlxsw_sp_port_vlan->vid; 6777 struct mlxsw_sp_rif *rif; 6778 struct mlxsw_sp_fid *fid; 6779 int err; 6780 6781 mlxsw_sp_rif_subport_params_init(¶ms, mlxsw_sp_port_vlan); 6782 rif = mlxsw_sp_rif_subport_get(mlxsw_sp, ¶ms, extack); 6783 if (IS_ERR(rif)) 6784 return PTR_ERR(rif); 6785 6786 /* FID was already created, just take a reference */ 6787 fid = rif->ops->fid_get(rif, extack); 6788 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid); 6789 if (err) 6790 goto err_fid_port_vid_map; 6791 6792 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); 6793 if (err) 6794 goto err_port_vid_learning_set; 6795 6796 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, 6797 BR_STATE_FORWARDING); 6798 if (err) 6799 goto err_port_vid_stp_set; 6800 6801 mlxsw_sp_port_vlan->fid = fid; 6802 6803 return 0; 6804 6805 err_port_vid_stp_set: 6806 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 6807 err_port_vid_learning_set: 6808 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid); 6809 err_fid_port_vid_map: 6810 mlxsw_sp_fid_put(fid); 6811 mlxsw_sp_rif_subport_put(rif); 6812 return err; 6813 } 6814 6815 void 6816 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 6817 { 
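	/* Tear down in the reverse order of mlxsw_sp_port_vlan_router_join():
	 * move the {Port, VID} back to STP blocking, re-enable learning,
	 * unmap the {Port, VID} from the rFID, and drop the FID and sub-port
	 * RIF references.
	 */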
6818 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 6819 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; 6820 struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid); 6821 u16 vid = mlxsw_sp_port_vlan->vid; 6822 6823 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID)) 6824 return; 6825 6826 mlxsw_sp_port_vlan->fid = NULL; 6827 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING); 6828 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 6829 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid); 6830 mlxsw_sp_fid_put(fid); 6831 mlxsw_sp_rif_subport_put(rif); 6832 } 6833 6834 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev, 6835 struct net_device *port_dev, 6836 unsigned long event, u16 vid, 6837 struct netlink_ext_ack *extack) 6838 { 6839 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); 6840 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 6841 6842 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 6843 if (WARN_ON(!mlxsw_sp_port_vlan)) 6844 return -EINVAL; 6845 6846 switch (event) { 6847 case NETDEV_UP: 6848 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, 6849 l3_dev, extack); 6850 case NETDEV_DOWN: 6851 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 6852 break; 6853 } 6854 6855 return 0; 6856 } 6857 6858 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, 6859 unsigned long event, 6860 struct netlink_ext_ack *extack) 6861 { 6862 if (netif_is_bridge_port(port_dev) || 6863 netif_is_lag_port(port_dev) || 6864 netif_is_ovs_port(port_dev)) 6865 return 0; 6866 6867 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 6868 MLXSW_SP_DEFAULT_VID, extack); 6869 } 6870 6871 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, 6872 struct net_device *lag_dev, 6873 unsigned long event, u16 vid, 6874 struct netlink_ext_ack *extack) 6875 { 6876 struct net_device *port_dev; 6877 struct list_head *iter; 6878 int err; 6879 6880 netdev_for_each_lower_dev(lag_dev, port_dev, iter) { 6881 if (mlxsw_sp_port_dev_check(port_dev)) { 6882 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev, 6883 port_dev, 6884 event, vid, 6885 extack); 6886 if (err) 6887 return err; 6888 } 6889 } 6890 6891 return 0; 6892 } 6893 6894 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, 6895 unsigned long event, 6896 struct netlink_ext_ack *extack) 6897 { 6898 if (netif_is_bridge_port(lag_dev)) 6899 return 0; 6900 6901 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 6902 MLXSW_SP_DEFAULT_VID, extack); 6903 } 6904 6905 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp, 6906 struct net_device *l3_dev, 6907 unsigned long event, 6908 struct netlink_ext_ack *extack) 6909 { 6910 struct mlxsw_sp_rif_params params = { 6911 .dev = l3_dev, 6912 }; 6913 struct mlxsw_sp_rif *rif; 6914 6915 switch (event) { 6916 case NETDEV_UP: 6917 rif = mlxsw_sp_rif_create(mlxsw_sp, ¶ms, extack); 6918 if (IS_ERR(rif)) 6919 return PTR_ERR(rif); 6920 break; 6921 case NETDEV_DOWN: 6922 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); 6923 mlxsw_sp_rif_destroy(rif); 6924 break; 6925 } 6926 6927 return 0; 6928 } 6929 6930 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp, 6931 struct net_device *vlan_dev, 6932 unsigned long event, 6933 struct netlink_ext_ack *extack) 6934 { 6935 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6936 u16 vid = vlan_dev_vlan_id(vlan_dev); 6937 6938 if (netif_is_bridge_port(vlan_dev)) 6939 return 0; 
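	/* The RIF is determined by the type of the VLAN device's real
	 * device: a physical port gets a sub-port RIF keyed by this VID, a
	 * LAG has the event replicated to all of its member ports, and a
	 * VLAN-aware bridge is handled by the bridge RIF path.
	 */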
6940 6941 if (mlxsw_sp_port_dev_check(real_dev)) 6942 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev, 6943 event, vid, extack); 6944 else if (netif_is_lag_master(real_dev)) 6945 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, 6946 vid, extack); 6947 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev)) 6948 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event, 6949 extack); 6950 6951 return 0; 6952 } 6953 6954 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac) 6955 { 6956 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 }; 6957 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 6958 6959 return ether_addr_equal_masked(mac, vrrp4, mask); 6960 } 6961 6962 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac) 6963 { 6964 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 }; 6965 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 6966 6967 return ether_addr_equal_masked(mac, vrrp6, mask); 6968 } 6969 6970 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index, 6971 const u8 *mac, bool adding) 6972 { 6973 char ritr_pl[MLXSW_REG_RITR_LEN]; 6974 u8 vrrp_id = adding ? mac[5] : 0; 6975 int err; 6976 6977 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) && 6978 !mlxsw_sp_rif_macvlan_is_vrrp6(mac)) 6979 return 0; 6980 6981 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); 6982 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 6983 if (err) 6984 return err; 6985 6986 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac)) 6987 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id); 6988 else 6989 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id); 6990 6991 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 6992 } 6993 6994 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp, 6995 const struct net_device *macvlan_dev, 6996 struct netlink_ext_ack *extack) 6997 { 6998 struct macvlan_dev *vlan = netdev_priv(macvlan_dev); 6999 struct mlxsw_sp_rif *rif; 7000 int err; 7001 7002 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); 7003 if (!rif) { 7004 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 7005 return -EOPNOTSUPP; 7006 } 7007 7008 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, 7009 mlxsw_sp_fid_index(rif->fid), true); 7010 if (err) 7011 return err; 7012 7013 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, 7014 macvlan_dev->dev_addr, true); 7015 if (err) 7016 goto err_rif_vrrp_add; 7017 7018 /* Make sure the bridge driver does not have this MAC pointing at 7019 * some other port. 7020 */ 7021 if (rif->ops->fdb_del) 7022 rif->ops->fdb_del(rif, macvlan_dev->dev_addr); 7023 7024 return 0; 7025 7026 err_rif_vrrp_add: 7027 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, 7028 mlxsw_sp_fid_index(rif->fid), false); 7029 return err; 7030 } 7031 7032 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, 7033 const struct net_device *macvlan_dev) 7034 { 7035 struct macvlan_dev *vlan = netdev_priv(macvlan_dev); 7036 struct mlxsw_sp_rif *rif; 7037 7038 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); 7039 /* If we do not have a RIF, then we already took care of 7040 * removing the macvlan's MAC during RIF deletion. 
7041 */ 7042 if (!rif) 7043 return; 7044 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr, 7045 false); 7046 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, 7047 mlxsw_sp_fid_index(rif->fid), false); 7048 } 7049 7050 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp, 7051 struct net_device *macvlan_dev, 7052 unsigned long event, 7053 struct netlink_ext_ack *extack) 7054 { 7055 switch (event) { 7056 case NETDEV_UP: 7057 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack); 7058 case NETDEV_DOWN: 7059 mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev); 7060 break; 7061 } 7062 7063 return 0; 7064 } 7065 7066 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, 7067 struct net_device *dev, 7068 const unsigned char *dev_addr, 7069 struct netlink_ext_ack *extack) 7070 { 7071 struct mlxsw_sp_rif *rif; 7072 int i; 7073 7074 /* A RIF is not created for macvlan netdevs. Their MAC is used to 7075 * populate the FDB 7076 */ 7077 if (netif_is_macvlan(dev) || netif_is_l3_master(dev)) 7078 return 0; 7079 7080 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { 7081 rif = mlxsw_sp->router->rifs[i]; 7082 if (rif && rif->ops && 7083 rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB) 7084 continue; 7085 if (rif && rif->dev && rif->dev != dev && 7086 !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr, 7087 mlxsw_sp->mac_mask)) { 7088 NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix"); 7089 return -EINVAL; 7090 } 7091 } 7092 7093 return 0; 7094 } 7095 7096 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp, 7097 struct net_device *dev, 7098 unsigned long event, 7099 struct netlink_ext_ack *extack) 7100 { 7101 if (mlxsw_sp_port_dev_check(dev)) 7102 return mlxsw_sp_inetaddr_port_event(dev, event, extack); 7103 else if (netif_is_lag_master(dev)) 7104 return mlxsw_sp_inetaddr_lag_event(dev, event, extack); 7105 else if (netif_is_bridge_master(dev)) 7106 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event, 7107 extack); 7108 else if (is_vlan_dev(dev)) 7109 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event, 7110 extack); 7111 else if (netif_is_macvlan(dev)) 7112 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event, 7113 extack); 7114 else 7115 return 0; 7116 } 7117 7118 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb, 7119 unsigned long event, void *ptr) 7120 { 7121 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; 7122 struct net_device *dev = ifa->ifa_dev->dev; 7123 struct mlxsw_sp_router *router; 7124 struct mlxsw_sp_rif *rif; 7125 int err = 0; 7126 7127 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */ 7128 if (event == NETDEV_UP) 7129 goto out; 7130 7131 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb); 7132 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev); 7133 if (!mlxsw_sp_rif_should_config(rif, dev, event)) 7134 goto out; 7135 7136 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL); 7137 out: 7138 return notifier_from_errno(err); 7139 } 7140 7141 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, 7142 unsigned long event, void *ptr) 7143 { 7144 struct in_validator_info *ivi = (struct in_validator_info *) ptr; 7145 struct net_device *dev = ivi->ivi_dev->dev; 7146 struct mlxsw_sp *mlxsw_sp; 7147 struct mlxsw_sp_rif *rif; 7148 int err = 0; 7149 7150 mlxsw_sp = mlxsw_sp_lower_get(dev); 7151 if (!mlxsw_sp) 7152 goto out; 7153 7154 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 
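	/* mlxsw_sp_rif_should_config() returns true for NETDEV_UP only when
	 * the netdev has no RIF yet. The check below can then veto the new
	 * address via extack before it is installed, since all RIF MAC
	 * addresses must share the same prefix.
	 */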
7155 if (!mlxsw_sp_rif_should_config(rif, dev, event)) 7156 goto out; 7157 7158 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr, 7159 ivi->extack); 7160 if (err) 7161 goto out; 7162 7163 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack); 7164 out: 7165 return notifier_from_errno(err); 7166 } 7167 7168 struct mlxsw_sp_inet6addr_event_work { 7169 struct work_struct work; 7170 struct mlxsw_sp *mlxsw_sp; 7171 struct net_device *dev; 7172 unsigned long event; 7173 }; 7174 7175 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work) 7176 { 7177 struct mlxsw_sp_inet6addr_event_work *inet6addr_work = 7178 container_of(work, struct mlxsw_sp_inet6addr_event_work, work); 7179 struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp; 7180 struct net_device *dev = inet6addr_work->dev; 7181 unsigned long event = inet6addr_work->event; 7182 struct mlxsw_sp_rif *rif; 7183 7184 rtnl_lock(); 7185 7186 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 7187 if (!mlxsw_sp_rif_should_config(rif, dev, event)) 7188 goto out; 7189 7190 __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL); 7191 out: 7192 rtnl_unlock(); 7193 dev_put(dev); 7194 kfree(inet6addr_work); 7195 } 7196 7197 /* Called with rcu_read_lock() */ 7198 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb, 7199 unsigned long event, void *ptr) 7200 { 7201 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr; 7202 struct mlxsw_sp_inet6addr_event_work *inet6addr_work; 7203 struct net_device *dev = if6->idev->dev; 7204 struct mlxsw_sp_router *router; 7205 7206 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */ 7207 if (event == NETDEV_UP) 7208 return NOTIFY_DONE; 7209 7210 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC); 7211 if (!inet6addr_work) 7212 return NOTIFY_BAD; 7213 7214 router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb); 7215 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work); 7216 inet6addr_work->mlxsw_sp = router->mlxsw_sp; 7217 inet6addr_work->dev = dev; 7218 inet6addr_work->event = event; 7219 dev_hold(dev); 7220 mlxsw_core_schedule_work(&inet6addr_work->work); 7221 7222 return NOTIFY_DONE; 7223 } 7224 7225 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, 7226 unsigned long event, void *ptr) 7227 { 7228 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr; 7229 struct net_device *dev = i6vi->i6vi_dev->dev; 7230 struct mlxsw_sp *mlxsw_sp; 7231 struct mlxsw_sp_rif *rif; 7232 int err = 0; 7233 7234 mlxsw_sp = mlxsw_sp_lower_get(dev); 7235 if (!mlxsw_sp) 7236 goto out; 7237 7238 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 7239 if (!mlxsw_sp_rif_should_config(rif, dev, event)) 7240 goto out; 7241 7242 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr, 7243 i6vi->extack); 7244 if (err) 7245 goto out; 7246 7247 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack); 7248 out: 7249 return notifier_from_errno(err); 7250 } 7251 7252 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, 7253 const char *mac, int mtu) 7254 { 7255 char ritr_pl[MLXSW_REG_RITR_LEN]; 7256 int err; 7257 7258 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); 7259 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 7260 if (err) 7261 return err; 7262 7263 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); 7264 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); 7265 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); 7266 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), 
ritr_pl); 7267 } 7268 7269 static int 7270 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp, 7271 struct mlxsw_sp_rif *rif) 7272 { 7273 struct net_device *dev = rif->dev; 7274 u16 fid_index; 7275 int err; 7276 7277 fid_index = mlxsw_sp_fid_index(rif->fid); 7278 7279 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false); 7280 if (err) 7281 return err; 7282 7283 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr, 7284 dev->mtu); 7285 if (err) 7286 goto err_rif_edit; 7287 7288 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true); 7289 if (err) 7290 goto err_rif_fdb_op; 7291 7292 if (rif->mtu != dev->mtu) { 7293 struct mlxsw_sp_vr *vr; 7294 int i; 7295 7296 /* The RIF is relevant only to its mr_table instance, as unlike 7297 * unicast routing, in multicast routing a RIF cannot be shared 7298 * between several multicast routing tables. 7299 */ 7300 vr = &mlxsw_sp->router->vrs[rif->vr_id]; 7301 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) 7302 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i], 7303 rif, dev->mtu); 7304 } 7305 7306 ether_addr_copy(rif->addr, dev->dev_addr); 7307 rif->mtu = dev->mtu; 7308 7309 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index); 7310 7311 return 0; 7312 7313 err_rif_fdb_op: 7314 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu); 7315 err_rif_edit: 7316 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true); 7317 return err; 7318 } 7319 7320 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif, 7321 struct netdev_notifier_pre_changeaddr_info *info) 7322 { 7323 struct netlink_ext_ack *extack; 7324 7325 extack = netdev_notifier_info_to_extack(&info->info); 7326 return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev, 7327 info->dev_addr, extack); 7328 } 7329 7330 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, 7331 unsigned long event, void *ptr) 7332 { 7333 struct mlxsw_sp *mlxsw_sp; 7334 struct mlxsw_sp_rif *rif; 7335 7336 mlxsw_sp = mlxsw_sp_lower_get(dev); 7337 if (!mlxsw_sp) 7338 return 0; 7339 7340 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 7341 if (!rif) 7342 return 0; 7343 7344 switch (event) { 7345 case NETDEV_CHANGEMTU: /* fall through */ 7346 case NETDEV_CHANGEADDR: 7347 return mlxsw_sp_router_port_change_event(mlxsw_sp, rif); 7348 case NETDEV_PRE_CHANGEADDR: 7349 return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr); 7350 } 7351 7352 return 0; 7353 } 7354 7355 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp, 7356 struct net_device *l3_dev, 7357 struct netlink_ext_ack *extack) 7358 { 7359 struct mlxsw_sp_rif *rif; 7360 7361 /* If netdev is already associated with a RIF, then we need to 7362 * destroy it and create a new one with the new virtual router ID. 
7363 */ 7364 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); 7365 if (rif) 7366 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, 7367 extack); 7368 7369 return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack); 7370 } 7371 7372 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp, 7373 struct net_device *l3_dev) 7374 { 7375 struct mlxsw_sp_rif *rif; 7376 7377 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); 7378 if (!rif) 7379 return; 7380 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL); 7381 } 7382 7383 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, 7384 struct netdev_notifier_changeupper_info *info) 7385 { 7386 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); 7387 int err = 0; 7388 7389 /* We do not create a RIF for a macvlan, but only use it to 7390 * direct more MAC addresses to the router. 7391 */ 7392 if (!mlxsw_sp || netif_is_macvlan(l3_dev)) 7393 return 0; 7394 7395 switch (event) { 7396 case NETDEV_PRECHANGEUPPER: 7397 return 0; 7398 case NETDEV_CHANGEUPPER: 7399 if (info->linking) { 7400 struct netlink_ext_ack *extack; 7401 7402 extack = netdev_notifier_info_to_extack(&info->info); 7403 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack); 7404 } else { 7405 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev); 7406 } 7407 break; 7408 } 7409 7410 return err; 7411 } 7412 7413 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data) 7414 { 7415 struct mlxsw_sp_rif *rif = data; 7416 7417 if (!netif_is_macvlan(dev)) 7418 return 0; 7419 7420 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, 7421 mlxsw_sp_fid_index(rif->fid), false); 7422 } 7423 7424 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif) 7425 { 7426 if (!netif_is_macvlan_port(rif->dev)) 7427 return 0; 7428 7429 netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n"); 7430 return netdev_walk_all_upper_dev_rcu(rif->dev, 7431 __mlxsw_sp_rif_macvlan_flush, rif); 7432 } 7433 7434 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif, 7435 const struct mlxsw_sp_rif_params *params) 7436 { 7437 struct mlxsw_sp_rif_subport *rif_subport; 7438 7439 rif_subport = mlxsw_sp_rif_subport_rif(rif); 7440 refcount_set(&rif_subport->ref_count, 1); 7441 rif_subport->vid = params->vid; 7442 rif_subport->lag = params->lag; 7443 if (params->lag) 7444 rif_subport->lag_id = params->lag_id; 7445 else 7446 rif_subport->system_port = params->system_port; 7447 } 7448 7449 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable) 7450 { 7451 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7452 struct mlxsw_sp_rif_subport *rif_subport; 7453 char ritr_pl[MLXSW_REG_RITR_LEN]; 7454 7455 rif_subport = mlxsw_sp_rif_subport_rif(rif); 7456 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF, 7457 rif->rif_index, rif->vr_id, rif->dev->mtu); 7458 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr); 7459 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag, 7460 rif_subport->lag ? 
rif_subport->lag_id : 7461 rif_subport->system_port, 7462 rif_subport->vid); 7463 7464 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 7465 } 7466 7467 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif) 7468 { 7469 int err; 7470 7471 err = mlxsw_sp_rif_subport_op(rif, true); 7472 if (err) 7473 return err; 7474 7475 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7476 mlxsw_sp_fid_index(rif->fid), true); 7477 if (err) 7478 goto err_rif_fdb_op; 7479 7480 mlxsw_sp_fid_rif_set(rif->fid, rif); 7481 return 0; 7482 7483 err_rif_fdb_op: 7484 mlxsw_sp_rif_subport_op(rif, false); 7485 return err; 7486 } 7487 7488 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif) 7489 { 7490 struct mlxsw_sp_fid *fid = rif->fid; 7491 7492 mlxsw_sp_fid_rif_set(fid, NULL); 7493 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7494 mlxsw_sp_fid_index(fid), false); 7495 mlxsw_sp_rif_macvlan_flush(rif); 7496 mlxsw_sp_rif_subport_op(rif, false); 7497 } 7498 7499 static struct mlxsw_sp_fid * 7500 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif, 7501 struct netlink_ext_ack *extack) 7502 { 7503 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index); 7504 } 7505 7506 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = { 7507 .type = MLXSW_SP_RIF_TYPE_SUBPORT, 7508 .rif_size = sizeof(struct mlxsw_sp_rif_subport), 7509 .setup = mlxsw_sp_rif_subport_setup, 7510 .configure = mlxsw_sp_rif_subport_configure, 7511 .deconfigure = mlxsw_sp_rif_subport_deconfigure, 7512 .fid_get = mlxsw_sp_rif_subport_fid_get, 7513 }; 7514 7515 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif, 7516 enum mlxsw_reg_ritr_if_type type, 7517 u16 vid_fid, bool enable) 7518 { 7519 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7520 char ritr_pl[MLXSW_REG_RITR_LEN]; 7521 7522 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id, 7523 rif->dev->mtu); 7524 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr); 7525 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid); 7526 7527 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 7528 } 7529 7530 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp) 7531 { 7532 return mlxsw_core_max_ports(mlxsw_sp->core) + 1; 7533 } 7534 7535 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif) 7536 { 7537 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7538 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); 7539 int err; 7540 7541 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true); 7542 if (err) 7543 return err; 7544 7545 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7546 mlxsw_sp_router_port(mlxsw_sp), true); 7547 if (err) 7548 goto err_fid_mc_flood_set; 7549 7550 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7551 mlxsw_sp_router_port(mlxsw_sp), true); 7552 if (err) 7553 goto err_fid_bc_flood_set; 7554 7555 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7556 mlxsw_sp_fid_index(rif->fid), true); 7557 if (err) 7558 goto err_rif_fdb_op; 7559 7560 mlxsw_sp_fid_rif_set(rif->fid, rif); 7561 return 0; 7562 7563 err_rif_fdb_op: 7564 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7565 mlxsw_sp_router_port(mlxsw_sp), false); 7566 err_fid_bc_flood_set: 7567 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7568 mlxsw_sp_router_port(mlxsw_sp), false); 7569 err_fid_mc_flood_set: 7570 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false); 7571 return err; 7572 } 7573 7574 static void 
mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) 7575 { 7576 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); 7577 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7578 struct mlxsw_sp_fid *fid = rif->fid; 7579 7580 mlxsw_sp_fid_rif_set(fid, NULL); 7581 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7582 mlxsw_sp_fid_index(fid), false); 7583 mlxsw_sp_rif_macvlan_flush(rif); 7584 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7585 mlxsw_sp_router_port(mlxsw_sp), false); 7586 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7587 mlxsw_sp_router_port(mlxsw_sp), false); 7588 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false); 7589 } 7590 7591 static struct mlxsw_sp_fid * 7592 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, 7593 struct netlink_ext_ack *extack) 7594 { 7595 struct net_device *br_dev = rif->dev; 7596 u16 vid; 7597 int err; 7598 7599 if (is_vlan_dev(rif->dev)) { 7600 vid = vlan_dev_vlan_id(rif->dev); 7601 br_dev = vlan_dev_real_dev(rif->dev); 7602 if (WARN_ON(!netif_is_bridge_master(br_dev))) 7603 return ERR_PTR(-EINVAL); 7604 } else { 7605 err = br_vlan_get_pvid(rif->dev, &vid); 7606 if (err < 0 || !vid) { 7607 NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID"); 7608 return ERR_PTR(-EINVAL); 7609 } 7610 } 7611 7612 return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, br_dev, vid, extack); 7613 } 7614 7615 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) 7616 { 7617 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); 7618 struct switchdev_notifier_fdb_info info; 7619 struct net_device *br_dev; 7620 struct net_device *dev; 7621 7622 br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev; 7623 dev = br_fdb_find_port(br_dev, mac, vid); 7624 if (!dev) 7625 return; 7626 7627 info.addr = mac; 7628 info.vid = vid; 7629 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info, 7630 NULL); 7631 } 7632 7633 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = { 7634 .type = MLXSW_SP_RIF_TYPE_VLAN, 7635 .rif_size = sizeof(struct mlxsw_sp_rif), 7636 .configure = mlxsw_sp_rif_vlan_configure, 7637 .deconfigure = mlxsw_sp_rif_vlan_deconfigure, 7638 .fid_get = mlxsw_sp_rif_vlan_fid_get, 7639 .fdb_del = mlxsw_sp_rif_vlan_fdb_del, 7640 }; 7641 7642 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) 7643 { 7644 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7645 u16 fid_index = mlxsw_sp_fid_index(rif->fid); 7646 int err; 7647 7648 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, 7649 true); 7650 if (err) 7651 return err; 7652 7653 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7654 mlxsw_sp_router_port(mlxsw_sp), true); 7655 if (err) 7656 goto err_fid_mc_flood_set; 7657 7658 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7659 mlxsw_sp_router_port(mlxsw_sp), true); 7660 if (err) 7661 goto err_fid_bc_flood_set; 7662 7663 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7664 mlxsw_sp_fid_index(rif->fid), true); 7665 if (err) 7666 goto err_rif_fdb_op; 7667 7668 mlxsw_sp_fid_rif_set(rif->fid, rif); 7669 return 0; 7670 7671 err_rif_fdb_op: 7672 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7673 mlxsw_sp_router_port(mlxsw_sp), false); 7674 err_fid_bc_flood_set: 7675 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7676 mlxsw_sp_router_port(mlxsw_sp), false); 7677 err_fid_mc_flood_set: 7678 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); 7679 return err; 7680 } 7681 7682 static void 
mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) 7683 { 7684 u16 fid_index = mlxsw_sp_fid_index(rif->fid); 7685 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7686 struct mlxsw_sp_fid *fid = rif->fid; 7687 7688 mlxsw_sp_fid_rif_set(fid, NULL); 7689 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7690 mlxsw_sp_fid_index(fid), false); 7691 mlxsw_sp_rif_macvlan_flush(rif); 7692 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7693 mlxsw_sp_router_port(mlxsw_sp), false); 7694 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7695 mlxsw_sp_router_port(mlxsw_sp), false); 7696 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); 7697 } 7698 7699 static struct mlxsw_sp_fid * 7700 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, 7701 struct netlink_ext_ack *extack) 7702 { 7703 return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, rif->dev, 0, extack); 7704 } 7705 7706 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) 7707 { 7708 struct switchdev_notifier_fdb_info info; 7709 struct net_device *dev; 7710 7711 dev = br_fdb_find_port(rif->dev, mac, 0); 7712 if (!dev) 7713 return; 7714 7715 info.addr = mac; 7716 info.vid = 0; 7717 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info, 7718 NULL); 7719 } 7720 7721 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = { 7722 .type = MLXSW_SP_RIF_TYPE_FID, 7723 .rif_size = sizeof(struct mlxsw_sp_rif), 7724 .configure = mlxsw_sp_rif_fid_configure, 7725 .deconfigure = mlxsw_sp_rif_fid_deconfigure, 7726 .fid_get = mlxsw_sp_rif_fid_fid_get, 7727 .fdb_del = mlxsw_sp_rif_fid_fdb_del, 7728 }; 7729 7730 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = { 7731 .type = MLXSW_SP_RIF_TYPE_VLAN, 7732 .rif_size = sizeof(struct mlxsw_sp_rif), 7733 .configure = mlxsw_sp_rif_fid_configure, 7734 .deconfigure = mlxsw_sp_rif_fid_deconfigure, 7735 .fid_get = mlxsw_sp_rif_vlan_fid_get, 7736 .fdb_del = mlxsw_sp_rif_vlan_fdb_del, 7737 }; 7738 7739 static struct mlxsw_sp_rif_ipip_lb * 7740 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif) 7741 { 7742 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common); 7743 } 7744 7745 static void 7746 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif, 7747 const struct mlxsw_sp_rif_params *params) 7748 { 7749 struct mlxsw_sp_rif_params_ipip_lb *params_lb; 7750 struct mlxsw_sp_rif_ipip_lb *rif_lb; 7751 7752 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb, 7753 common); 7754 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif); 7755 rif_lb->lb_config = params_lb->lb_config; 7756 } 7757 7758 static int 7759 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) 7760 { 7761 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); 7762 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev); 7763 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7764 struct mlxsw_sp_vr *ul_vr; 7765 int err; 7766 7767 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL); 7768 if (IS_ERR(ul_vr)) 7769 return PTR_ERR(ul_vr); 7770 7771 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true); 7772 if (err) 7773 goto err_loopback_op; 7774 7775 lb_rif->ul_vr_id = ul_vr->id; 7776 lb_rif->ul_rif_id = 0; 7777 ++ul_vr->rif_count; 7778 return 0; 7779 7780 err_loopback_op: 7781 mlxsw_sp_vr_put(mlxsw_sp, ul_vr); 7782 return err; 7783 } 7784 7785 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif) 7786 { 7787 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); 7788 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7789 struct 
static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
	.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup = mlxsw_sp_rif_ipip_lb_setup,
	.configure = mlxsw_sp1_rif_ipip_lb_configure,
	.deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
};

const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};

static int
mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
					     MLXSW_REG_RITR_LOOPBACK_GENERIC);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *ul_rif;
	u16 rif_index;
	int err;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		return ERR_PTR(err);
	}

	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
	if (!ul_rif)
		return ERR_PTR(-ENOMEM);

	mlxsw_sp->router->rifs[rif_index] = ul_rif;
	ul_rif->mlxsw_sp = mlxsw_sp;
	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
	if (err)
		goto ul_rif_op_err;

	return ul_rif;

ul_rif_op_err:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	kfree(ul_rif);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;

	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
	kfree(ul_rif);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
		    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
		return vr->ul_rif;

	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
	if (IS_ERR(vr->ul_rif)) {
		err = PTR_ERR(vr->ul_rif);
		goto err_ul_rif_create;
	}

	vr->rif_count++;
	refcount_set(&vr->ul_rif_refcnt, 1);

	return vr->ul_rif;

err_ul_rif_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
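/* The underlay RIF is reference-counted per virtual router: the first
 * getter creates it and sets ul_rif_refcnt to 1, later getters only
 * take a reference via refcount_inc_not_zero(). A sketch of the
 * expected get/put pairing (caller code is illustrative, not from this
 * file):
 *
 *	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, tb_id, extack);
 *	if (IS_ERR(ul_rif))
 *		return PTR_ERR(ul_rif);
 *	...use ul_rif->rif_index...
 *	mlxsw_sp_ul_rif_put(ul_rif);
 *
 * The last put below destroys the RIF and releases the VR.
 */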
static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	struct mlxsw_sp_vr *vr;

	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];

	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
		return;

	vr->rif_count--;
	mlxsw_sp_ul_rif_destroy(ul_rif);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
			       u16 *ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;

	ASSERT_RTNL();

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);
	*ul_rif_index = ul_rif->rif_index;

	return 0;
}

void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;

	ASSERT_RTNL();

	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
	if (WARN_ON(!ul_rif))
		return;

	mlxsw_sp_ul_rif_put(ul_rif);
}

static int
mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;
	int err;

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = 0;
	lb_rif->ul_rif_id = ul_rif->rif_index;

	return 0;

err_loopback_op:
	mlxsw_sp_ul_rif_put(ul_rif);
	return err;
}

static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;

	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
	mlxsw_sp_ul_rif_put(ul_rif);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
	.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup = mlxsw_sp_rif_ipip_lb_setup,
	.configure = mlxsw_sp2_rif_ipip_lb_configure,
	.deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
};

const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
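/* ECMP hash configuration. The RECR2 register selects which packet
 * fields feed the router's multipath hash. The seed is derived from the
 * switch's base MAC (see mlxsw_sp_mp_hash_init() below), so different
 * systems hash the same flow differently, which helps avoid hash
 * polarization across a multi-tier topology. The enabled fields follow
 * the kernel's multipath hash policy: L3-only (source/destination
 * addresses) or L3 plus L4 ports.
 */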
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
	bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
	if (only_l3) {
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	} else {
		mlxsw_sp_mp_hash_header_set(recr2_pl,
					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
	}
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif
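/* Worked example of the DSCP-to-ToS translation done below: RDPM entry
 * i holds the priority for DSCP value i, and the ToS byte the kernel
 * operates on carries that DSCP in its upper six bits, above the two
 * ECN bits. So for DSCP 46 (EF) the lookup is rt_tos2priority(46 << 2),
 * i.e. ToS 0xb8.
 */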
static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW is determining switch priority based on DSCP-bits, but the
	 * kernel is still doing that based on the ToS. Since there's a
	 * mismatch in bits we need to make sure to translate the right
	 * value ToS would observe, skipping the 2 least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;
	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
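/* Initialization order matters in mlxsw_sp_router_init() below: the FIB
 * notifier is registered last, only once all router state (RIFs, LPM
 * trees, VRs, neighbour tracking) is ready to absorb the replayed FIB
 * dump. The fall-through error labels unwind the completed steps in
 * reverse, and mlxsw_sp_router_fini() releases everything in that same
 * reverse order.
 */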
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
err_dscp_init:
err_mp_hash_init:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
	kfree(mlxsw_sp->router);
}