// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		/* One tree for each protocol: IPv4 and IPv6 */
		struct mlxsw_sp_lpm_tree *proto_trees[2];
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	struct notifier_block inetaddr_nb;
	struct notifier_block inet6addr_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};
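/* The router keeps one default LPM tree per protocol (lpm.proto_trees), which
 * newly created FIBs are bound to, plus a pool of lpm.tree_count trees
 * (lpm.trees) that virtual routers share based on their prefix usage; see
 * mlxsw_sp_lpm_tree_get() below for the sharing logic.
 */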
struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id;	/* Reserved for Spectrum-2. */
	u16 ul_rif_id;	/* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}
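/* Illustrative use of the counter API above, assuming the "erif" dpipe table
 * counters are enabled and the egress counter was allocated for this RIF:
 *
 *	u64 packets;
 *	int err;
 *
 *	err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *					     MLXSW_SP_RIF_COUNTER_EGRESS,
 *					     &packets);
 *
 * -EINVAL indicates that no counter is currently bound in that direction.
 */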
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
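/* MLXSW_SP_PREFIX_COUNT is 129: one bit per possible prefix length of an IPv6
 * address, /0 through /128. IPv4 prefix lengths (/0../32) use the low bits of
 * the same bitmap.
 */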
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};
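/* A virtual router aggregates everything the device needs for one kernel FIB
 * table: a unicast FIB per protocol plus a multicast routing table per
 * protocol. A VR slot counts as used as long as any of these is set; see
 * mlxsw_sp_vr_is_used() below.
 */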
static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
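/* Get a tree for the given prefix usage: reuse an existing tree when one with
 * identical usage and protocol is already allocated, otherwise carve a fresh
 * tree out of the pool.
 */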
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}
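/* For example, routes from the local (255) and default (253) kernel tables
 * are installed into the same virtual router as routes from the main (254)
 * table, so a single VR serves all three.
 */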
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return -ESRCH;
	*vr_id = vr->id;

	return 0;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}
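/* VRs are created on demand: mlxsw_sp_vr_get() instantiates one the first
 * time a table ID is used, and mlxsw_sp_vr_put() tears it down once no RIFs
 * are bound to it, both unicast FIBs are empty and both multicast tables are
 * empty.
 */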
static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}
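/* Promotion flips a TRAP route into an IPIP decap route and rewrites it in
 * the device; if either step fails, the route is demoted back to TRAP, so
 * matching packets still reach the CPU instead of being dropped.
 */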
static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != type)
		return NULL;

	return fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}
static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}
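/* The `start' argument makes the walk above resumable: pass NULL to begin at
 * the head of ipip_list, or a previously returned entry to continue after it.
 * This is how the underlay event handler iterates, e.g.:
 *
 *	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
 *
 *	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
 *								ul_dev,
 *								ipip_entry)))
 *		...;
 */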
bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	enum mlxsw_sp_ipip_type ipipt;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			    ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);

/**
 * Update the offload related to an IPIP entry. This always updates decap, and
 * in addition to that it also:
 * @recreate_loopback: recreates the associated loopback RIF
 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
 *	relevant when recreate_loopback is true.
 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
 *	is only relevant when recreate_loopback is false.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}
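/* The event handlers below combine the flags as follows:
 * - overlay device moved to a different VRF: recreate_loopback only;
 * - underlay device moved to a different VRF: recreate_loopback plus
 *   keep_encap, so that next hops are migrated to the new loopback RIF;
 * - underlay device went up or down: update_nexthops only.
 */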
static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	if (!ipip_entry)
		return 0;

	/* For flat configuration cases, moving overlay to a different VRF might
	 * cause local address conflict, and the conflicting tunnels need to be
	 * demoted.
	 */
	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     struct netlink_ext_ack *extack)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}
static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}

void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}

static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *ul_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
	}
}

int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *ol_dev,
				     unsigned long event,
				     struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_REGISTER:
		return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								    ol_dev,
								    extack);
		return 0;
	case NETDEV_CHANGE:
		extack = info->extack;
		return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							       ol_dev, extack);
	case NETDEV_CHANGEMTU:
		return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
	}
	return 0;
}

static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    extack);
		break;

	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}
	return 0;
}
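/* If updating a tunnel in response to an underlay device event fails, the
 * tunnel can no longer be offloaded faithfully. The handler below therefore
 * demotes every tunnel whose underlay is the affected device, so their
 * traffic falls back to the slow path, and reports the error.
 */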
int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err;

	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			return err;
		}
	}

	return 0;
}

int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip,
				      u32 tunnel_index)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface
	 */
	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		return 0;

	fib_entry->decap.tunnel_index = tunnel_index;
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;

	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_update;

	return 0;

err_fib_entry_update:
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	return err;
}

void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		return;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};
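/* Neighbour entries are keyed by the kernel's struct neighbour pointer itself
 * (struct mlxsw_sp_neigh_key), so the hashtable below hashes the pointer
 * value rather than the neighbour's IP address.
 */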
mlxsw_sp_neigh_entry, ht_node), 1906 .key_len = sizeof(struct mlxsw_sp_neigh_key), 1907 }; 1908 1909 struct mlxsw_sp_neigh_entry * 1910 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif, 1911 struct mlxsw_sp_neigh_entry *neigh_entry) 1912 { 1913 if (!neigh_entry) { 1914 if (list_empty(&rif->neigh_list)) 1915 return NULL; 1916 else 1917 return list_first_entry(&rif->neigh_list, 1918 typeof(*neigh_entry), 1919 rif_list_node); 1920 } 1921 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list)) 1922 return NULL; 1923 return list_next_entry(neigh_entry, rif_list_node); 1924 } 1925 1926 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry) 1927 { 1928 return neigh_entry->key.n->tbl->family; 1929 } 1930 1931 unsigned char * 1932 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry) 1933 { 1934 return neigh_entry->ha; 1935 } 1936 1937 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry) 1938 { 1939 struct neighbour *n; 1940 1941 n = neigh_entry->key.n; 1942 return ntohl(*((__be32 *) n->primary_key)); 1943 } 1944 1945 struct in6_addr * 1946 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry) 1947 { 1948 struct neighbour *n; 1949 1950 n = neigh_entry->key.n; 1951 return (struct in6_addr *) &n->primary_key; 1952 } 1953 1954 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp, 1955 struct mlxsw_sp_neigh_entry *neigh_entry, 1956 u64 *p_counter) 1957 { 1958 if (!neigh_entry->counter_valid) 1959 return -EINVAL; 1960 1961 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index, 1962 p_counter, NULL); 1963 } 1964 1965 static struct mlxsw_sp_neigh_entry * 1966 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n, 1967 u16 rif) 1968 { 1969 struct mlxsw_sp_neigh_entry *neigh_entry; 1970 1971 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL); 1972 if (!neigh_entry) 1973 return NULL; 1974 1975 neigh_entry->key.n = n; 1976 neigh_entry->rif = rif; 1977 INIT_LIST_HEAD(&neigh_entry->nexthop_list); 1978 1979 return neigh_entry; 1980 } 1981 1982 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry) 1983 { 1984 kfree(neigh_entry); 1985 } 1986 1987 static int 1988 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp, 1989 struct mlxsw_sp_neigh_entry *neigh_entry) 1990 { 1991 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht, 1992 &neigh_entry->ht_node, 1993 mlxsw_sp_neigh_ht_params); 1994 } 1995 1996 static void 1997 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, 1998 struct mlxsw_sp_neigh_entry *neigh_entry) 1999 { 2000 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht, 2001 &neigh_entry->ht_node, 2002 mlxsw_sp_neigh_ht_params); 2003 } 2004 2005 static bool 2006 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp, 2007 struct mlxsw_sp_neigh_entry *neigh_entry) 2008 { 2009 struct devlink *devlink; 2010 const char *table_name; 2011 2012 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) { 2013 case AF_INET: 2014 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4; 2015 break; 2016 case AF_INET6: 2017 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6; 2018 break; 2019 default: 2020 WARN_ON(1); 2021 return false; 2022 } 2023 2024 devlink = priv_to_devlink(mlxsw_sp->core); 2025 return devlink_dpipe_table_counter_enabled(devlink, table_name); 2026 } 2027 2028 static void 2029 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp, 2030 struct mlxsw_sp_neigh_entry *neigh_entry) 2031 { 2032 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry)) 2033 return; 
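/* Counter allocation is best-effort: if no flow counter can be allocated, counter_valid simply remains false and the neighbour is programmed without statistics. */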
2034 2035 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index)) 2036 return; 2037 2038 neigh_entry->counter_valid = true; 2039 } 2040 2041 static void 2042 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp, 2043 struct mlxsw_sp_neigh_entry *neigh_entry) 2044 { 2045 if (!neigh_entry->counter_valid) 2046 return; 2047 mlxsw_sp_flow_counter_free(mlxsw_sp, 2048 neigh_entry->counter_index); 2049 neigh_entry->counter_valid = false; 2050 } 2051 2052 static struct mlxsw_sp_neigh_entry * 2053 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) 2054 { 2055 struct mlxsw_sp_neigh_entry *neigh_entry; 2056 struct mlxsw_sp_rif *rif; 2057 int err; 2058 2059 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); 2060 if (!rif) 2061 return ERR_PTR(-EINVAL); 2062 2063 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index); 2064 if (!neigh_entry) 2065 return ERR_PTR(-ENOMEM); 2066 2067 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); 2068 if (err) 2069 goto err_neigh_entry_insert; 2070 2071 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry); 2072 list_add(&neigh_entry->rif_list_node, &rif->neigh_list); 2073 2074 return neigh_entry; 2075 2076 err_neigh_entry_insert: 2077 mlxsw_sp_neigh_entry_free(neigh_entry); 2078 return ERR_PTR(err); 2079 } 2080 2081 static void 2082 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp, 2083 struct mlxsw_sp_neigh_entry *neigh_entry) 2084 { 2085 list_del(&neigh_entry->rif_list_node); 2086 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry); 2087 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); 2088 mlxsw_sp_neigh_entry_free(neigh_entry); 2089 } 2090 2091 static struct mlxsw_sp_neigh_entry * 2092 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) 2093 { 2094 struct mlxsw_sp_neigh_key key; 2095 2096 key.n = n; 2097 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht, 2098 &key, mlxsw_sp_neigh_ht_params); 2099 } 2100 2101 static void 2102 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp) 2103 { 2104 unsigned long interval; 2105 2106 #if IS_ENABLED(CONFIG_IPV6) 2107 interval = min_t(unsigned long, 2108 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME), 2109 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME)); 2110 #else 2111 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); 2112 #endif 2113 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval); 2114 } 2115 2116 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, 2117 char *rauhtd_pl, 2118 int ent_index) 2119 { 2120 struct net_device *dev; 2121 struct neighbour *n; 2122 __be32 dipn; 2123 u32 dip; 2124 u16 rif; 2125 2126 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip); 2127 2128 if (!mlxsw_sp->router->rifs[rif]) { 2129 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n"); 2130 return; 2131 } 2132 2133 dipn = htonl(dip); 2134 dev = mlxsw_sp->router->rifs[rif]->dev; 2135 n = neigh_lookup(&arp_tbl, &dipn, dev); 2136 if (!n) 2137 return; 2138 2139 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip); 2140 neigh_event_send(n, NULL); 2141 neigh_release(n); 2142 } 2143 2144 #if IS_ENABLED(CONFIG_IPV6) 2145 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, 2146 char *rauhtd_pl, 2147 int rec_index) 2148 { 2149 struct net_device *dev; 2150 struct neighbour *n; 2151 struct in6_addr dip; 2152 u16 rif; 2153 2154 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif, 2155 (char *) &dip); 2156 2157 if 
(!mlxsw_sp->router->rifs[rif]) { 2158 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n"); 2159 return; 2160 } 2161 2162 dev = mlxsw_sp->router->rifs[rif]->dev; 2163 n = neigh_lookup(&nd_tbl, &dip, dev); 2164 if (!n) 2165 return; 2166 2167 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); 2168 neigh_event_send(n, NULL); 2169 neigh_release(n); 2170 } 2171 #else 2172 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, 2173 char *rauhtd_pl, 2174 int rec_index) 2175 { 2176 } 2177 #endif 2178 2179 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, 2180 char *rauhtd_pl, 2181 int rec_index) 2182 { 2183 u8 num_entries; 2184 int i; 2185 2186 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, 2187 rec_index); 2188 /* Hardware starts counting at 0, so add 1. */ 2189 num_entries++; 2190 2191 /* Each record consists of several neighbour entries. */ 2192 for (i = 0; i < num_entries; i++) { 2193 int ent_index; 2194 2195 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i; 2196 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl, 2197 ent_index); 2198 } 2199 2200 } 2201 2202 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp, 2203 char *rauhtd_pl, 2204 int rec_index) 2205 { 2206 /* One record contains one entry. */ 2207 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl, 2208 rec_index); 2209 } 2210 2211 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, 2212 char *rauhtd_pl, int rec_index) 2213 { 2214 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) { 2215 case MLXSW_REG_RAUHTD_TYPE_IPV4: 2216 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl, 2217 rec_index); 2218 break; 2219 case MLXSW_REG_RAUHTD_TYPE_IPV6: 2220 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl, 2221 rec_index); 2222 break; 2223 } 2224 } 2225 2226 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl) 2227 { 2228 u8 num_rec, last_rec_index, num_entries; 2229 2230 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); 2231 last_rec_index = num_rec - 1; 2232 2233 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM) 2234 return false; 2235 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) == 2236 MLXSW_REG_RAUHTD_TYPE_IPV6) 2237 return true; 2238 2239 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, 2240 last_rec_index); 2241 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC) 2242 return true; 2243 return false; 2244 } 2245 2246 static int 2247 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp, 2248 char *rauhtd_pl, 2249 enum mlxsw_reg_rauhtd_type type) 2250 { 2251 int i, num_rec; 2252 int err; 2253 2254 /* Make sure the neighbour's netdev isn't removed in the 2255 * process. 
2256 */ 2257 rtnl_lock(); 2258 do { 2259 mlxsw_reg_rauhtd_pack(rauhtd_pl, type); 2260 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd), 2261 rauhtd_pl); 2262 if (err) { 2263 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n"); 2264 break; 2265 } 2266 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); 2267 for (i = 0; i < num_rec; i++) 2268 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, 2269 i); 2270 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl)); 2271 rtnl_unlock(); 2272 2273 return err; 2274 } 2275 2276 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) 2277 { 2278 enum mlxsw_reg_rauhtd_type type; 2279 char *rauhtd_pl; 2280 int err; 2281 2282 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL); 2283 if (!rauhtd_pl) 2284 return -ENOMEM; 2285 2286 type = MLXSW_REG_RAUHTD_TYPE_IPV4; 2287 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type); 2288 if (err) 2289 goto out; 2290 2291 type = MLXSW_REG_RAUHTD_TYPE_IPV6; 2292 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type); 2293 out: 2294 kfree(rauhtd_pl); 2295 return err; 2296 } 2297 2298 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp) 2299 { 2300 struct mlxsw_sp_neigh_entry *neigh_entry; 2301 2302 /* Take the RTNL mutex here to prevent the lists from changing */ 2303 rtnl_lock(); 2304 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list, 2305 nexthop_neighs_list_node) 2306 /* If this neigh has nexthops, make the kernel think this neigh 2307 * is active regardless of the traffic. 2308 */ 2309 neigh_event_send(neigh_entry->key.n, NULL); 2310 rtnl_unlock(); 2311 } 2312 2313 static void 2314 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp) 2315 { 2316 unsigned long interval = mlxsw_sp->router->neighs_update.interval; 2317 2318 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 2319 msecs_to_jiffies(interval)); 2320 } 2321 2322 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work) 2323 { 2324 struct mlxsw_sp_router *router; 2325 int err; 2326 2327 router = container_of(work, struct mlxsw_sp_router, 2328 neighs_update.dw.work); 2329 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp); 2330 if (err) 2331 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity"); 2332 2333 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp); 2334 2335 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp); 2336 } 2337 2338 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) 2339 { 2340 struct mlxsw_sp_neigh_entry *neigh_entry; 2341 struct mlxsw_sp_router *router; 2342 2343 router = container_of(work, struct mlxsw_sp_router, 2344 nexthop_probe_dw.work); 2345 /* Iterate over the nexthop neighbours, find those that are unresolved 2346 * and send ARP probes for them. This solves the chicken-and-egg problem 2347 * where a nexthop is not offloaded until its neighbour is resolved, but 2348 * the neighbour is never resolved as long as traffic already flows in 2349 * HW via a different nexthop. 2350 * 2351 * Take the RTNL mutex here to prevent the lists from changing.
2352 */ 2353 rtnl_lock(); 2354 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list, 2355 nexthop_neighs_list_node) 2356 if (!neigh_entry->connected) 2357 neigh_event_send(neigh_entry->key.n, NULL); 2358 rtnl_unlock(); 2359 2360 mlxsw_core_schedule_dw(&router->nexthop_probe_dw, 2361 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL); 2362 } 2363 2364 static void 2365 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, 2366 struct mlxsw_sp_neigh_entry *neigh_entry, 2367 bool removing, bool dead); 2368 2369 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding) 2370 { 2371 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD : 2372 MLXSW_REG_RAUHT_OP_WRITE_DELETE; 2373 } 2374 2375 static int 2376 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp, 2377 struct mlxsw_sp_neigh_entry *neigh_entry, 2378 enum mlxsw_reg_rauht_op op) 2379 { 2380 struct neighbour *n = neigh_entry->key.n; 2381 u32 dip = ntohl(*((__be32 *) n->primary_key)); 2382 char rauht_pl[MLXSW_REG_RAUHT_LEN]; 2383 2384 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha, 2385 dip); 2386 if (neigh_entry->counter_valid) 2387 mlxsw_reg_rauht_pack_counter(rauht_pl, 2388 neigh_entry->counter_index); 2389 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); 2390 } 2391 2392 static int 2393 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp, 2394 struct mlxsw_sp_neigh_entry *neigh_entry, 2395 enum mlxsw_reg_rauht_op op) 2396 { 2397 struct neighbour *n = neigh_entry->key.n; 2398 char rauht_pl[MLXSW_REG_RAUHT_LEN]; 2399 const char *dip = n->primary_key; 2400 2401 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha, 2402 dip); 2403 if (neigh_entry->counter_valid) 2404 mlxsw_reg_rauht_pack_counter(rauht_pl, 2405 neigh_entry->counter_index); 2406 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); 2407 } 2408 2409 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry) 2410 { 2411 struct neighbour *n = neigh_entry->key.n; 2412 2413 /* Packets with a link-local destination address are trapped 2414 * after LPM lookup and never reach the neighbour table, so 2415 * there is no need to program such neighbours to the device. 
2416 */ 2417 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) & 2418 IPV6_ADDR_LINKLOCAL) 2419 return true; 2420 return false; 2421 } 2422 2423 static void 2424 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, 2425 struct mlxsw_sp_neigh_entry *neigh_entry, 2426 bool adding) 2427 { 2428 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding); 2429 int err; 2430 2431 if (!adding && !neigh_entry->connected) 2432 return; 2433 neigh_entry->connected = adding; 2434 if (neigh_entry->key.n->tbl->family == AF_INET) { 2435 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry, 2436 op); 2437 if (err) 2438 return; 2439 } else if (neigh_entry->key.n->tbl->family == AF_INET6) { 2440 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry)) 2441 return; 2442 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry, 2443 op); 2444 if (err) 2445 return; 2446 } else { 2447 WARN_ON_ONCE(1); 2448 return; 2449 } 2450 2451 if (adding) 2452 neigh_entry->key.n->flags |= NTF_OFFLOADED; 2453 else 2454 neigh_entry->key.n->flags &= ~NTF_OFFLOADED; 2455 } 2456 2457 void 2458 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp, 2459 struct mlxsw_sp_neigh_entry *neigh_entry, 2460 bool adding) 2461 { 2462 if (adding) 2463 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry); 2464 else 2465 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry); 2466 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true); 2467 } 2468 2469 struct mlxsw_sp_netevent_work { 2470 struct work_struct work; 2471 struct mlxsw_sp *mlxsw_sp; 2472 struct neighbour *n; 2473 }; 2474 2475 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) 2476 { 2477 struct mlxsw_sp_netevent_work *net_work = 2478 container_of(work, struct mlxsw_sp_netevent_work, work); 2479 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp; 2480 struct mlxsw_sp_neigh_entry *neigh_entry; 2481 struct neighbour *n = net_work->n; 2482 unsigned char ha[ETH_ALEN]; 2483 bool entry_connected; 2484 u8 nud_state, dead; 2485 2486 /* If these parameters are changed after we release the lock, 2487 * then we are guaranteed to receive another event letting us 2488 * know about it. 
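* It is therefore safe to operate on the snapshot of ha, nud_state and dead taken under the lock below.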
2489 */ 2490 read_lock_bh(&n->lock); 2491 memcpy(ha, n->ha, ETH_ALEN); 2492 nud_state = n->nud_state; 2493 dead = n->dead; 2494 read_unlock_bh(&n->lock); 2495 2496 rtnl_lock(); 2497 mlxsw_sp_span_respin(mlxsw_sp); 2498 2499 entry_connected = nud_state & NUD_VALID && !dead; 2500 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); 2501 if (!entry_connected && !neigh_entry) 2502 goto out; 2503 if (!neigh_entry) { 2504 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); 2505 if (IS_ERR(neigh_entry)) 2506 goto out; 2507 } 2508 2509 memcpy(neigh_entry->ha, ha, ETH_ALEN); 2510 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected); 2511 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected, 2512 dead); 2513 2514 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) 2515 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); 2516 2517 out: 2518 rtnl_unlock(); 2519 neigh_release(n); 2520 kfree(net_work); 2521 } 2522 2523 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp); 2524 2525 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work) 2526 { 2527 struct mlxsw_sp_netevent_work *net_work = 2528 container_of(work, struct mlxsw_sp_netevent_work, work); 2529 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp; 2530 2531 mlxsw_sp_mp_hash_init(mlxsw_sp); 2532 kfree(net_work); 2533 } 2534 2535 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); 2536 2537 static void mlxsw_sp_router_update_priority_work(struct work_struct *work) 2538 { 2539 struct mlxsw_sp_netevent_work *net_work = 2540 container_of(work, struct mlxsw_sp_netevent_work, work); 2541 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp; 2542 2543 __mlxsw_sp_router_init(mlxsw_sp); 2544 kfree(net_work); 2545 } 2546 2547 static int mlxsw_sp_router_schedule_work(struct net *net, 2548 struct notifier_block *nb, 2549 void (*cb)(struct work_struct *)) 2550 { 2551 struct mlxsw_sp_netevent_work *net_work; 2552 struct mlxsw_sp_router *router; 2553 2554 if (!net_eq(net, &init_net)) 2555 return NOTIFY_DONE; 2556 2557 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); 2558 if (!net_work) 2559 return NOTIFY_BAD; 2560 2561 router = container_of(nb, struct mlxsw_sp_router, netevent_nb); 2562 INIT_WORK(&net_work->work, cb); 2563 net_work->mlxsw_sp = router->mlxsw_sp; 2564 mlxsw_core_schedule_work(&net_work->work); 2565 return NOTIFY_DONE; 2566 } 2567 2568 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, 2569 unsigned long event, void *ptr) 2570 { 2571 struct mlxsw_sp_netevent_work *net_work; 2572 struct mlxsw_sp_port *mlxsw_sp_port; 2573 struct mlxsw_sp *mlxsw_sp; 2574 unsigned long interval; 2575 struct neigh_parms *p; 2576 struct neighbour *n; 2577 2578 switch (event) { 2579 case NETEVENT_DELAY_PROBE_TIME_UPDATE: 2580 p = ptr; 2581 2582 /* We don't care about changes in the default table. */ 2583 if (!p->dev || (p->tbl->family != AF_INET && 2584 p->tbl->family != AF_INET6)) 2585 return NOTIFY_DONE; 2586 2587 /* We are in atomic context and can't take RTNL mutex, 2588 * so use RCU variant to walk the device chain. 
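* mlxsw_sp_port_lower_dev_hold() performs that walk and takes a reference on the port, which is dropped via mlxsw_sp_port_dev_put() below.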
2589 */ 2590 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev); 2591 if (!mlxsw_sp_port) 2592 return NOTIFY_DONE; 2593 2594 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2595 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME)); 2596 mlxsw_sp->router->neighs_update.interval = interval; 2597 2598 mlxsw_sp_port_dev_put(mlxsw_sp_port); 2599 break; 2600 case NETEVENT_NEIGH_UPDATE: 2601 n = ptr; 2602 2603 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6) 2604 return NOTIFY_DONE; 2605 2606 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev); 2607 if (!mlxsw_sp_port) 2608 return NOTIFY_DONE; 2609 2610 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); 2611 if (!net_work) { 2612 mlxsw_sp_port_dev_put(mlxsw_sp_port); 2613 return NOTIFY_BAD; 2614 } 2615 2616 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work); 2617 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 2618 net_work->n = n; 2619 2620 /* Take a reference to ensure the neighbour won't be 2621 * destructed until we drop the reference in delayed 2622 * work. 2623 */ 2624 neigh_clone(n); 2625 mlxsw_core_schedule_work(&net_work->work); 2626 mlxsw_sp_port_dev_put(mlxsw_sp_port); 2627 break; 2628 case NETEVENT_IPV4_MPATH_HASH_UPDATE: 2629 case NETEVENT_IPV6_MPATH_HASH_UPDATE: 2630 return mlxsw_sp_router_schedule_work(ptr, nb, 2631 mlxsw_sp_router_mp_hash_event_work); 2632 2633 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE: 2634 return mlxsw_sp_router_schedule_work(ptr, nb, 2635 mlxsw_sp_router_update_priority_work); 2636 } 2637 2638 return NOTIFY_DONE; 2639 } 2640 2641 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp) 2642 { 2643 int err; 2644 2645 err = rhashtable_init(&mlxsw_sp->router->neigh_ht, 2646 &mlxsw_sp_neigh_ht_params); 2647 if (err) 2648 return err; 2649 2650 /* Initialize the polling interval according to the default 2651 * table. 
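* The interval ends up being the minimum of the IPv4 ARP and IPv6 ND DELAY_PROBE_TIME values when IPv6 is enabled.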
2652 */ 2653 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp); 2654 2655 /* Create the delayed works for neighbour activity update and unresolved nexthop probing */ 2656 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw, 2657 mlxsw_sp_router_neighs_update_work); 2658 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw, 2659 mlxsw_sp_router_probe_unresolved_nexthops); 2660 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0); 2661 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0); 2662 return 0; 2663 } 2664 2665 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) 2666 { 2667 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw); 2668 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw); 2669 rhashtable_destroy(&mlxsw_sp->router->neigh_ht); 2670 } 2671 2672 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, 2673 struct mlxsw_sp_rif *rif) 2674 { 2675 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; 2676 2677 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list, 2678 rif_list_node) { 2679 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false); 2680 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); 2681 } 2682 } 2683 2684 enum mlxsw_sp_nexthop_type { 2685 MLXSW_SP_NEXTHOP_TYPE_ETH, 2686 MLXSW_SP_NEXTHOP_TYPE_IPIP, 2687 }; 2688 2689 struct mlxsw_sp_nexthop_key { 2690 struct fib_nh *fib_nh; 2691 }; 2692 2693 struct mlxsw_sp_nexthop { 2694 struct list_head neigh_list_node; /* member of neigh entry list */ 2695 struct list_head rif_list_node; 2696 struct list_head router_list_node; 2697 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group 2698 * this belongs to 2699 */ 2700 struct rhash_head ht_node; 2701 struct mlxsw_sp_nexthop_key key; 2702 unsigned char gw_addr[sizeof(struct in6_addr)]; 2703 int ifindex; 2704 int nh_weight; 2705 int norm_nh_weight; 2706 int num_adj_entries; 2707 struct mlxsw_sp_rif *rif; 2708 u8 should_offload:1, /* set indicates this neigh is connected and 2709 * should be put into the KVD linear area of this group. 2710 */ 2711 offloaded:1, /* set in case the neigh is actually put into 2712 * the KVD linear area of this group.
2713 */ 2714 update:1; /* set indicates that MAC of this neigh should be 2715 * updated in HW 2716 */ 2717 enum mlxsw_sp_nexthop_type type; 2718 union { 2719 struct mlxsw_sp_neigh_entry *neigh_entry; 2720 struct mlxsw_sp_ipip_entry *ipip_entry; 2721 }; 2722 unsigned int counter_index; 2723 bool counter_valid; 2724 }; 2725 2726 struct mlxsw_sp_nexthop_group { 2727 void *priv; 2728 struct rhash_head ht_node; 2729 struct list_head fib_list; /* list of fib entries that use this group */ 2730 struct neigh_table *neigh_tbl; 2731 u8 adj_index_valid:1, 2732 gateway:1; /* routes using the group use a gateway */ 2733 u32 adj_index; 2734 u16 ecmp_size; 2735 u16 count; 2736 int sum_norm_weight; 2737 struct mlxsw_sp_nexthop nexthops[0]; 2738 #define nh_rif nexthops[0].rif 2739 }; 2740 2741 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, 2742 struct mlxsw_sp_nexthop *nh) 2743 { 2744 struct devlink *devlink; 2745 2746 devlink = priv_to_devlink(mlxsw_sp->core); 2747 if (!devlink_dpipe_table_counter_enabled(devlink, 2748 MLXSW_SP_DPIPE_TABLE_NAME_ADJ)) 2749 return; 2750 2751 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index)) 2752 return; 2753 2754 nh->counter_valid = true; 2755 } 2756 2757 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp, 2758 struct mlxsw_sp_nexthop *nh) 2759 { 2760 if (!nh->counter_valid) 2761 return; 2762 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index); 2763 nh->counter_valid = false; 2764 } 2765 2766 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp, 2767 struct mlxsw_sp_nexthop *nh, u64 *p_counter) 2768 { 2769 if (!nh->counter_valid) 2770 return -EINVAL; 2771 2772 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index, 2773 p_counter, NULL); 2774 } 2775 2776 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, 2777 struct mlxsw_sp_nexthop *nh) 2778 { 2779 if (!nh) { 2780 if (list_empty(&router->nexthop_list)) 2781 return NULL; 2782 else 2783 return list_first_entry(&router->nexthop_list, 2784 typeof(*nh), router_list_node); 2785 } 2786 if (list_is_last(&nh->router_list_node, &router->nexthop_list)) 2787 return NULL; 2788 return list_next_entry(nh, router_list_node); 2789 } 2790 2791 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh) 2792 { 2793 return nh->offloaded; 2794 } 2795 2796 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh) 2797 { 2798 if (!nh->offloaded) 2799 return NULL; 2800 return nh->neigh_entry->ha; 2801 } 2802 2803 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, 2804 u32 *p_adj_size, u32 *p_adj_hash_index) 2805 { 2806 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; 2807 u32 adj_hash_index = 0; 2808 int i; 2809 2810 if (!nh->offloaded || !nh_grp->adj_index_valid) 2811 return -EINVAL; 2812 2813 *p_adj_index = nh_grp->adj_index; 2814 *p_adj_size = nh_grp->ecmp_size; 2815 2816 for (i = 0; i < nh_grp->count; i++) { 2817 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i]; 2818 2819 if (nh_iter == nh) 2820 break; 2821 if (nh_iter->offloaded) 2822 adj_hash_index += nh_iter->num_adj_entries; 2823 } 2824 2825 *p_adj_hash_index = adj_hash_index; 2826 return 0; 2827 } 2828 2829 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh) 2830 { 2831 return nh->rif; 2832 } 2833 2834 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh) 2835 { 2836 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; 2837 int i; 2838 2839 for (i = 0; i < nh_grp->count; i++) { 2840 struct mlxsw_sp_nexthop *nh_iter = 
&nh_grp->nexthops[i]; 2841 2842 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP) 2843 return true; 2844 } 2845 return false; 2846 } 2847 2848 static struct fib_info * 2849 mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp) 2850 { 2851 return nh_grp->priv; 2852 } 2853 2854 struct mlxsw_sp_nexthop_group_cmp_arg { 2855 enum mlxsw_sp_l3proto proto; 2856 union { 2857 struct fib_info *fi; 2858 struct mlxsw_sp_fib6_entry *fib6_entry; 2859 }; 2860 }; 2861 2862 static bool 2863 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp, 2864 const struct in6_addr *gw, int ifindex, 2865 int weight) 2866 { 2867 int i; 2868 2869 for (i = 0; i < nh_grp->count; i++) { 2870 const struct mlxsw_sp_nexthop *nh; 2871 2872 nh = &nh_grp->nexthops[i]; 2873 if (nh->ifindex == ifindex && nh->nh_weight == weight && 2874 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr)) 2875 return true; 2876 } 2877 2878 return false; 2879 } 2880 2881 static bool 2882 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp, 2883 const struct mlxsw_sp_fib6_entry *fib6_entry) 2884 { 2885 struct mlxsw_sp_rt6 *mlxsw_sp_rt6; 2886 2887 if (nh_grp->count != fib6_entry->nrt6) 2888 return false; 2889 2890 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { 2891 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh; 2892 struct in6_addr *gw; 2893 int ifindex, weight; 2894 2895 ifindex = fib6_nh->fib_nh_dev->ifindex; 2896 weight = fib6_nh->fib_nh_weight; 2897 gw = &fib6_nh->fib_nh_gw6; 2898 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex, 2899 weight)) 2900 return false; 2901 } 2902 2903 return true; 2904 } 2905 2906 static int 2907 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr) 2908 { 2909 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key; 2910 const struct mlxsw_sp_nexthop_group *nh_grp = ptr; 2911 2912 switch (cmp_arg->proto) { 2913 case MLXSW_SP_L3_PROTO_IPV4: 2914 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp); 2915 case MLXSW_SP_L3_PROTO_IPV6: 2916 return !mlxsw_sp_nexthop6_group_cmp(nh_grp, 2917 cmp_arg->fib6_entry); 2918 default: 2919 WARN_ON(1); 2920 return 1; 2921 } 2922 } 2923 2924 static int 2925 mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp) 2926 { 2927 return nh_grp->neigh_tbl->family; 2928 } 2929 2930 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed) 2931 { 2932 const struct mlxsw_sp_nexthop_group *nh_grp = data; 2933 const struct mlxsw_sp_nexthop *nh; 2934 struct fib_info *fi; 2935 unsigned int val; 2936 int i; 2937 2938 switch (mlxsw_sp_nexthop_group_type(nh_grp)) { 2939 case AF_INET: 2940 fi = mlxsw_sp_nexthop4_group_fi(nh_grp); 2941 return jhash(&fi, sizeof(fi), seed); 2942 case AF_INET6: 2943 val = nh_grp->count; 2944 for (i = 0; i < nh_grp->count; i++) { 2945 nh = &nh_grp->nexthops[i]; 2946 val ^= nh->ifindex; 2947 } 2948 return jhash(&val, sizeof(val), seed); 2949 default: 2950 WARN_ON(1); 2951 return 0; 2952 } 2953 } 2954 2955 static u32 2956 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed) 2957 { 2958 unsigned int val = fib6_entry->nrt6; 2959 struct mlxsw_sp_rt6 *mlxsw_sp_rt6; 2960 struct net_device *dev; 2961 2962 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { 2963 dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev; 2964 val ^= dev->ifindex; 2965 } 2966 2967 return jhash(&val, sizeof(val), seed); 2968 } 2969 2970 static u32 2971 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, 
u32 seed) 2972 { 2973 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data; 2974 2975 switch (cmp_arg->proto) { 2976 case MLXSW_SP_L3_PROTO_IPV4: 2977 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed); 2978 case MLXSW_SP_L3_PROTO_IPV6: 2979 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed); 2980 default: 2981 WARN_ON(1); 2982 return 0; 2983 } 2984 } 2985 2986 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { 2987 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node), 2988 .hashfn = mlxsw_sp_nexthop_group_hash, 2989 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj, 2990 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp, 2991 }; 2992 2993 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, 2994 struct mlxsw_sp_nexthop_group *nh_grp) 2995 { 2996 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 && 2997 !nh_grp->gateway) 2998 return 0; 2999 3000 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht, 3001 &nh_grp->ht_node, 3002 mlxsw_sp_nexthop_group_ht_params); 3003 } 3004 3005 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp, 3006 struct mlxsw_sp_nexthop_group *nh_grp) 3007 { 3008 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 && 3009 !nh_grp->gateway) 3010 return; 3011 3012 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht, 3013 &nh_grp->ht_node, 3014 mlxsw_sp_nexthop_group_ht_params); 3015 } 3016 3017 static struct mlxsw_sp_nexthop_group * 3018 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp, 3019 struct fib_info *fi) 3020 { 3021 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; 3022 3023 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4; 3024 cmp_arg.fi = fi; 3025 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, 3026 &cmp_arg, 3027 mlxsw_sp_nexthop_group_ht_params); 3028 } 3029 3030 static struct mlxsw_sp_nexthop_group * 3031 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp, 3032 struct mlxsw_sp_fib6_entry *fib6_entry) 3033 { 3034 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; 3035 3036 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6; 3037 cmp_arg.fib6_entry = fib6_entry; 3038 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, 3039 &cmp_arg, 3040 mlxsw_sp_nexthop_group_ht_params); 3041 } 3042 3043 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = { 3044 .key_offset = offsetof(struct mlxsw_sp_nexthop, key), 3045 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node), 3046 .key_len = sizeof(struct mlxsw_sp_nexthop_key), 3047 }; 3048 3049 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp, 3050 struct mlxsw_sp_nexthop *nh) 3051 { 3052 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht, 3053 &nh->ht_node, mlxsw_sp_nexthop_ht_params); 3054 } 3055 3056 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp, 3057 struct mlxsw_sp_nexthop *nh) 3058 { 3059 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node, 3060 mlxsw_sp_nexthop_ht_params); 3061 } 3062 3063 static struct mlxsw_sp_nexthop * 3064 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp, 3065 struct mlxsw_sp_nexthop_key key) 3066 { 3067 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key, 3068 mlxsw_sp_nexthop_ht_params); 3069 } 3070 3071 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, 3072 const struct mlxsw_sp_fib *fib, 3073 u32 adj_index, u16 ecmp_size, 3074 u32 new_adj_index, 3075 u16 new_ecmp_size) 3076 { 3077 char raleu_pl[MLXSW_REG_RALEU_LEN]; 3078 3079 mlxsw_reg_raleu_pack(raleu_pl, 3080 (enum 
mlxsw_reg_ralxx_protocol) fib->proto, 3081 fib->vr->id, adj_index, ecmp_size, new_adj_index, 3082 new_ecmp_size); 3083 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl); 3084 } 3085 3086 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, 3087 struct mlxsw_sp_nexthop_group *nh_grp, 3088 u32 old_adj_index, u16 old_ecmp_size) 3089 { 3090 struct mlxsw_sp_fib_entry *fib_entry; 3091 struct mlxsw_sp_fib *fib = NULL; 3092 int err; 3093 3094 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { 3095 if (fib == fib_entry->fib_node->fib) 3096 continue; 3097 fib = fib_entry->fib_node->fib; 3098 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib, 3099 old_adj_index, 3100 old_ecmp_size, 3101 nh_grp->adj_index, 3102 nh_grp->ecmp_size); 3103 if (err) 3104 return err; 3105 } 3106 return 0; 3107 } 3108 3109 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, 3110 struct mlxsw_sp_nexthop *nh) 3111 { 3112 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; 3113 char ratr_pl[MLXSW_REG_RATR_LEN]; 3114 3115 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, 3116 true, MLXSW_REG_RATR_TYPE_ETHERNET, 3117 adj_index, neigh_entry->rif); 3118 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); 3119 if (nh->counter_valid) 3120 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true); 3121 else 3122 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false); 3123 3124 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); 3125 } 3126 3127 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, 3128 struct mlxsw_sp_nexthop *nh) 3129 { 3130 int i; 3131 3132 for (i = 0; i < nh->num_adj_entries; i++) { 3133 int err; 3134 3135 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh); 3136 if (err) 3137 return err; 3138 } 3139 3140 return 0; 3141 } 3142 3143 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, 3144 u32 adj_index, 3145 struct mlxsw_sp_nexthop *nh) 3146 { 3147 const struct mlxsw_sp_ipip_ops *ipip_ops; 3148 3149 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt]; 3150 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry); 3151 } 3152 3153 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, 3154 u32 adj_index, 3155 struct mlxsw_sp_nexthop *nh) 3156 { 3157 int i; 3158 3159 for (i = 0; i < nh->num_adj_entries; i++) { 3160 int err; 3161 3162 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i, 3163 nh); 3164 if (err) 3165 return err; 3166 } 3167 3168 return 0; 3169 } 3170 3171 static int 3172 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, 3173 struct mlxsw_sp_nexthop_group *nh_grp, 3174 bool reallocate) 3175 { 3176 u32 adj_index = nh_grp->adj_index; /* base */ 3177 struct mlxsw_sp_nexthop *nh; 3178 int i; 3179 int err; 3180 3181 for (i = 0; i < nh_grp->count; i++) { 3182 nh = &nh_grp->nexthops[i]; 3183 3184 if (!nh->should_offload) { 3185 nh->offloaded = 0; 3186 continue; 3187 } 3188 3189 if (nh->update || reallocate) { 3190 switch (nh->type) { 3191 case MLXSW_SP_NEXTHOP_TYPE_ETH: 3192 err = mlxsw_sp_nexthop_update 3193 (mlxsw_sp, adj_index, nh); 3194 break; 3195 case MLXSW_SP_NEXTHOP_TYPE_IPIP: 3196 err = mlxsw_sp_nexthop_ipip_update 3197 (mlxsw_sp, adj_index, nh); 3198 break; 3199 } 3200 if (err) 3201 return err; 3202 nh->update = 0; 3203 nh->offloaded = 1; 3204 } 3205 adj_index += nh->num_adj_entries; 3206 } 3207 return 0; 3208 } 3209 3210 static bool 3211 mlxsw_sp_fib_node_entry_is_first(const struct 
mlxsw_sp_fib_node *fib_node, 3212 const struct mlxsw_sp_fib_entry *fib_entry); 3213 3214 static int 3215 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, 3216 struct mlxsw_sp_nexthop_group *nh_grp) 3217 { 3218 struct mlxsw_sp_fib_entry *fib_entry; 3219 int err; 3220 3221 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { 3222 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node, 3223 fib_entry)) 3224 continue; 3225 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); 3226 if (err) 3227 return err; 3228 } 3229 return 0; 3230 } 3231 3232 static void 3233 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, 3234 enum mlxsw_reg_ralue_op op, int err); 3235 3236 static void 3237 mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp) 3238 { 3239 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE; 3240 struct mlxsw_sp_fib_entry *fib_entry; 3241 3242 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { 3243 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node, 3244 fib_entry)) 3245 continue; 3246 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0); 3247 } 3248 } 3249 3250 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size) 3251 { 3252 /* Valid sizes for an adjacency group are: 3253 * 1-64, 512, 1024, 2048 and 4096. 3254 */ 3255 if (*p_adj_grp_size <= 64) 3256 return; 3257 else if (*p_adj_grp_size <= 512) 3258 *p_adj_grp_size = 512; 3259 else if (*p_adj_grp_size <= 1024) 3260 *p_adj_grp_size = 1024; 3261 else if (*p_adj_grp_size <= 2048) 3262 *p_adj_grp_size = 2048; 3263 else 3264 *p_adj_grp_size = 4096; 3265 } 3266 3267 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size, 3268 unsigned int alloc_size) 3269 { 3270 if (alloc_size >= 4096) 3271 *p_adj_grp_size = 4096; 3272 else if (alloc_size >= 2048) 3273 *p_adj_grp_size = 2048; 3274 else if (alloc_size >= 1024) 3275 *p_adj_grp_size = 1024; 3276 else if (alloc_size >= 512) 3277 *p_adj_grp_size = 512; 3278 } 3279 3280 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, 3281 u16 *p_adj_grp_size) 3282 { 3283 unsigned int alloc_size; 3284 int err; 3285 3286 /* Round up the requested group size to the next size supported 3287 * by the device and make sure the request can be satisfied. 3288 */ 3289 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size); 3290 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp, 3291 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 3292 *p_adj_grp_size, &alloc_size); 3293 if (err) 3294 return err; 3295 /* It is possible the allocation results in more allocated 3296 * entries than requested. Try to use as many of them as 3297 * possible.
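* For example, if a request for 512 entries would be satisfied by a 1024-entry allocation, the group is grown to 1024 entries so that none of the allocated entries go unused.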
3298 */ 3299 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size); 3300 3301 return 0; 3302 } 3303 3304 static void 3305 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp) 3306 { 3307 int i, g = 0, sum_norm_weight = 0; 3308 struct mlxsw_sp_nexthop *nh; 3309 3310 for (i = 0; i < nh_grp->count; i++) { 3311 nh = &nh_grp->nexthops[i]; 3312 3313 if (!nh->should_offload) 3314 continue; 3315 if (g > 0) 3316 g = gcd(nh->nh_weight, g); 3317 else 3318 g = nh->nh_weight; 3319 } 3320 3321 for (i = 0; i < nh_grp->count; i++) { 3322 nh = &nh_grp->nexthops[i]; 3323 3324 if (!nh->should_offload) 3325 continue; 3326 nh->norm_nh_weight = nh->nh_weight / g; 3327 sum_norm_weight += nh->norm_nh_weight; 3328 } 3329 3330 nh_grp->sum_norm_weight = sum_norm_weight; 3331 } 3332 3333 static void 3334 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp) 3335 { 3336 int total = nh_grp->sum_norm_weight; 3337 u16 ecmp_size = nh_grp->ecmp_size; 3338 int i, weight = 0, lower_bound = 0; 3339 3340 for (i = 0; i < nh_grp->count; i++) { 3341 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; 3342 int upper_bound; 3343 3344 if (!nh->should_offload) 3345 continue; 3346 weight += nh->norm_nh_weight; 3347 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total); 3348 nh->num_adj_entries = upper_bound - lower_bound; 3349 lower_bound = upper_bound; 3350 } 3351 } 3352 3353 static void 3354 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, 3355 struct mlxsw_sp_nexthop_group *nh_grp) 3356 { 3357 u16 ecmp_size, old_ecmp_size; 3358 struct mlxsw_sp_nexthop *nh; 3359 bool offload_change = false; 3360 u32 adj_index; 3361 bool old_adj_index_valid; 3362 u32 old_adj_index; 3363 int i; 3364 int err; 3365 3366 if (!nh_grp->gateway) { 3367 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); 3368 return; 3369 } 3370 3371 for (i = 0; i < nh_grp->count; i++) { 3372 nh = &nh_grp->nexthops[i]; 3373 3374 if (nh->should_offload != nh->offloaded) { 3375 offload_change = true; 3376 if (nh->should_offload) 3377 nh->update = 1; 3378 } 3379 } 3380 if (!offload_change) { 3381 /* Nothing was added or removed, so no need to reallocate. Just 3382 * update MAC on existing adjacency indexes. 3383 */ 3384 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false); 3385 if (err) { 3386 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); 3387 goto set_trap; 3388 } 3389 return; 3390 } 3391 mlxsw_sp_nexthop_group_normalize(nh_grp); 3392 if (!nh_grp->sum_norm_weight) 3393 /* No neigh of this group is connected so we just set 3394 * the trap and let everything flow through the kernel. 3395 */ 3396 goto set_trap; 3397 3398 ecmp_size = nh_grp->sum_norm_weight; 3399 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size); 3400 if (err) 3401 /* No valid allocation size available. */ 3402 goto set_trap; 3403 3404 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 3405 ecmp_size, &adj_index); 3406 if (err) { 3407 /* We ran out of KVD linear space, just set the 3408 * trap and let everything flow through the kernel.
3409 */ 3410 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n"); 3411 goto set_trap; 3412 } 3413 old_adj_index_valid = nh_grp->adj_index_valid; 3414 old_adj_index = nh_grp->adj_index; 3415 old_ecmp_size = nh_grp->ecmp_size; 3416 nh_grp->adj_index_valid = 1; 3417 nh_grp->adj_index = adj_index; 3418 nh_grp->ecmp_size = ecmp_size; 3419 mlxsw_sp_nexthop_group_rebalance(nh_grp); 3420 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true); 3421 if (err) { 3422 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); 3423 goto set_trap; 3424 } 3425 3426 if (!old_adj_index_valid) { 3427 /* The trap was set for fib entries, so we have to call 3428 * fib entry update to unset it and use adjacency index. 3429 */ 3430 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); 3431 if (err) { 3432 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n"); 3433 goto set_trap; 3434 } 3435 return; 3436 } 3437 3438 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp, 3439 old_adj_index, old_ecmp_size); 3440 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 3441 old_ecmp_size, old_adj_index); 3442 if (err) { 3443 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); 3444 goto set_trap; 3445 } 3446 3447 /* Offload state within the group changed, so update the flags. */ 3448 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp); 3449 3450 return; 3451 3452 set_trap: 3453 old_adj_index_valid = nh_grp->adj_index_valid; 3454 nh_grp->adj_index_valid = 0; 3455 for (i = 0; i < nh_grp->count; i++) { 3456 nh = &nh_grp->nexthops[i]; 3457 nh->offloaded = 0; 3458 } 3459 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); 3460 if (err) 3461 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n"); 3462 if (old_adj_index_valid) 3463 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 3464 nh_grp->ecmp_size, nh_grp->adj_index); 3465 } 3466 3467 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, 3468 bool removing) 3469 { 3470 if (!removing) 3471 nh->should_offload = 1; 3472 else 3473 nh->should_offload = 0; 3474 nh->update = 1; 3475 } 3476 3477 static int 3478 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp, 3479 struct mlxsw_sp_neigh_entry *neigh_entry) 3480 { 3481 struct neighbour *n, *old_n = neigh_entry->key.n; 3482 struct mlxsw_sp_nexthop *nh; 3483 bool entry_connected; 3484 u8 nud_state, dead; 3485 int err; 3486 3487 nh = list_first_entry(&neigh_entry->nexthop_list, 3488 struct mlxsw_sp_nexthop, neigh_list_node); 3489 3490 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); 3491 if (!n) { 3492 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, 3493 nh->rif->dev); 3494 if (IS_ERR(n)) 3495 return PTR_ERR(n); 3496 neigh_event_send(n, NULL); 3497 } 3498 3499 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); 3500 neigh_entry->key.n = n; 3501 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); 3502 if (err) 3503 goto err_neigh_entry_insert; 3504 3505 read_lock_bh(&n->lock); 3506 nud_state = n->nud_state; 3507 dead = n->dead; 3508 read_unlock_bh(&n->lock); 3509 entry_connected = nud_state & NUD_VALID && !dead; 3510 3511 list_for_each_entry(nh, &neigh_entry->nexthop_list, 3512 neigh_list_node) { 3513 neigh_release(old_n); 3514 neigh_clone(n); 3515 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected); 3516 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 3517 } 3518 3519 
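/* Drop the reference taken by neigh_lookup() / neigh_create() above; every nexthop on the list now holds its own clone of 'n'. */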
neigh_release(n); 3520 3521 return 0; 3522 3523 err_neigh_entry_insert: 3524 neigh_entry->key.n = old_n; 3525 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); 3526 neigh_release(n); 3527 return err; 3528 } 3529 3530 static void 3531 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, 3532 struct mlxsw_sp_neigh_entry *neigh_entry, 3533 bool removing, bool dead) 3534 { 3535 struct mlxsw_sp_nexthop *nh; 3536 3537 if (list_empty(&neigh_entry->nexthop_list)) 3538 return; 3539 3540 if (dead) { 3541 int err; 3542 3543 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp, 3544 neigh_entry); 3545 if (err) 3546 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n"); 3547 return; 3548 } 3549 3550 list_for_each_entry(nh, &neigh_entry->nexthop_list, 3551 neigh_list_node) { 3552 __mlxsw_sp_nexthop_neigh_update(nh, removing); 3553 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 3554 } 3555 } 3556 3557 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh, 3558 struct mlxsw_sp_rif *rif) 3559 { 3560 if (nh->rif) 3561 return; 3562 3563 nh->rif = rif; 3564 list_add(&nh->rif_list_node, &rif->nexthop_list); 3565 } 3566 3567 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh) 3568 { 3569 if (!nh->rif) 3570 return; 3571 3572 list_del(&nh->rif_list_node); 3573 nh->rif = NULL; 3574 } 3575 3576 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, 3577 struct mlxsw_sp_nexthop *nh) 3578 { 3579 struct mlxsw_sp_neigh_entry *neigh_entry; 3580 struct neighbour *n; 3581 u8 nud_state, dead; 3582 int err; 3583 3584 if (!nh->nh_grp->gateway || nh->neigh_entry) 3585 return 0; 3586 3587 /* Take a reference on the neigh here, ensuring that the neigh is 3588 * not destroyed before the nexthop entry is finished with it. 3589 * The reference is taken either in neigh_lookup() or 3590 * in neigh_create() in case n is not found.
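* The matching neigh_release() is done in mlxsw_sp_nexthop_neigh_fini().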
3591 */ 3592 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); 3593 if (!n) { 3594 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, 3595 nh->rif->dev); 3596 if (IS_ERR(n)) 3597 return PTR_ERR(n); 3598 neigh_event_send(n, NULL); 3599 } 3600 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); 3601 if (!neigh_entry) { 3602 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); 3603 if (IS_ERR(neigh_entry)) { 3604 err = -EINVAL; 3605 goto err_neigh_entry_create; 3606 } 3607 } 3608 3609 /* If that is the first nexthop connected to that neigh, add to 3610 * nexthop_neighs_list 3611 */ 3612 if (list_empty(&neigh_entry->nexthop_list)) 3613 list_add_tail(&neigh_entry->nexthop_neighs_list_node, 3614 &mlxsw_sp->router->nexthop_neighs_list); 3615 3616 nh->neigh_entry = neigh_entry; 3617 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list); 3618 read_lock_bh(&n->lock); 3619 nud_state = n->nud_state; 3620 dead = n->dead; 3621 read_unlock_bh(&n->lock); 3622 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead)); 3623 3624 return 0; 3625 3626 err_neigh_entry_create: 3627 neigh_release(n); 3628 return err; 3629 } 3630 3631 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, 3632 struct mlxsw_sp_nexthop *nh) 3633 { 3634 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; 3635 struct neighbour *n; 3636 3637 if (!neigh_entry) 3638 return; 3639 n = neigh_entry->key.n; 3640 3641 __mlxsw_sp_nexthop_neigh_update(nh, true); 3642 list_del(&nh->neigh_list_node); 3643 nh->neigh_entry = NULL; 3644 3645 /* If that is the last nexthop connected to that neigh, remove from 3646 * nexthop_neighs_list 3647 */ 3648 if (list_empty(&neigh_entry->nexthop_list)) 3649 list_del(&neigh_entry->nexthop_neighs_list_node); 3650 3651 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) 3652 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); 3653 3654 neigh_release(n); 3655 } 3656 3657 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev) 3658 { 3659 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); 3660 3661 return ul_dev ? 
(ul_dev->flags & IFF_UP) : true; 3662 } 3663 3664 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, 3665 struct mlxsw_sp_nexthop *nh, 3666 struct mlxsw_sp_ipip_entry *ipip_entry) 3667 { 3668 bool removing; 3669 3670 if (!nh->nh_grp->gateway || nh->ipip_entry) 3671 return; 3672 3673 nh->ipip_entry = ipip_entry; 3674 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev); 3675 __mlxsw_sp_nexthop_neigh_update(nh, removing); 3676 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common); 3677 } 3678 3679 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp, 3680 struct mlxsw_sp_nexthop *nh) 3681 { 3682 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry; 3683 3684 if (!ipip_entry) 3685 return; 3686 3687 __mlxsw_sp_nexthop_neigh_update(nh, true); 3688 nh->ipip_entry = NULL; 3689 } 3690 3691 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp, 3692 const struct fib_nh *fib_nh, 3693 enum mlxsw_sp_ipip_type *p_ipipt) 3694 { 3695 struct net_device *dev = fib_nh->fib_nh_dev; 3696 3697 return dev && 3698 fib_nh->nh_parent->fib_type == RTN_UNICAST && 3699 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt); 3700 } 3701 3702 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp, 3703 struct mlxsw_sp_nexthop *nh) 3704 { 3705 switch (nh->type) { 3706 case MLXSW_SP_NEXTHOP_TYPE_ETH: 3707 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); 3708 mlxsw_sp_nexthop_rif_fini(nh); 3709 break; 3710 case MLXSW_SP_NEXTHOP_TYPE_IPIP: 3711 mlxsw_sp_nexthop_rif_fini(nh); 3712 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); 3713 break; 3714 } 3715 } 3716 3717 static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp, 3718 struct mlxsw_sp_nexthop *nh, 3719 struct fib_nh *fib_nh) 3720 { 3721 const struct mlxsw_sp_ipip_ops *ipip_ops; 3722 struct net_device *dev = fib_nh->fib_nh_dev; 3723 struct mlxsw_sp_ipip_entry *ipip_entry; 3724 struct mlxsw_sp_rif *rif; 3725 int err; 3726 3727 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev); 3728 if (ipip_entry) { 3729 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; 3730 if (ipip_ops->can_offload(mlxsw_sp, dev, 3731 MLXSW_SP_L3_PROTO_IPV4)) { 3732 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; 3733 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); 3734 return 0; 3735 } 3736 } 3737 3738 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; 3739 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 3740 if (!rif) 3741 return 0; 3742 3743 mlxsw_sp_nexthop_rif_init(nh, rif); 3744 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); 3745 if (err) 3746 goto err_neigh_init; 3747 3748 return 0; 3749 3750 err_neigh_init: 3751 mlxsw_sp_nexthop_rif_fini(nh); 3752 return err; 3753 } 3754 3755 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp, 3756 struct mlxsw_sp_nexthop *nh) 3757 { 3758 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); 3759 } 3760 3761 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, 3762 struct mlxsw_sp_nexthop_group *nh_grp, 3763 struct mlxsw_sp_nexthop *nh, 3764 struct fib_nh *fib_nh) 3765 { 3766 struct net_device *dev = fib_nh->fib_nh_dev; 3767 struct in_device *in_dev; 3768 int err; 3769 3770 nh->nh_grp = nh_grp; 3771 nh->key.fib_nh = fib_nh; 3772 #ifdef CONFIG_IP_ROUTE_MULTIPATH 3773 nh->nh_weight = fib_nh->fib_nh_weight; 3774 #else 3775 nh->nh_weight = 1; 3776 #endif 3777 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4)); 3778 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); 3779 if (err) 3780 return err; 3781 3782 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); 3783 
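/* Track the nexthop on the router list even if it cannot be resolved yet; the checks below bail out early when there is no netdev or the link is down, and later events will initialize the nexthop type. */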
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); 3784 3785 if (!dev) 3786 return 0; 3787 3788 in_dev = __in_dev_get_rtnl(dev); 3789 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && 3790 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) 3791 return 0; 3792 3793 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); 3794 if (err) 3795 goto err_nexthop_neigh_init; 3796 3797 return 0; 3798 3799 err_nexthop_neigh_init: 3800 mlxsw_sp_nexthop_remove(mlxsw_sp, nh); 3801 return err; 3802 } 3803 3804 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp, 3805 struct mlxsw_sp_nexthop *nh) 3806 { 3807 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); 3808 list_del(&nh->router_list_node); 3809 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); 3810 mlxsw_sp_nexthop_remove(mlxsw_sp, nh); 3811 } 3812 3813 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp, 3814 unsigned long event, struct fib_nh *fib_nh) 3815 { 3816 struct mlxsw_sp_nexthop_key key; 3817 struct mlxsw_sp_nexthop *nh; 3818 3819 if (mlxsw_sp->router->aborted) 3820 return; 3821 3822 key.fib_nh = fib_nh; 3823 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key); 3824 if (WARN_ON_ONCE(!nh)) 3825 return; 3826 3827 switch (event) { 3828 case FIB_EVENT_NH_ADD: 3829 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); 3830 break; 3831 case FIB_EVENT_NH_DEL: 3832 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); 3833 break; 3834 } 3835 3836 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 3837 } 3838 3839 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, 3840 struct mlxsw_sp_rif *rif) 3841 { 3842 struct mlxsw_sp_nexthop *nh; 3843 bool removing; 3844 3845 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) { 3846 switch (nh->type) { 3847 case MLXSW_SP_NEXTHOP_TYPE_ETH: 3848 removing = false; 3849 break; 3850 case MLXSW_SP_NEXTHOP_TYPE_IPIP: 3851 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev); 3852 break; 3853 default: 3854 WARN_ON(1); 3855 continue; 3856 } 3857 3858 __mlxsw_sp_nexthop_neigh_update(nh, removing); 3859 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 3860 } 3861 } 3862 3863 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp, 3864 struct mlxsw_sp_rif *old_rif, 3865 struct mlxsw_sp_rif *new_rif) 3866 { 3867 struct mlxsw_sp_nexthop *nh; 3868 3869 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list); 3870 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node) 3871 nh->rif = new_rif; 3872 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif); 3873 } 3874 3875 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, 3876 struct mlxsw_sp_rif *rif) 3877 { 3878 struct mlxsw_sp_nexthop *nh, *tmp; 3879 3880 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) { 3881 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); 3882 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); 3883 } 3884 } 3885 3886 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, 3887 struct fib_info *fi) 3888 { 3889 const struct fib_nh *nh = fib_info_nh(fi, 0); 3890 3891 return nh->fib_nh_scope == RT_SCOPE_LINK || 3892 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL); 3893 } 3894 3895 static struct mlxsw_sp_nexthop_group * 3896 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) 3897 { 3898 unsigned int nhs = fib_info_num_path(fi); 3899 struct mlxsw_sp_nexthop_group *nh_grp; 3900 struct mlxsw_sp_nexthop *nh; 3901 struct fib_nh *fib_nh; 3902 int i; 3903 int err; 3904 3905 nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL); 3906 
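/* struct_size() accounts for the group header plus the flexible nexthops[] array, one element for each path in the fib_info. */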
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	unsigned int nhs = fib_info_num_path(fi);
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	int i;
	int err;

	nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	nh_grp->priv = fi;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->neigh_tbl = &arp_tbl;

	nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
	nh_grp->count = nhs;
	fib_info_hold(fi);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = fib_info_nh(fi, i);
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop4_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	fib_info_put(fi);
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}

static bool
mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	return !fib4_entry->tos;
}

static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nh_rif;
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return true;
	default:
		return false;
	}
}

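/* Editorial note: match an IPv6 route to a member of a nexthop group by
 * egress device and gateway address, so offload state can be reflected
 * back into the route's fib6_nh flags.
 */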
static struct mlxsw_sp_nexthop *
mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->fib6_nh->fib_nh_gw6))
			return nh;
	}

	return NULL;
}

static void
mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
		nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		if (nh->offloaded)
			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
		else
			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	if (!list_is_singular(&nh_grp->fib_list))
		return;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
		list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				 list)->rt->fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
		struct mlxsw_sp_nexthop *nh;

		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
		if (nh && nh->offloaded)
			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
		else
			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		rt->fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_offload_set(fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_offload_set(fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_offload_unset(fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_offload_unset(fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		if (mlxsw_sp_fib_entry_should_offload(fib_entry))
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}

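/* Editorial note: the helpers below pack the RALUE register, which writes
 * or deletes an LPM entry in the device; the entry's action (remote,
 * local, trap, etc.) is packed separately by the op_* helpers.
 */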
static void
mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
			      const struct mlxsw_sp_fib_entry *fib_entry,
			      enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralxx_protocol proto;
	u32 *p_dip;

	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;

	switch (fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		p_dip = (u32 *) fib_entry->fib_node->key.addr;
		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      *p_dip);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      fib_entry->fib_node->key.addr);
		break;
	}
}

static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry *fib_entry,
					   enum mlxsw_reg_ralue_op op)
{
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	if (WARN_ON(!ipip_entry))
		return -EINVAL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
				      fib_entry->decap.tunnel_index);
}

static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry *fib_entry,
					   enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
					   fib_entry->decap.tunnel_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

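/* Editorial note: dispatch the device operation according to the entry
 * type resolved by mlxsw_sp_fib4_entry_type_set() /
 * mlxsw_sp_fib6_entry_type_set().
 */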
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
							fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);

	mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);

	return err;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}

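/* Editorial note: map the kernel route type (RTN_*) to a device action;
 * local routes are first checked for IP-in-IP and NVE tunnel
 * decapsulation.
 */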
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_LOCAL:
		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
						MLXSW_SP_L3_PROTO_IPV4, dip);
		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
							     fib_entry,
							     ipip_entry);
		}
		if (mlxsw_sp_nve_ipv4_route_is_decap(mlxsw_sp, tb_id,
						     dip.addr4)) {
			u32 t_index;

			t_index = mlxsw_sp_nve_decap_tunnel_index_get(mlxsw_sp);
			fib_entry->decap.tunnel_index = t_index;
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
			return 0;
		}
		/* fall through */
	case RTN_BROADCAST:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_BLACKHOLE:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	default:
		return -EINVAL;
	}
}

static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
	if (!fib4_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib4_entry->common;

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop4_group_get;

	fib4_entry->prio = fen_info->fi->fib_priority;
	fib4_entry->tb_id = fen_info->tb_id;
	fib4_entry->type = fen_info->type;
	fib4_entry->tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib4_entry;

err_nexthop4_group_get:
err_fib4_entry_type_set:
	kfree(fib4_entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
	kfree(fib4_entry);
}

static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node)
		return NULL;

	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
		if (fib4_entry->tb_id == fen_info->tb_id &&
		    fib4_entry->tos == fen_info->tos &&
		    fib4_entry->type == fen_info->type &&
		    mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
		    fen_info->fi) {
			return fib4_entry;
		}
	}

	return NULL;
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};

static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}

static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
	if (!fib_node)
		return NULL;

	INIT_LIST_HEAD(&fib_node->entry_list);
	list_add(&fib_node->list, &fib->node_list);
	memcpy(fib_node->key.addr, addr, addr_len);
	fib_node->key.prefix_len = prefix_len;

	return fib_node;
}

static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}

static bool
mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
				 const struct mlxsw_sp_fib_entry *fib_entry)
{
	return list_first_entry(&fib_node->entry_list,
				struct mlxsw_sp_fib_entry, list) == fib_entry;
}

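/* Editorial note: a FIB node takes a reference on its prefix length in the
 * bound LPM tree; the first user of a new prefix length may require
 * building and binding a replacement tree.
 */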
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		goto out;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return PTR_ERR(lpm_tree);

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

out:
	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
	return 0;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	return err;
}

static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	int err;

	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		return;
	/* Try to construct a new LPM tree from the current prefix usage
	 * minus the unused one. If we fail, continue using the old one.
	 */
	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
				    fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return;

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

	return;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}

static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
	if (err)
		goto err_fib_lpm_tree_link;

	return 0;

err_fib_lpm_tree_link:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}

static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
		      size_t addr_len, unsigned char prefix_len,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, proto);

	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib4_entry *new4_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
		if (fib4_entry->tb_id > new4_entry->tb_id)
			continue;
		if (fib4_entry->tb_id != new4_entry->tb_id)
			break;
		if (fib4_entry->tos > new4_entry->tos)
			continue;
		if (fib4_entry->prio >= new4_entry->prio ||
		    fib4_entry->tos < new4_entry->tos)
			return fib4_entry;
	}

	return NULL;
}

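/* Editorial note: a node's entry list is kept sorted by table ID
 * (descending), then TOS (descending), then priority (ascending), so the
 * first entry is the one to program.
 */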
static int
mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
			       struct mlxsw_sp_fib4_entry *new4_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib4_entry))
		return -EINVAL;

	fib_node = fib4_entry->common.fib_node;
	list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
				 common.list) {
		if (fib4_entry->tb_id != new4_entry->tb_id ||
		    fib4_entry->tos != new4_entry->tos ||
		    fib4_entry->prio != new4_entry->prio)
			break;
	}

	list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
	return 0;
}

static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
	if (replace && WARN_ON(!fib4_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib4_entry) {
		list_add_tail(&new4_entry->common.list,
			      &fib4_entry->common.list);
	} else {
		struct mlxsw_sp_fib4_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			if (new4_entry->tb_id > last->tb_id)
				break;
			fib4_entry = last;
		}

		if (fib4_entry)
			list_add(&new4_entry->common.list,
				 &fib4_entry->common.list);
		else
			list_add(&new4_entry->common.list,
				 &fib_node->entry_list);
	}

	return 0;
}

static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
{
	list_del(&fib4_entry->common.list);
}

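/* Editorial note: only the first entry of a node is reflected to the
 * device; when it changes, the new first entry overwrites the old one to
 * avoid packet loss.
 */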
static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}

static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib4_entry *fib4_entry,
					 bool replace, bool append)
{
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
	if (err)
		return err;

	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib4_entry);
	return err;
}

static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib4_node_list_remove(fib4_entry);

	if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
}

static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib4_entry, common.list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

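/* Editorial note: core of the IPv4 route notifier path - get the FIB node,
 * create the entry, link it into the node and, on replace, unlink the
 * entry it shadows.
 */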
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
					 &fen_info->dst, sizeof(fen_info->dst),
					 fen_info->dst_len,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib4_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib4_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib4_entry))
		return;
	fib_node = fib4_entry->common.fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
	/* Packets with link-local destination IP arriving to the router
	 * are trapped to the CPU, so no need to program specific routes
	 * for them.
	 */
	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
		return true;

	/* Multicast routes aren't supported, so ignore them. Neighbour
	 * Discovery packets are specifically trapped.
	 */
	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
		return true;

	/* Cloned routes are irrelevant in the forwarding path. */
	if (rt->fib6_flags & RTF_CACHE)
		return true;

	return false;
}

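/* Editorial note: mlxsw_sp_rt6 wraps a fib6_info so the driver can track
 * per-entry route lists while holding a reference on the route itself.
 */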
static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
	if (!mlxsw_sp_rt6)
		return ERR_PTR(-ENOMEM);

	/* In case of route replace, replaced route is deleted with
	 * no notification. Take reference to prevent accessing freed
	 * memory.
	 */
	mlxsw_sp_rt6->rt = rt;
	fib6_info_hold(rt);

	return mlxsw_sp_rt6;
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
	fib6_info_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
}
#endif

static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
	kfree(mlxsw_sp_rt6);
}

static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
{
	/* RTF_CACHE routes are ignored */
	return !(rt->fib6_flags & RTF_ADDRCONF) &&
	       rt->fib6_nh->fib_nh_gw_family;
}

static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
				 const struct fib6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
		 * virtual router.
		 */
		if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
			continue;
		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
			break;
		if (rt->fib6_metric < nrt->fib6_metric)
			continue;
		if (rt->fib6_metric == nrt->fib6_metric &&
		    mlxsw_sp_fib6_rt_can_mp(rt))
			return fib6_entry;
		if (rt->fib6_metric > nrt->fib6_metric)
			break;
	}

	return NULL;
}

static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
			    const struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		if (mlxsw_sp_rt6->rt == rt)
			return mlxsw_sp_rt6;
	}

	return NULL;
}

static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct fib6_info *rt,
					enum mlxsw_sp_ipip_type *ret)
{
	return rt->fib6_nh->fib_nh_dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}

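/* Editorial note: an IPv6 nexthop is either tunnel-backed (IPIP) or an
 * Ethernet nexthop resolved through a RIF and a neighbour entry.
 */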
static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop_group *nh_grp,
				       struct mlxsw_sp_nexthop *nh,
				       const struct fib6_info *rt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
	struct mlxsw_sp_rif *rif;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
	if (ipip_entry) {
		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
		if (ipip_ops->can_offload(mlxsw_sp, dev,
					  MLXSW_SP_L3_PROTO_IPV6)) {
			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
			return 0;
		}
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	return err;
}

static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
}

static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh->fib_nh_dev;

	nh->nh_grp = nh_grp;
	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
}

static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}

static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				    const struct fib6_info *rt)
{
	return rt->fib6_nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	int i = 0;
	int err;

	nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
			 GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
#if IS_ENABLED(CONFIG_IPV6)
	nh_grp->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nh_grp->count = fib6_entry->nrt6;
	for (i = 0; i < nh_grp->count; i++) {
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nh_grp->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop6_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i = nh_grp->count;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON(nh_grp->adj_index_valid);
	kfree(nh_grp);
}

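/* Editorial note: as with IPv4, IPv6 entries using an identical set of
 * routes share a single nexthop group.
 */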
static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}

static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}

static int
mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	int err;

	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	return err;
}

static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_create;
		}

		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_update;

	return 0;

err_nexthop6_group_update:
	i = nrt6;
err_rt6_create:
	for (i--; i >= 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	return err;
}

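/* Editorial note: removing only some routes of a multipath entry shrinks
 * the entry, after which the group update migrates it to a matching
 * (possibly new) nexthop group.
 */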
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
							   rt_arr[i]);
		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
			continue;

		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}

	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
}

static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 const struct fib6_info *rt)
{
	/* Packets hitting RTF_REJECT routes need to be discarded by the
	 * stack. We can rely on their destination device not having a
	 * RIF (it's the loopback device) and can thus use action type
	 * local, which will cause them to be trapped with a lower
	 * priority than packets that need to be locally received.
	 */
	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	else if (rt->fib6_type == RTN_BLACKHOLE)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
	else if (rt->fib6_flags & RTF_REJECT)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
	else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	else
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
}

static void
mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;

	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
				 list) {
		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	INIT_LIST_HEAD(&fib6_entry->rt6_list);

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_create;
		}
		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_nexthop6_group_get:
	i = nrt6;
err_rt6_create:
	for (i--; i >= 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	kfree(fib6_entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	WARN_ON(fib6_entry->nrt6);
	kfree(fib6_entry);
}

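/* Editorial note: on replace, an entry with the same metric and the same
 * multipath capability is preferred; otherwise the first suitable entry
 * in the same table is used as fallback.
 */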
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct fib6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
			continue;
		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
			break;
		if (replace && rt->fib6_metric == nrt->fib6_metric) {
			if (mlxsw_sp_fib6_rt_can_mp(rt) ==
			    mlxsw_sp_fib6_rt_can_mp(nrt))
				return fib6_entry;
			if (mlxsw_sp_fib6_rt_can_mp(nrt))
				fallback = fallback ?: fib6_entry;
		}
		if (rt->fib6_metric > nrt->fib6_metric)
			return fallback ?: fib6_entry;
	}

	return fallback;
}

static int
mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
			       bool *p_replace)
{
	struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
	struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, *p_replace);

	if (*p_replace && !fib6_entry)
		*p_replace = false;

	if (fib6_entry) {
		list_add_tail(&new6_entry->common.list,
			      &fib6_entry->common.list);
	} else {
		struct mlxsw_sp_fib6_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);

			if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
				break;
			fib6_entry = last;
		}

		if (fib6_entry)
			list_add(&new6_entry->common.list,
				 &fib6_entry->common.list);
		else
			list_add(&new6_entry->common.list,
				 &fib_node->entry_list);
	}

	return 0;
}

static void
mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	list_del(&fib6_entry->common.list);
}

static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib6_entry *fib6_entry,
					 bool *p_replace)
{
	int err;

	err = mlxsw_sp_fib6_node_list_insert(fib6_entry, p_replace);
	if (err)
		return err;

	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_fib6_node_list_remove(fib6_entry);
	return err;
}

static void
mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_node_list_remove(fib6_entry);
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
					    sizeof(rt->fib6_dst.addr),
					    rt->fib6_dst.plen);
	if (!fib_node)
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
		    rt->fib6_metric == iter_rt->fib6_metric &&
		    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
			return fib6_entry;
	}

	return NULL;
}

static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	struct mlxsw_sp_fib6_entry *replaced;

	if (!replace)
		return;

	replaced = list_next_entry(fib6_entry, common.list);

	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
				    struct fib6_info **rt_arr,
				    unsigned int nrt6, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	/* Before creating a new entry, try to append route to an existing
	 * multipath entry.
	 */
	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
	if (fib6_entry) {
		err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry,
						      rt_arr, nrt6);
		if (err)
			goto err_fib6_entry_nexthop_add;
		return 0;
	}

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
						nrt6);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, &replace);
	if (err)
		goto err_fib6_node_entry_link;

	mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);

	return 0;

err_fib6_node_entry_link:
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib6_info **rt_arr,
				     unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];

	if (mlxsw_sp->router->aborted)
		return;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return;

	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (WARN_ON(!fib6_entry))
		return;

	/* If not all the nexthops are deleted, then only reduce the nexthop
	 * group.
	 */
	if (nrt6 != fib6_entry->nrt6) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
						nrt6);
		return;
	}

	fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
					    enum mlxsw_reg_ralxx_protocol proto,
					    u8 tree_id)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		mlxsw_reg_ralue_pack(ralue_pl, proto,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_mr_table *
mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
{
	if (family == RTNL_FAMILY_IPMR)
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
	else
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
				     struct mfc_entry_notifier_info *men_info,
				     bool replace)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return 0;

	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
}

static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
				      struct mfc_entry_notifier_info *men_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return;

	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

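/* Editorial note: multicast VIFs are bound to the RIF of their underlying
 * netdevice, if any; the MR table is selected by the notifier family
 * (IPMR vs. IP6MR).
 */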
ven_info->vif_index, 5782 ven_info->vif_flags, rif); 5783 } 5784 5785 static void 5786 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp, 5787 struct vif_entry_notifier_info *ven_info) 5788 { 5789 struct mlxsw_sp_mr_table *mrt; 5790 struct mlxsw_sp_vr *vr; 5791 5792 if (mlxsw_sp->router->aborted) 5793 return; 5794 5795 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id); 5796 if (WARN_ON(!vr)) 5797 return; 5798 5799 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family); 5800 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index); 5801 mlxsw_sp_vr_put(mlxsw_sp, vr); 5802 } 5803 5804 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) 5805 { 5806 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4; 5807 int err; 5808 5809 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, 5810 MLXSW_SP_LPM_TREE_MIN); 5811 if (err) 5812 return err; 5813 5814 /* The multicast router code does not need an abort trap as by default, 5815 * packets that don't match any routes are trapped to the CPU. 5816 */ 5817 5818 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6; 5819 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, 5820 MLXSW_SP_LPM_TREE_MIN + 1); 5821 } 5822 5823 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, 5824 struct mlxsw_sp_fib_node *fib_node) 5825 { 5826 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp; 5827 5828 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list, 5829 common.list) { 5830 bool do_break = &tmp->common.list == &fib_node->entry_list; 5831 5832 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry); 5833 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); 5834 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5835 /* Break when entry list is empty and node was freed. 5836 * Otherwise, we'll access freed memory in the next 5837 * iteration. 
5838 */ 5839 if (do_break) 5840 break; 5841 } 5842 } 5843 5844 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp, 5845 struct mlxsw_sp_fib_node *fib_node) 5846 { 5847 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp; 5848 5849 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list, 5850 common.list) { 5851 bool do_break = &tmp->common.list == &fib_node->entry_list; 5852 5853 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry); 5854 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); 5855 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5856 if (do_break) 5857 break; 5858 } 5859 } 5860 5861 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, 5862 struct mlxsw_sp_fib_node *fib_node) 5863 { 5864 switch (fib_node->fib->proto) { 5865 case MLXSW_SP_L3_PROTO_IPV4: 5866 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node); 5867 break; 5868 case MLXSW_SP_L3_PROTO_IPV6: 5869 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node); 5870 break; 5871 } 5872 } 5873 5874 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp, 5875 struct mlxsw_sp_vr *vr, 5876 enum mlxsw_sp_l3proto proto) 5877 { 5878 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto); 5879 struct mlxsw_sp_fib_node *fib_node, *tmp; 5880 5881 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) { 5882 bool do_break = &tmp->list == &fib->node_list; 5883 5884 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node); 5885 if (do_break) 5886 break; 5887 } 5888 } 5889 5890 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) 5891 { 5892 int i, j; 5893 5894 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { 5895 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i]; 5896 5897 if (!mlxsw_sp_vr_is_used(vr)) 5898 continue; 5899 5900 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++) 5901 mlxsw_sp_mr_table_flush(vr->mr_table[j]); 5902 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); 5903 5904 /* If virtual router was only used for IPv4, then it's no 5905 * longer used. 5906 */ 5907 if (!mlxsw_sp_vr_is_used(vr)) 5908 continue; 5909 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); 5910 } 5911 } 5912 5913 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp) 5914 { 5915 int err; 5916 5917 if (mlxsw_sp->router->aborted) 5918 return; 5919 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. 
Note that FIB entries are no longer being offloaded to this device.\n"); 5920 mlxsw_sp_router_fib_flush(mlxsw_sp); 5921 mlxsw_sp->router->aborted = true; 5922 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); 5923 if (err) 5924 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n"); 5925 } 5926 5927 struct mlxsw_sp_fib6_event_work { 5928 struct fib6_info **rt_arr; 5929 unsigned int nrt6; 5930 }; 5931 5932 struct mlxsw_sp_fib_event_work { 5933 struct work_struct work; 5934 union { 5935 struct mlxsw_sp_fib6_event_work fib6_work; 5936 struct fib_entry_notifier_info fen_info; 5937 struct fib_rule_notifier_info fr_info; 5938 struct fib_nh_notifier_info fnh_info; 5939 struct mfc_entry_notifier_info men_info; 5940 struct vif_entry_notifier_info ven_info; 5941 }; 5942 struct mlxsw_sp *mlxsw_sp; 5943 unsigned long event; 5944 }; 5945 5946 static int 5947 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work, 5948 struct fib6_entry_notifier_info *fen6_info) 5949 { 5950 struct fib6_info *rt = fen6_info->rt; 5951 struct fib6_info **rt_arr; 5952 struct fib6_info *iter; 5953 unsigned int nrt6; 5954 int i = 0; 5955 5956 nrt6 = fen6_info->nsiblings + 1; 5957 5958 rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC); 5959 if (!rt_arr) 5960 return -ENOMEM; 5961 5962 fib6_work->rt_arr = rt_arr; 5963 fib6_work->nrt6 = nrt6; 5964 5965 rt_arr[0] = rt; 5966 fib6_info_hold(rt); 5967 5968 if (!fen6_info->nsiblings) 5969 return 0; 5970 5971 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) { 5972 if (i == fen6_info->nsiblings) 5973 break; 5974 5975 rt_arr[i + 1] = iter; 5976 fib6_info_hold(iter); 5977 i++; 5978 } 5979 WARN_ON_ONCE(i != fen6_info->nsiblings); 5980 5981 return 0; 5982 } 5983 5984 static void 5985 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work) 5986 { 5987 int i; 5988 5989 for (i = 0; i < fib6_work->nrt6; i++) 5990 mlxsw_sp_rt6_release(fib6_work->rt_arr[i]); 5991 kfree(fib6_work->rt_arr); 5992 } 5993 5994 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) 5995 { 5996 struct mlxsw_sp_fib_event_work *fib_work = 5997 container_of(work, struct mlxsw_sp_fib_event_work, work); 5998 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; 5999 bool replace, append; 6000 int err; 6001 6002 /* Protect internal structures from changes */ 6003 rtnl_lock(); 6004 mlxsw_sp_span_respin(mlxsw_sp); 6005 6006 switch (fib_work->event) { 6007 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6008 case FIB_EVENT_ENTRY_APPEND: /* fall through */ 6009 case FIB_EVENT_ENTRY_ADD: 6010 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; 6011 append = fib_work->event == FIB_EVENT_ENTRY_APPEND; 6012 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info, 6013 replace, append); 6014 if (err) 6015 mlxsw_sp_router_fib_abort(mlxsw_sp); 6016 fib_info_put(fib_work->fen_info.fi); 6017 break; 6018 case FIB_EVENT_ENTRY_DEL: 6019 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info); 6020 fib_info_put(fib_work->fen_info.fi); 6021 break; 6022 case FIB_EVENT_RULE_ADD: 6023 /* if we get here, a rule was added that we do not support. 
6024 * just do the fib_abort 6025 */ 6026 mlxsw_sp_router_fib_abort(mlxsw_sp); 6027 break; 6028 case FIB_EVENT_NH_ADD: /* fall through */ 6029 case FIB_EVENT_NH_DEL: 6030 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event, 6031 fib_work->fnh_info.fib_nh); 6032 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent); 6033 break; 6034 } 6035 rtnl_unlock(); 6036 kfree(fib_work); 6037 } 6038 6039 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) 6040 { 6041 struct mlxsw_sp_fib_event_work *fib_work = 6042 container_of(work, struct mlxsw_sp_fib_event_work, work); 6043 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; 6044 bool replace; 6045 int err; 6046 6047 rtnl_lock(); 6048 mlxsw_sp_span_respin(mlxsw_sp); 6049 6050 switch (fib_work->event) { 6051 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6052 case FIB_EVENT_ENTRY_ADD: 6053 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; 6054 err = mlxsw_sp_router_fib6_add(mlxsw_sp, 6055 fib_work->fib6_work.rt_arr, 6056 fib_work->fib6_work.nrt6, 6057 replace); 6058 if (err) 6059 mlxsw_sp_router_fib_abort(mlxsw_sp); 6060 mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work); 6061 break; 6062 case FIB_EVENT_ENTRY_DEL: 6063 mlxsw_sp_router_fib6_del(mlxsw_sp, 6064 fib_work->fib6_work.rt_arr, 6065 fib_work->fib6_work.nrt6); 6066 mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work); 6067 break; 6068 case FIB_EVENT_RULE_ADD: 6069 /* if we get here, a rule was added that we do not support. 6070 * just do the fib_abort 6071 */ 6072 mlxsw_sp_router_fib_abort(mlxsw_sp); 6073 break; 6074 } 6075 rtnl_unlock(); 6076 kfree(fib_work); 6077 } 6078 6079 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) 6080 { 6081 struct mlxsw_sp_fib_event_work *fib_work = 6082 container_of(work, struct mlxsw_sp_fib_event_work, work); 6083 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; 6084 bool replace; 6085 int err; 6086 6087 rtnl_lock(); 6088 switch (fib_work->event) { 6089 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6090 case FIB_EVENT_ENTRY_ADD: 6091 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; 6092 6093 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info, 6094 replace); 6095 if (err) 6096 mlxsw_sp_router_fib_abort(mlxsw_sp); 6097 mr_cache_put(fib_work->men_info.mfc); 6098 break; 6099 case FIB_EVENT_ENTRY_DEL: 6100 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info); 6101 mr_cache_put(fib_work->men_info.mfc); 6102 break; 6103 case FIB_EVENT_VIF_ADD: 6104 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp, 6105 &fib_work->ven_info); 6106 if (err) 6107 mlxsw_sp_router_fib_abort(mlxsw_sp); 6108 dev_put(fib_work->ven_info.dev); 6109 break; 6110 case FIB_EVENT_VIF_DEL: 6111 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, 6112 &fib_work->ven_info); 6113 dev_put(fib_work->ven_info.dev); 6114 break; 6115 case FIB_EVENT_RULE_ADD: 6116 /* if we get here, a rule was added that we do not support. 
6117 * just do the fib_abort 6118 */ 6119 mlxsw_sp_router_fib_abort(mlxsw_sp); 6120 break; 6121 } 6122 rtnl_unlock(); 6123 kfree(fib_work); 6124 } 6125 6126 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, 6127 struct fib_notifier_info *info) 6128 { 6129 struct fib_entry_notifier_info *fen_info; 6130 struct fib_nh_notifier_info *fnh_info; 6131 6132 switch (fib_work->event) { 6133 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6134 case FIB_EVENT_ENTRY_APPEND: /* fall through */ 6135 case FIB_EVENT_ENTRY_ADD: /* fall through */ 6136 case FIB_EVENT_ENTRY_DEL: 6137 fen_info = container_of(info, struct fib_entry_notifier_info, 6138 info); 6139 fib_work->fen_info = *fen_info; 6140 /* Take reference on fib_info to prevent it from being 6141 * freed while work is queued. Release it afterwards. 6142 */ 6143 fib_info_hold(fib_work->fen_info.fi); 6144 break; 6145 case FIB_EVENT_NH_ADD: /* fall through */ 6146 case FIB_EVENT_NH_DEL: 6147 fnh_info = container_of(info, struct fib_nh_notifier_info, 6148 info); 6149 fib_work->fnh_info = *fnh_info; 6150 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); 6151 break; 6152 } 6153 } 6154 6155 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, 6156 struct fib_notifier_info *info) 6157 { 6158 struct fib6_entry_notifier_info *fen6_info; 6159 int err; 6160 6161 switch (fib_work->event) { 6162 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6163 case FIB_EVENT_ENTRY_ADD: /* fall through */ 6164 case FIB_EVENT_ENTRY_DEL: 6165 fen6_info = container_of(info, struct fib6_entry_notifier_info, 6166 info); 6167 err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work, 6168 fen6_info); 6169 if (err) 6170 return err; 6171 break; 6172 } 6173 6174 return 0; 6175 } 6176 6177 static void 6178 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work, 6179 struct fib_notifier_info *info) 6180 { 6181 switch (fib_work->event) { 6182 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6183 case FIB_EVENT_ENTRY_ADD: /* fall through */ 6184 case FIB_EVENT_ENTRY_DEL: 6185 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info)); 6186 mr_cache_hold(fib_work->men_info.mfc); 6187 break; 6188 case FIB_EVENT_VIF_ADD: /* fall through */ 6189 case FIB_EVENT_VIF_DEL: 6190 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info)); 6191 dev_hold(fib_work->ven_info.dev); 6192 break; 6193 } 6194 } 6195 6196 static int mlxsw_sp_router_fib_rule_event(unsigned long event, 6197 struct fib_notifier_info *info, 6198 struct mlxsw_sp *mlxsw_sp) 6199 { 6200 struct netlink_ext_ack *extack = info->extack; 6201 struct fib_rule_notifier_info *fr_info; 6202 struct fib_rule *rule; 6203 int err = 0; 6204 6205 /* nothing to do at the moment */ 6206 if (event == FIB_EVENT_RULE_DEL) 6207 return 0; 6208 6209 if (mlxsw_sp->router->aborted) 6210 return 0; 6211 6212 fr_info = container_of(info, struct fib_rule_notifier_info, info); 6213 rule = fr_info->rule; 6214 6215 /* Rule only affects locally generated traffic */ 6216 if (rule->iifindex == info->net->loopback_dev->ifindex) 6217 return 0; 6218 6219 switch (info->family) { 6220 case AF_INET: 6221 if (!fib4_rule_default(rule) && !rule->l3mdev) 6222 err = -EOPNOTSUPP; 6223 break; 6224 case AF_INET6: 6225 if (!fib6_rule_default(rule) && !rule->l3mdev) 6226 err = -EOPNOTSUPP; 6227 break; 6228 case RTNL_FAMILY_IPMR: 6229 if (!ipmr_rule_default(rule) && !rule->l3mdev) 6230 err = -EOPNOTSUPP; 6231 break; 6232 case RTNL_FAMILY_IP6MR: 6233 if (!ip6mr_rule_default(rule) && !rule->l3mdev) 6234 
err = -EOPNOTSUPP; 6235 break; 6236 } 6237 6238 if (err < 0) 6239 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported"); 6240 6241 return err; 6242 } 6243 6244 /* Called with rcu_read_lock() */ 6245 static int mlxsw_sp_router_fib_event(struct notifier_block *nb, 6246 unsigned long event, void *ptr) 6247 { 6248 struct mlxsw_sp_fib_event_work *fib_work; 6249 struct fib_notifier_info *info = ptr; 6250 struct mlxsw_sp_router *router; 6251 int err; 6252 6253 if (!net_eq(info->net, &init_net) || 6254 (info->family != AF_INET && info->family != AF_INET6 && 6255 info->family != RTNL_FAMILY_IPMR && 6256 info->family != RTNL_FAMILY_IP6MR)) 6257 return NOTIFY_DONE; 6258 6259 router = container_of(nb, struct mlxsw_sp_router, fib_nb); 6260 6261 switch (event) { 6262 case FIB_EVENT_RULE_ADD: /* fall through */ 6263 case FIB_EVENT_RULE_DEL: 6264 err = mlxsw_sp_router_fib_rule_event(event, info, 6265 router->mlxsw_sp); 6266 if (!err || info->extack) 6267 return notifier_from_errno(err); 6268 break; 6269 case FIB_EVENT_ENTRY_ADD: 6270 case FIB_EVENT_ENTRY_REPLACE: /* fall through */ 6271 case FIB_EVENT_ENTRY_APPEND: /* fall through */ 6272 if (router->aborted) { 6273 NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route"); 6274 return notifier_from_errno(-EINVAL); 6275 } 6276 if (info->family == AF_INET) { 6277 struct fib_entry_notifier_info *fen_info = ptr; 6278 6279 if (fen_info->fi->fib_nh_is_v6) { 6280 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported"); 6281 return notifier_from_errno(-EINVAL); 6282 } 6283 if (fen_info->fi->nh) { 6284 NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported"); 6285 return notifier_from_errno(-EINVAL); 6286 } 6287 } else if (info->family == AF_INET6) { 6288 struct fib6_entry_notifier_info *fen6_info; 6289 6290 fen6_info = container_of(info, 6291 struct fib6_entry_notifier_info, 6292 info); 6293 if (fen6_info->rt->nh) { 6294 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported"); 6295 return notifier_from_errno(-EINVAL); 6296 } 6297 } 6298 break; 6299 } 6300 6301 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); 6302 if (WARN_ON(!fib_work)) 6303 return NOTIFY_BAD; 6304 6305 fib_work->mlxsw_sp = router->mlxsw_sp; 6306 fib_work->event = event; 6307 6308 switch (info->family) { 6309 case AF_INET: 6310 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work); 6311 mlxsw_sp_router_fib4_event(fib_work, info); 6312 break; 6313 case AF_INET6: 6314 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work); 6315 err = mlxsw_sp_router_fib6_event(fib_work, info); 6316 if (err) 6317 goto err_fib_event; 6318 break; 6319 case RTNL_FAMILY_IP6MR: 6320 case RTNL_FAMILY_IPMR: 6321 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work); 6322 mlxsw_sp_router_fibmr_event(fib_work, info); 6323 break; 6324 } 6325 6326 mlxsw_core_schedule_work(&fib_work->work); 6327 6328 return NOTIFY_DONE; 6329 6330 err_fib_event: 6331 kfree(fib_work); 6332 return NOTIFY_BAD; 6333 } 6334 6335 struct mlxsw_sp_rif * 6336 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, 6337 const struct net_device *dev) 6338 { 6339 int i; 6340 6341 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) 6342 if (mlxsw_sp->router->rifs[i] && 6343 mlxsw_sp->router->rifs[i]->dev == dev) 6344 return mlxsw_sp->router->rifs[i]; 6345 6346 return NULL; 6347 } 6348 6349 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif) 6350 { 6351 char ritr_pl[MLXSW_REG_RITR_LEN]; 6352 
int err; 6353 6354 mlxsw_reg_ritr_rif_pack(ritr_pl, rif); 6355 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 6356 if (err) 6357 return err; 6358 6359 mlxsw_reg_ritr_enable_set(ritr_pl, false); 6360 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 6361 } 6362 6363 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, 6364 struct mlxsw_sp_rif *rif) 6365 { 6366 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index); 6367 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif); 6368 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif); 6369 } 6370 6371 static bool 6372 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev, 6373 unsigned long event) 6374 { 6375 struct inet6_dev *inet6_dev; 6376 bool addr_list_empty = true; 6377 struct in_device *idev; 6378 6379 switch (event) { 6380 case NETDEV_UP: 6381 return rif == NULL; 6382 case NETDEV_DOWN: 6383 idev = __in_dev_get_rtnl(dev); 6384 if (idev && idev->ifa_list) 6385 addr_list_empty = false; 6386 6387 inet6_dev = __in6_dev_get(dev); 6388 if (addr_list_empty && inet6_dev && 6389 !list_empty(&inet6_dev->addr_list)) 6390 addr_list_empty = false; 6391 6392 /* macvlans do not have a RIF, but rather piggy back on the 6393 * RIF of their lower device. 6394 */ 6395 if (netif_is_macvlan(dev) && addr_list_empty) 6396 return true; 6397 6398 if (rif && addr_list_empty && 6399 !netif_is_l3_slave(rif->dev)) 6400 return true; 6401 /* It is possible we already removed the RIF ourselves 6402 * if it was assigned to a netdev that is now a bridge 6403 * or LAG slave. 6404 */ 6405 return false; 6406 } 6407 6408 return false; 6409 } 6410 6411 static enum mlxsw_sp_rif_type 6412 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp, 6413 const struct net_device *dev) 6414 { 6415 enum mlxsw_sp_fid_type type; 6416 6417 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL)) 6418 return MLXSW_SP_RIF_TYPE_IPIP_LB; 6419 6420 /* Otherwise RIF type is derived from the type of the underlying FID. 
*/ 6421 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev))) 6422 type = MLXSW_SP_FID_TYPE_8021Q; 6423 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev)) 6424 type = MLXSW_SP_FID_TYPE_8021Q; 6425 else if (netif_is_bridge_master(dev)) 6426 type = MLXSW_SP_FID_TYPE_8021D; 6427 else 6428 type = MLXSW_SP_FID_TYPE_RFID; 6429 6430 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type); 6431 } 6432 6433 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index) 6434 { 6435 int i; 6436 6437 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { 6438 if (!mlxsw_sp->router->rifs[i]) { 6439 *p_rif_index = i; 6440 return 0; 6441 } 6442 } 6443 6444 return -ENOBUFS; 6445 } 6446 6447 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index, 6448 u16 vr_id, 6449 struct net_device *l3_dev) 6450 { 6451 struct mlxsw_sp_rif *rif; 6452 6453 rif = kzalloc(rif_size, GFP_KERNEL); 6454 if (!rif) 6455 return NULL; 6456 6457 INIT_LIST_HEAD(&rif->nexthop_list); 6458 INIT_LIST_HEAD(&rif->neigh_list); 6459 if (l3_dev) { 6460 ether_addr_copy(rif->addr, l3_dev->dev_addr); 6461 rif->mtu = l3_dev->mtu; 6462 rif->dev = l3_dev; 6463 } 6464 rif->vr_id = vr_id; 6465 rif->rif_index = rif_index; 6466 6467 return rif; 6468 } 6469 6470 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, 6471 u16 rif_index) 6472 { 6473 return mlxsw_sp->router->rifs[rif_index]; 6474 } 6475 6476 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif) 6477 { 6478 return rif->rif_index; 6479 } 6480 6481 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif) 6482 { 6483 return lb_rif->common.rif_index; 6484 } 6485 6486 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif) 6487 { 6488 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev); 6489 struct mlxsw_sp_vr *ul_vr; 6490 6491 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL); 6492 if (WARN_ON(IS_ERR(ul_vr))) 6493 return 0; 6494 6495 return ul_vr->id; 6496 } 6497 6498 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif) 6499 { 6500 return lb_rif->ul_rif_id; 6501 } 6502 6503 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif) 6504 { 6505 return rif->dev->ifindex; 6506 } 6507 6508 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif) 6509 { 6510 return rif->dev; 6511 } 6512 6513 struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif) 6514 { 6515 return rif->fid; 6516 } 6517 6518 static struct mlxsw_sp_rif * 6519 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, 6520 const struct mlxsw_sp_rif_params *params, 6521 struct netlink_ext_ack *extack) 6522 { 6523 u32 tb_id = l3mdev_fib_table(params->dev); 6524 const struct mlxsw_sp_rif_ops *ops; 6525 struct mlxsw_sp_fid *fid = NULL; 6526 enum mlxsw_sp_rif_type type; 6527 struct mlxsw_sp_rif *rif; 6528 struct mlxsw_sp_vr *vr; 6529 u16 rif_index; 6530 int i, err; 6531 6532 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev); 6533 ops = mlxsw_sp->rif_ops_arr[type]; 6534 6535 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? 
: RT_TABLE_MAIN, extack); 6536 if (IS_ERR(vr)) 6537 return ERR_CAST(vr); 6538 vr->rif_count++; 6539 6540 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); 6541 if (err) { 6542 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces"); 6543 goto err_rif_index_alloc; 6544 } 6545 6546 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev); 6547 if (!rif) { 6548 err = -ENOMEM; 6549 goto err_rif_alloc; 6550 } 6551 dev_hold(rif->dev); 6552 mlxsw_sp->router->rifs[rif_index] = rif; 6553 rif->mlxsw_sp = mlxsw_sp; 6554 rif->ops = ops; 6555 6556 if (ops->fid_get) { 6557 fid = ops->fid_get(rif, extack); 6558 if (IS_ERR(fid)) { 6559 err = PTR_ERR(fid); 6560 goto err_fid_get; 6561 } 6562 rif->fid = fid; 6563 } 6564 6565 if (ops->setup) 6566 ops->setup(rif, params); 6567 6568 err = ops->configure(rif); 6569 if (err) 6570 goto err_configure; 6571 6572 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) { 6573 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif); 6574 if (err) 6575 goto err_mr_rif_add; 6576 } 6577 6578 mlxsw_sp_rif_counters_alloc(rif); 6579 6580 return rif; 6581 6582 err_mr_rif_add: 6583 for (i--; i >= 0; i--) 6584 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); 6585 ops->deconfigure(rif); 6586 err_configure: 6587 if (fid) 6588 mlxsw_sp_fid_put(fid); 6589 err_fid_get: 6590 mlxsw_sp->router->rifs[rif_index] = NULL; 6591 dev_put(rif->dev); 6592 kfree(rif); 6593 err_rif_alloc: 6594 err_rif_index_alloc: 6595 vr->rif_count--; 6596 mlxsw_sp_vr_put(mlxsw_sp, vr); 6597 return ERR_PTR(err); 6598 } 6599 6600 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) 6601 { 6602 const struct mlxsw_sp_rif_ops *ops = rif->ops; 6603 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 6604 struct mlxsw_sp_fid *fid = rif->fid; 6605 struct mlxsw_sp_vr *vr; 6606 int i; 6607 6608 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); 6609 vr = &mlxsw_sp->router->vrs[rif->vr_id]; 6610 6611 mlxsw_sp_rif_counters_free(rif); 6612 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) 6613 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); 6614 ops->deconfigure(rif); 6615 if (fid) 6616 /* Loopback RIFs are not associated with a FID. 
*/ 6617 mlxsw_sp_fid_put(fid); 6618 mlxsw_sp->router->rifs[rif->rif_index] = NULL; 6619 dev_put(rif->dev); 6620 kfree(rif); 6621 vr->rif_count--; 6622 mlxsw_sp_vr_put(mlxsw_sp, vr); 6623 } 6624 6625 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, 6626 struct net_device *dev) 6627 { 6628 struct mlxsw_sp_rif *rif; 6629 6630 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 6631 if (!rif) 6632 return; 6633 mlxsw_sp_rif_destroy(rif); 6634 } 6635 6636 static void 6637 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, 6638 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 6639 { 6640 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 6641 6642 params->vid = mlxsw_sp_port_vlan->vid; 6643 params->lag = mlxsw_sp_port->lagged; 6644 if (params->lag) 6645 params->lag_id = mlxsw_sp_port->lag_id; 6646 else 6647 params->system_port = mlxsw_sp_port->local_port; 6648 } 6649 6650 static struct mlxsw_sp_rif_subport * 6651 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif) 6652 { 6653 return container_of(rif, struct mlxsw_sp_rif_subport, common); 6654 } 6655 6656 static struct mlxsw_sp_rif * 6657 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp, 6658 const struct mlxsw_sp_rif_params *params, 6659 struct netlink_ext_ack *extack) 6660 { 6661 struct mlxsw_sp_rif_subport *rif_subport; 6662 struct mlxsw_sp_rif *rif; 6663 6664 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev); 6665 if (!rif) 6666 return mlxsw_sp_rif_create(mlxsw_sp, params, extack); 6667 6668 rif_subport = mlxsw_sp_rif_subport_rif(rif); 6669 refcount_inc(&rif_subport->ref_count); 6670 return rif; 6671 } 6672 6673 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif) 6674 { 6675 struct mlxsw_sp_rif_subport *rif_subport; 6676 6677 rif_subport = mlxsw_sp_rif_subport_rif(rif); 6678 if (!refcount_dec_and_test(&rif_subport->ref_count)) 6679 return; 6680 6681 mlxsw_sp_rif_destroy(rif); 6682 } 6683 6684 static int 6685 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, 6686 struct net_device *l3_dev, 6687 struct netlink_ext_ack *extack) 6688 { 6689 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 6690 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 6691 struct mlxsw_sp_rif_params params = { 6692 .dev = l3_dev, 6693 }; 6694 u16 vid = mlxsw_sp_port_vlan->vid; 6695 struct mlxsw_sp_rif *rif; 6696 struct mlxsw_sp_fid *fid; 6697 int err; 6698 6699 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan); 6700 rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack); 6701 if (IS_ERR(rif)) 6702 return PTR_ERR(rif); 6703 6704 /* FID was already created, just take a reference */ 6705 fid = rif->ops->fid_get(rif, extack); if (IS_ERR(fid)) { err = PTR_ERR(fid); goto err_fid_get; } 6706 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid); 6707 if (err) 6708 goto err_fid_port_vid_map; 6709 6710 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); 6711 if (err) 6712 goto err_port_vid_learning_set; 6713 6714 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, 6715 BR_STATE_FORWARDING); 6716 if (err) 6717 goto err_port_vid_stp_set; 6718 6719 mlxsw_sp_port_vlan->fid = fid; 6720 6721 return 0; 6722 6723 err_port_vid_stp_set: 6724 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 6725 err_port_vid_learning_set: 6726 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid); 6727 err_fid_port_vid_map: 6728 mlxsw_sp_fid_put(fid); err_fid_get: 6729 mlxsw_sp_rif_subport_put(rif); 6730 return err; 6731 } 6732 6733 void 6734 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 6735 {
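/* Tear down the port VLAN's router configuration in the reverse order of
 * mlxsw_sp_port_vlan_router_join() and drop the FID and sub-port RIF
 * references taken there.
 */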
6736 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; 6737 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; 6738 struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid); 6739 u16 vid = mlxsw_sp_port_vlan->vid; 6740 6741 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID)) 6742 return; 6743 6744 mlxsw_sp_port_vlan->fid = NULL; 6745 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING); 6746 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); 6747 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid); 6748 mlxsw_sp_fid_put(fid); 6749 mlxsw_sp_rif_subport_put(rif); 6750 } 6751 6752 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev, 6753 struct net_device *port_dev, 6754 unsigned long event, u16 vid, 6755 struct netlink_ext_ack *extack) 6756 { 6757 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); 6758 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 6759 6760 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 6761 if (WARN_ON(!mlxsw_sp_port_vlan)) 6762 return -EINVAL; 6763 6764 switch (event) { 6765 case NETDEV_UP: 6766 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, 6767 l3_dev, extack); 6768 case NETDEV_DOWN: 6769 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); 6770 break; 6771 } 6772 6773 return 0; 6774 } 6775 6776 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, 6777 unsigned long event, 6778 struct netlink_ext_ack *extack) 6779 { 6780 if (netif_is_bridge_port(port_dev) || 6781 netif_is_lag_port(port_dev) || 6782 netif_is_ovs_port(port_dev)) 6783 return 0; 6784 6785 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 6786 MLXSW_SP_DEFAULT_VID, extack); 6787 } 6788 6789 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, 6790 struct net_device *lag_dev, 6791 unsigned long event, u16 vid, 6792 struct netlink_ext_ack *extack) 6793 { 6794 struct net_device *port_dev; 6795 struct list_head *iter; 6796 int err; 6797 6798 netdev_for_each_lower_dev(lag_dev, port_dev, iter) { 6799 if (mlxsw_sp_port_dev_check(port_dev)) { 6800 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev, 6801 port_dev, 6802 event, vid, 6803 extack); 6804 if (err) 6805 return err; 6806 } 6807 } 6808 6809 return 0; 6810 } 6811 6812 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, 6813 unsigned long event, 6814 struct netlink_ext_ack *extack) 6815 { 6816 if (netif_is_bridge_port(lag_dev)) 6817 return 0; 6818 6819 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 6820 MLXSW_SP_DEFAULT_VID, extack); 6821 } 6822 6823 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp, 6824 struct net_device *l3_dev, 6825 unsigned long event, 6826 struct netlink_ext_ack *extack) 6827 { 6828 struct mlxsw_sp_rif_params params = { 6829 .dev = l3_dev, 6830 }; 6831 struct mlxsw_sp_rif *rif; 6832 6833 switch (event) { 6834 case NETDEV_UP: 6835 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack); 6836 if (IS_ERR(rif)) 6837 return PTR_ERR(rif); 6838 break; 6839 case NETDEV_DOWN: 6840 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); 6841 mlxsw_sp_rif_destroy(rif); 6842 break; 6843 } 6844 6845 return 0; 6846 } 6847 6848 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp, 6849 struct net_device *vlan_dev, 6850 unsigned long event, 6851 struct netlink_ext_ack *extack) 6852 { 6853 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); 6854 u16 vid = vlan_dev_vlan_id(vlan_dev); 6855 6856 if (netif_is_bridge_port(vlan_dev)) 6857 return 0;
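/* An address on a VLAN device is configured on a RIF derived from the
 * VLAN device's real device: a sub-port RIF when the real device is a
 * port or a LAG, and a VLAN RIF when it is a VLAN-aware bridge. Other
 * real devices do not get a RIF.
 */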
6858 6859 if (mlxsw_sp_port_dev_check(real_dev)) 6860 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev, 6861 event, vid, extack); 6862 else if (netif_is_lag_master(real_dev)) 6863 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, 6864 vid, extack); 6865 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev)) 6866 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event, 6867 extack); 6868 6869 return 0; 6870 } 6871 6872 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac) 6873 { 6874 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 }; 6875 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 6876 6877 return ether_addr_equal_masked(mac, vrrp4, mask); 6878 } 6879 6880 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac) 6881 { 6882 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 }; 6883 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 6884 6885 return ether_addr_equal_masked(mac, vrrp6, mask); 6886 } 6887 6888 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index, 6889 const u8 *mac, bool adding) 6890 { 6891 char ritr_pl[MLXSW_REG_RITR_LEN]; 6892 u8 vrrp_id = adding ? mac[5] : 0; 6893 int err; 6894 6895 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) && 6896 !mlxsw_sp_rif_macvlan_is_vrrp6(mac)) 6897 return 0; 6898 6899 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); 6900 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 6901 if (err) 6902 return err; 6903 6904 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac)) 6905 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id); 6906 else 6907 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id); 6908 6909 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 6910 } 6911 6912 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp, 6913 const struct net_device *macvlan_dev, 6914 struct netlink_ext_ack *extack) 6915 { 6916 struct macvlan_dev *vlan = netdev_priv(macvlan_dev); 6917 struct mlxsw_sp_rif *rif; 6918 int err; 6919 6920 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); 6921 if (!rif) { 6922 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); 6923 return -EOPNOTSUPP; 6924 } 6925 6926 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, 6927 mlxsw_sp_fid_index(rif->fid), true); 6928 if (err) 6929 return err; 6930 6931 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, 6932 macvlan_dev->dev_addr, true); 6933 if (err) 6934 goto err_rif_vrrp_add; 6935 6936 /* Make sure the bridge driver does not have this MAC pointing at 6937 * some other port. 6938 */ 6939 if (rif->ops->fdb_del) 6940 rif->ops->fdb_del(rif, macvlan_dev->dev_addr); 6941 6942 return 0; 6943 6944 err_rif_vrrp_add: 6945 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, 6946 mlxsw_sp_fid_index(rif->fid), false); 6947 return err; 6948 } 6949 6950 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, 6951 const struct net_device *macvlan_dev) 6952 { 6953 struct macvlan_dev *vlan = netdev_priv(macvlan_dev); 6954 struct mlxsw_sp_rif *rif; 6955 6956 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); 6957 /* If we do not have a RIF, then we already took care of 6958 * removing the macvlan's MAC during RIF deletion. 
6959 */ 6960 if (!rif) 6961 return; 6962 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr, 6963 false); 6964 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, 6965 mlxsw_sp_fid_index(rif->fid), false); 6966 } 6967 6968 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp, 6969 struct net_device *macvlan_dev, 6970 unsigned long event, 6971 struct netlink_ext_ack *extack) 6972 { 6973 switch (event) { 6974 case NETDEV_UP: 6975 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack); 6976 case NETDEV_DOWN: 6977 mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev); 6978 break; 6979 } 6980 6981 return 0; 6982 } 6983 6984 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, 6985 struct net_device *dev, 6986 const unsigned char *dev_addr, 6987 struct netlink_ext_ack *extack) 6988 { 6989 struct mlxsw_sp_rif *rif; 6990 int i; 6991 6992 /* A RIF is not created for macvlan netdevs. Their MAC is used to 6993 * populate the FDB 6994 */ 6995 if (netif_is_macvlan(dev) || netif_is_l3_master(dev)) 6996 return 0; 6997 6998 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { 6999 rif = mlxsw_sp->router->rifs[i]; 7000 if (rif && rif->dev && rif->dev != dev && 7001 !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr, 7002 mlxsw_sp->mac_mask)) { 7003 NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix"); 7004 return -EINVAL; 7005 } 7006 } 7007 7008 return 0; 7009 } 7010 7011 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp, 7012 struct net_device *dev, 7013 unsigned long event, 7014 struct netlink_ext_ack *extack) 7015 { 7016 if (mlxsw_sp_port_dev_check(dev)) 7017 return mlxsw_sp_inetaddr_port_event(dev, event, extack); 7018 else if (netif_is_lag_master(dev)) 7019 return mlxsw_sp_inetaddr_lag_event(dev, event, extack); 7020 else if (netif_is_bridge_master(dev)) 7021 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event, 7022 extack); 7023 else if (is_vlan_dev(dev)) 7024 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event, 7025 extack); 7026 else if (netif_is_macvlan(dev)) 7027 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event, 7028 extack); 7029 else 7030 return 0; 7031 } 7032 7033 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb, 7034 unsigned long event, void *ptr) 7035 { 7036 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; 7037 struct net_device *dev = ifa->ifa_dev->dev; 7038 struct mlxsw_sp_router *router; 7039 struct mlxsw_sp_rif *rif; 7040 int err = 0; 7041 7042 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */ 7043 if (event == NETDEV_UP) 7044 goto out; 7045 7046 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb); 7047 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev); 7048 if (!mlxsw_sp_rif_should_config(rif, dev, event)) 7049 goto out; 7050 7051 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL); 7052 out: 7053 return notifier_from_errno(err); 7054 } 7055 7056 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, 7057 unsigned long event, void *ptr) 7058 { 7059 struct in_validator_info *ivi = (struct in_validator_info *) ptr; 7060 struct net_device *dev = ivi->ivi_dev->dev; 7061 struct mlxsw_sp *mlxsw_sp; 7062 struct mlxsw_sp_rif *rif; 7063 int err = 0; 7064 7065 mlxsw_sp = mlxsw_sp_lower_get(dev); 7066 if (!mlxsw_sp) 7067 goto out; 7068 7069 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 7070 if (!mlxsw_sp_rif_should_config(rif, dev, event)) 7071 goto out; 7072 7073 err = 
mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr, 7074 ivi->extack); 7075 if (err) 7076 goto out; 7077 7078 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack); 7079 out: 7080 return notifier_from_errno(err); 7081 } 7082 7083 struct mlxsw_sp_inet6addr_event_work { 7084 struct work_struct work; 7085 struct mlxsw_sp *mlxsw_sp; 7086 struct net_device *dev; 7087 unsigned long event; 7088 }; 7089 7090 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work) 7091 { 7092 struct mlxsw_sp_inet6addr_event_work *inet6addr_work = 7093 container_of(work, struct mlxsw_sp_inet6addr_event_work, work); 7094 struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp; 7095 struct net_device *dev = inet6addr_work->dev; 7096 unsigned long event = inet6addr_work->event; 7097 struct mlxsw_sp_rif *rif; 7098 7099 rtnl_lock(); 7100 7101 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 7102 if (!mlxsw_sp_rif_should_config(rif, dev, event)) 7103 goto out; 7104 7105 __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL); 7106 out: 7107 rtnl_unlock(); 7108 dev_put(dev); 7109 kfree(inet6addr_work); 7110 } 7111 7112 /* Called with rcu_read_lock() */ 7113 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb, 7114 unsigned long event, void *ptr) 7115 { 7116 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr; 7117 struct mlxsw_sp_inet6addr_event_work *inet6addr_work; 7118 struct net_device *dev = if6->idev->dev; 7119 struct mlxsw_sp_router *router; 7120 7121 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */ 7122 if (event == NETDEV_UP) 7123 return NOTIFY_DONE; 7124 7125 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC); 7126 if (!inet6addr_work) 7127 return NOTIFY_BAD; 7128 7129 router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb); 7130 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work); 7131 inet6addr_work->mlxsw_sp = router->mlxsw_sp; 7132 inet6addr_work->dev = dev; 7133 inet6addr_work->event = event; 7134 dev_hold(dev); 7135 mlxsw_core_schedule_work(&inet6addr_work->work); 7136 7137 return NOTIFY_DONE; 7138 } 7139 7140 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, 7141 unsigned long event, void *ptr) 7142 { 7143 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr; 7144 struct net_device *dev = i6vi->i6vi_dev->dev; 7145 struct mlxsw_sp *mlxsw_sp; 7146 struct mlxsw_sp_rif *rif; 7147 int err = 0; 7148 7149 mlxsw_sp = mlxsw_sp_lower_get(dev); 7150 if (!mlxsw_sp) 7151 goto out; 7152 7153 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 7154 if (!mlxsw_sp_rif_should_config(rif, dev, event)) 7155 goto out; 7156 7157 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr, 7158 i6vi->extack); 7159 if (err) 7160 goto out; 7161 7162 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack); 7163 out: 7164 return notifier_from_errno(err); 7165 } 7166 7167 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, 7168 const char *mac, int mtu) 7169 { 7170 char ritr_pl[MLXSW_REG_RITR_LEN]; 7171 int err; 7172 7173 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); 7174 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 7175 if (err) 7176 return err; 7177 7178 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); 7179 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); 7180 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); 7181 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 7182 } 7183 7184 static int 7185 mlxsw_sp_router_port_change_event(struct 
mlxsw_sp *mlxsw_sp, 7186 struct mlxsw_sp_rif *rif) 7187 { 7188 struct net_device *dev = rif->dev; 7189 u16 fid_index; 7190 int err; 7191 7192 fid_index = mlxsw_sp_fid_index(rif->fid); 7193 7194 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false); 7195 if (err) 7196 return err; 7197 7198 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr, 7199 dev->mtu); 7200 if (err) 7201 goto err_rif_edit; 7202 7203 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true); 7204 if (err) 7205 goto err_rif_fdb_op; 7206 7207 if (rif->mtu != dev->mtu) { 7208 struct mlxsw_sp_vr *vr; 7209 int i; 7210 7211 /* The RIF is relevant only to its mr_table instance, as unlike 7212 * unicast routing, in multicast routing a RIF cannot be shared 7213 * between several multicast routing tables. 7214 */ 7215 vr = &mlxsw_sp->router->vrs[rif->vr_id]; 7216 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) 7217 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i], 7218 rif, dev->mtu); 7219 } 7220 7221 ether_addr_copy(rif->addr, dev->dev_addr); 7222 rif->mtu = dev->mtu; 7223 7224 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index); 7225 7226 return 0; 7227 7228 err_rif_fdb_op: 7229 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu); 7230 err_rif_edit: 7231 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true); 7232 return err; 7233 } 7234 7235 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif, 7236 struct netdev_notifier_pre_changeaddr_info *info) 7237 { 7238 struct netlink_ext_ack *extack; 7239 7240 extack = netdev_notifier_info_to_extack(&info->info); 7241 return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev, 7242 info->dev_addr, extack); 7243 } 7244 7245 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, 7246 unsigned long event, void *ptr) 7247 { 7248 struct mlxsw_sp *mlxsw_sp; 7249 struct mlxsw_sp_rif *rif; 7250 7251 mlxsw_sp = mlxsw_sp_lower_get(dev); 7252 if (!mlxsw_sp) 7253 return 0; 7254 7255 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); 7256 if (!rif) 7257 return 0; 7258 7259 switch (event) { 7260 case NETDEV_CHANGEMTU: /* fall through */ 7261 case NETDEV_CHANGEADDR: 7262 return mlxsw_sp_router_port_change_event(mlxsw_sp, rif); 7263 case NETDEV_PRE_CHANGEADDR: 7264 return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr); 7265 } 7266 7267 return 0; 7268 } 7269 7270 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp, 7271 struct net_device *l3_dev, 7272 struct netlink_ext_ack *extack) 7273 { 7274 struct mlxsw_sp_rif *rif; 7275 7276 /* If netdev is already associated with a RIF, then we need to 7277 * destroy it and create a new one with the new virtual router ID. 
7278 */ 7279 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); 7280 if (rif) 7281 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, 7282 extack); 7283 7284 return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack); 7285 } 7286 7287 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp, 7288 struct net_device *l3_dev) 7289 { 7290 struct mlxsw_sp_rif *rif; 7291 7292 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); 7293 if (!rif) 7294 return; 7295 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL); 7296 } 7297 7298 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, 7299 struct netdev_notifier_changeupper_info *info) 7300 { 7301 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); 7302 int err = 0; 7303 7304 /* We do not create a RIF for a macvlan, but only use it to 7305 * direct more MAC addresses to the router. 7306 */ 7307 if (!mlxsw_sp || netif_is_macvlan(l3_dev)) 7308 return 0; 7309 7310 switch (event) { 7311 case NETDEV_PRECHANGEUPPER: 7312 return 0; 7313 case NETDEV_CHANGEUPPER: 7314 if (info->linking) { 7315 struct netlink_ext_ack *extack; 7316 7317 extack = netdev_notifier_info_to_extack(&info->info); 7318 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack); 7319 } else { 7320 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev); 7321 } 7322 break; 7323 } 7324 7325 return err; 7326 } 7327 7328 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data) 7329 { 7330 struct mlxsw_sp_rif *rif = data; 7331 7332 if (!netif_is_macvlan(dev)) 7333 return 0; 7334 7335 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, 7336 mlxsw_sp_fid_index(rif->fid), false); 7337 } 7338 7339 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif) 7340 { 7341 if (!netif_is_macvlan_port(rif->dev)) 7342 return 0; 7343 7344 netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n"); 7345 return netdev_walk_all_upper_dev_rcu(rif->dev, 7346 __mlxsw_sp_rif_macvlan_flush, rif); 7347 } 7348 7349 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif, 7350 const struct mlxsw_sp_rif_params *params) 7351 { 7352 struct mlxsw_sp_rif_subport *rif_subport; 7353 7354 rif_subport = mlxsw_sp_rif_subport_rif(rif); 7355 refcount_set(&rif_subport->ref_count, 1); 7356 rif_subport->vid = params->vid; 7357 rif_subport->lag = params->lag; 7358 if (params->lag) 7359 rif_subport->lag_id = params->lag_id; 7360 else 7361 rif_subport->system_port = params->system_port; 7362 } 7363 7364 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable) 7365 { 7366 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7367 struct mlxsw_sp_rif_subport *rif_subport; 7368 char ritr_pl[MLXSW_REG_RITR_LEN]; 7369 7370 rif_subport = mlxsw_sp_rif_subport_rif(rif); 7371 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF, 7372 rif->rif_index, rif->vr_id, rif->dev->mtu); 7373 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr); 7374 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag, 7375 rif_subport->lag ? 
rif_subport->lag_id : 7376 rif_subport->system_port, 7377 rif_subport->vid); 7378 7379 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 7380 } 7381 7382 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif) 7383 { 7384 int err; 7385 7386 err = mlxsw_sp_rif_subport_op(rif, true); 7387 if (err) 7388 return err; 7389 7390 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7391 mlxsw_sp_fid_index(rif->fid), true); 7392 if (err) 7393 goto err_rif_fdb_op; 7394 7395 mlxsw_sp_fid_rif_set(rif->fid, rif); 7396 return 0; 7397 7398 err_rif_fdb_op: 7399 mlxsw_sp_rif_subport_op(rif, false); 7400 return err; 7401 } 7402 7403 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif) 7404 { 7405 struct mlxsw_sp_fid *fid = rif->fid; 7406 7407 mlxsw_sp_fid_rif_set(fid, NULL); 7408 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7409 mlxsw_sp_fid_index(fid), false); 7410 mlxsw_sp_rif_macvlan_flush(rif); 7411 mlxsw_sp_rif_subport_op(rif, false); 7412 } 7413 7414 static struct mlxsw_sp_fid * 7415 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif, 7416 struct netlink_ext_ack *extack) 7417 { 7418 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index); 7419 } 7420 7421 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = { 7422 .type = MLXSW_SP_RIF_TYPE_SUBPORT, 7423 .rif_size = sizeof(struct mlxsw_sp_rif_subport), 7424 .setup = mlxsw_sp_rif_subport_setup, 7425 .configure = mlxsw_sp_rif_subport_configure, 7426 .deconfigure = mlxsw_sp_rif_subport_deconfigure, 7427 .fid_get = mlxsw_sp_rif_subport_fid_get, 7428 }; 7429 7430 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif, 7431 enum mlxsw_reg_ritr_if_type type, 7432 u16 vid_fid, bool enable) 7433 { 7434 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7435 char ritr_pl[MLXSW_REG_RITR_LEN]; 7436 7437 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id, 7438 rif->dev->mtu); 7439 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr); 7440 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid); 7441 7442 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); 7443 } 7444 7445 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp) 7446 { 7447 return mlxsw_core_max_ports(mlxsw_sp->core) + 1; 7448 } 7449 7450 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif) 7451 { 7452 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7453 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); 7454 int err; 7455 7456 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true); 7457 if (err) 7458 return err; 7459 7460 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7461 mlxsw_sp_router_port(mlxsw_sp), true); 7462 if (err) 7463 goto err_fid_mc_flood_set; 7464 7465 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7466 mlxsw_sp_router_port(mlxsw_sp), true); 7467 if (err) 7468 goto err_fid_bc_flood_set; 7469 7470 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7471 mlxsw_sp_fid_index(rif->fid), true); 7472 if (err) 7473 goto err_rif_fdb_op; 7474 7475 mlxsw_sp_fid_rif_set(rif->fid, rif); 7476 return 0; 7477 7478 err_rif_fdb_op: 7479 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7480 mlxsw_sp_router_port(mlxsw_sp), false); 7481 err_fid_bc_flood_set: 7482 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7483 mlxsw_sp_router_port(mlxsw_sp), false); 7484 err_fid_mc_flood_set: 7485 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false); 7486 return err; 7487 } 7488 7489 static void 
mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) 7490 { 7491 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); 7492 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7493 struct mlxsw_sp_fid *fid = rif->fid; 7494 7495 mlxsw_sp_fid_rif_set(fid, NULL); 7496 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7497 mlxsw_sp_fid_index(fid), false); 7498 mlxsw_sp_rif_macvlan_flush(rif); 7499 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7500 mlxsw_sp_router_port(mlxsw_sp), false); 7501 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7502 mlxsw_sp_router_port(mlxsw_sp), false); 7503 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false); 7504 } 7505 7506 static struct mlxsw_sp_fid * 7507 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, 7508 struct netlink_ext_ack *extack) 7509 { 7510 struct net_device *br_dev = rif->dev; 7511 u16 vid; 7512 int err; 7513 7514 if (is_vlan_dev(rif->dev)) { 7515 vid = vlan_dev_vlan_id(rif->dev); 7516 br_dev = vlan_dev_real_dev(rif->dev); 7517 if (WARN_ON(!netif_is_bridge_master(br_dev))) 7518 return ERR_PTR(-EINVAL); 7519 } else { 7520 err = br_vlan_get_pvid(rif->dev, &vid); 7521 if (err < 0 || !vid) { 7522 NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID"); 7523 return ERR_PTR(-EINVAL); 7524 } 7525 } 7526 7527 return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, br_dev, vid, extack); 7528 } 7529 7530 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) 7531 { 7532 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); 7533 struct switchdev_notifier_fdb_info info; 7534 struct net_device *br_dev; 7535 struct net_device *dev; 7536 7537 br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev; 7538 dev = br_fdb_find_port(br_dev, mac, vid); 7539 if (!dev) 7540 return; 7541 7542 info.addr = mac; 7543 info.vid = vid; 7544 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info, 7545 NULL); 7546 } 7547 7548 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = { 7549 .type = MLXSW_SP_RIF_TYPE_VLAN, 7550 .rif_size = sizeof(struct mlxsw_sp_rif), 7551 .configure = mlxsw_sp_rif_vlan_configure, 7552 .deconfigure = mlxsw_sp_rif_vlan_deconfigure, 7553 .fid_get = mlxsw_sp_rif_vlan_fid_get, 7554 .fdb_del = mlxsw_sp_rif_vlan_fdb_del, 7555 }; 7556 7557 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) 7558 { 7559 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7560 u16 fid_index = mlxsw_sp_fid_index(rif->fid); 7561 int err; 7562 7563 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, 7564 true); 7565 if (err) 7566 return err; 7567 7568 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7569 mlxsw_sp_router_port(mlxsw_sp), true); 7570 if (err) 7571 goto err_fid_mc_flood_set; 7572 7573 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7574 mlxsw_sp_router_port(mlxsw_sp), true); 7575 if (err) 7576 goto err_fid_bc_flood_set; 7577 7578 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7579 mlxsw_sp_fid_index(rif->fid), true); 7580 if (err) 7581 goto err_rif_fdb_op; 7582 7583 mlxsw_sp_fid_rif_set(rif->fid, rif); 7584 return 0; 7585 7586 err_rif_fdb_op: 7587 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7588 mlxsw_sp_router_port(mlxsw_sp), false); 7589 err_fid_bc_flood_set: 7590 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7591 mlxsw_sp_router_port(mlxsw_sp), false); 7592 err_fid_mc_flood_set: 7593 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); 7594 return err; 7595 } 7596 7597 static void 
mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) 7598 { 7599 u16 fid_index = mlxsw_sp_fid_index(rif->fid); 7600 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7601 struct mlxsw_sp_fid *fid = rif->fid; 7602 7603 mlxsw_sp_fid_rif_set(fid, NULL); 7604 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, 7605 mlxsw_sp_fid_index(fid), false); 7606 mlxsw_sp_rif_macvlan_flush(rif); 7607 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, 7608 mlxsw_sp_router_port(mlxsw_sp), false); 7609 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, 7610 mlxsw_sp_router_port(mlxsw_sp), false); 7611 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); 7612 } 7613 7614 static struct mlxsw_sp_fid * 7615 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, 7616 struct netlink_ext_ack *extack) 7617 { 7618 return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, rif->dev, 0, extack); 7619 } 7620 7621 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) 7622 { 7623 struct switchdev_notifier_fdb_info info; 7624 struct net_device *dev; 7625 7626 dev = br_fdb_find_port(rif->dev, mac, 0); 7627 if (!dev) 7628 return; 7629 7630 info.addr = mac; 7631 info.vid = 0; 7632 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info, 7633 NULL); 7634 } 7635 7636 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = { 7637 .type = MLXSW_SP_RIF_TYPE_FID, 7638 .rif_size = sizeof(struct mlxsw_sp_rif), 7639 .configure = mlxsw_sp_rif_fid_configure, 7640 .deconfigure = mlxsw_sp_rif_fid_deconfigure, 7641 .fid_get = mlxsw_sp_rif_fid_fid_get, 7642 .fdb_del = mlxsw_sp_rif_fid_fdb_del, 7643 }; 7644 7645 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = { 7646 .type = MLXSW_SP_RIF_TYPE_VLAN, 7647 .rif_size = sizeof(struct mlxsw_sp_rif), 7648 .configure = mlxsw_sp_rif_fid_configure, 7649 .deconfigure = mlxsw_sp_rif_fid_deconfigure, 7650 .fid_get = mlxsw_sp_rif_vlan_fid_get, 7651 .fdb_del = mlxsw_sp_rif_vlan_fdb_del, 7652 }; 7653 7654 static struct mlxsw_sp_rif_ipip_lb * 7655 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif) 7656 { 7657 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common); 7658 } 7659 7660 static void 7661 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif, 7662 const struct mlxsw_sp_rif_params *params) 7663 { 7664 struct mlxsw_sp_rif_params_ipip_lb *params_lb; 7665 struct mlxsw_sp_rif_ipip_lb *rif_lb; 7666 7667 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb, 7668 common); 7669 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif); 7670 rif_lb->lb_config = params_lb->lb_config; 7671 } 7672 7673 static int 7674 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) 7675 { 7676 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); 7677 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev); 7678 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7679 struct mlxsw_sp_vr *ul_vr; 7680 int err; 7681 7682 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL); 7683 if (IS_ERR(ul_vr)) 7684 return PTR_ERR(ul_vr); 7685 7686 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true); 7687 if (err) 7688 goto err_loopback_op; 7689 7690 lb_rif->ul_vr_id = ul_vr->id; 7691 lb_rif->ul_rif_id = 0; 7692 ++ul_vr->rif_count; 7693 return 0; 7694 7695 err_loopback_op: 7696 mlxsw_sp_vr_put(mlxsw_sp, ul_vr); 7697 return err; 7698 } 7699 7700 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif) 7701 { 7702 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); 7703 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; 7704 struct 
static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
	.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup = mlxsw_sp_rif_ipip_lb_setup,
	.configure = mlxsw_sp1_rif_ipip_lb_configure,
	.deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
};

const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};

static int
mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
					     MLXSW_REG_RITR_LOOPBACK_GENERIC);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *ul_rif;
	u16 rif_index;
	int err;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		return ERR_PTR(err);
	}

	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
	if (!ul_rif)
		return ERR_PTR(-ENOMEM);

	mlxsw_sp->router->rifs[rif_index] = ul_rif;
	ul_rif->mlxsw_sp = mlxsw_sp;
	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
	if (err)
		goto ul_rif_op_err;

	return ul_rif;

ul_rif_op_err:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	kfree(ul_rif);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;

	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
	kfree(ul_rif);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
		    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
		return vr->ul_rif;

	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
	if (IS_ERR(vr->ul_rif)) {
		err = PTR_ERR(vr->ul_rif);
		goto err_ul_rif_create;
	}

	vr->rif_count++;
	refcount_set(&vr->ul_rif_refcnt, 1);

	return vr->ul_rif;

err_ul_rif_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
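/* The underlay RIF is shared by all IPinIP tunnels whose underlay is
 * the same virtual router; it is destroyed, and the VR reference
 * dropped, only when the last user puts it.
 */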
static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	struct mlxsw_sp_vr *vr;

	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];

	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
		return;

	vr->rif_count--;
	mlxsw_sp_ul_rif_destroy(ul_rif);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
			       u16 *ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;

	ASSERT_RTNL();

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);
	*ul_rif_index = ul_rif->rif_index;

	return 0;
}

void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;

	ASSERT_RTNL();

	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
	if (WARN_ON(!ul_rif))
		return;

	mlxsw_sp_ul_rif_put(ul_rif);
}
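/* Spectrum-2 does not bind the IPinIP loopback directly to an underlay
 * virtual router. Instead it points the loopback at a shared underlay
 * RIF (ul_rif_id; ul_vr_id stays zero) obtained via
 * mlxsw_sp_ul_rif_get().
 */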
static int
mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;
	int err;

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = 0;
	lb_rif->ul_rif_id = ul_rif->rif_index;

	return 0;

err_loopback_op:
	mlxsw_sp_ul_rif_put(ul_rif);
	return err;
}

static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;

	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
	mlxsw_sp_ul_rif_put(ul_rif);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
	.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup = mlxsw_sp_rif_ipip_lb_setup,
	.configure = mlxsw_sp2_rif_ipip_lb_configure,
	.deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
};

const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
{
	bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
	bool only_l3 = !ip6_multipath_hash_policy(&init_net);

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
	if (only_l3) {
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	} else {
		mlxsw_sp_mp_hash_header_set(recr2_pl,
					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
	}
}
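/* Seed the ECMP hash with a value derived from the switch base MAC, so
 * that identical flows hash differently on different devices. This
 * should reduce hash polarization when several such switches are
 * chained.
 */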
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(recr2_pl);
	mlxsw_sp_mp6_hash_init(recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW is determining switch priority based on DSCP-bits, but the
	 * kernel is still doing that based on the ToS. Since there's a
	 * mismatch in bits we need to make sure to translate the right
	 * value ToS would observe, skipping the 2 least-significant ECN
	 * bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority;
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;
	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;
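	/* Register the FIB notifier last: registration replays the
	 * kernel's existing FIB entries through
	 * mlxsw_sp_router_fib_event(), so everything the handler depends
	 * on must already be initialized at this point.
	 */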
	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
err_dscp_init:
err_mp_hash_init:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
	kfree(mlxsw_sp->router);
}