// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]	= { .type = NLA_U32 },
	[NHA_GROUPS]	= { .type = NLA_FLAG },
	[NHA_MASTER]	= { .type = NLA_U32 },
	[NHA_FDB]	= { .type = NLA_FLAG },
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};

static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

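/* nh_notifier_info carries a type tag plus a union of per-type payload
 * pointers: each *_info_init() helper below sets info->type, allocates
 * the matching member (nh, nh_grp, nh_res_table or nh_res_bucket), and
 * the corresponding *_info_fini() releases it again.
 */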
static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}

static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].id = nhge->nh->id;
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}

static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))

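/* The RTNL-side write sequence that follows from the scheme above, as
 * used by e.g. replace_nexthop_grp_res():
 *
 *	nh_res_table_cancel_upkeep(res_table);	// cancel the DW synchronously
 *	...write to the table...		// exclusive access here
 *	nh_res_table_upkeep(res_table, ...);	// reschedules the DW if needed
 */
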
static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

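	/* Each group keeps a second, spare nh_group around so that entry
	 * removal can publish a rewritten copy via RCU without allocating
	 * (see remove_nh_grp_entry()); release it together with the group.
	 */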
	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}

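/* Example for nh_find_unused_id(): with last_id_allocated == 5 and IDs 6
 * and 7 already in the tree, the loop settles on 8 and leaves
 * last_id_allocated == 8 for the next auto-allocation.  Wrapping all the
 * way around to the starting value means the 32-bit ID space is
 * exhausted, and 0 is returned.
 */
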
static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	int i;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		p->id = nhg->nh_entries[i].nh->id;
		p->weight = nhg->nh_entries[i].weight - 1;
		p += 1;
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

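/* Note the "weight - 1" above: struct nexthop_grp carries weights
 * off-by-one on the wire, so the netlink values 0..254 (enforced by the
 * weight > 254 check in nh_check_attr_group()) correspond to effective
 * weights 1..255.
 */
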
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nhg))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh);
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}

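/* used_time is an atomic_long_t because nh_res_bucket_set_busy() runs
 * from the packet path (nexthop_select_path_res(), under RCU) while the
 * helpers above read it from upkeep and dump context.
 */
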
static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int i, j;
	u8 nhg_fdb = 0;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd1 || nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nhg[i].weight > 254) {
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	if (tb[NHA_FDB])
		nhg_fdb = 1;
	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		switch (i) {
		case NHA_FDB:
			continue;
		case NHA_RES_GROUP:
			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
				continue;
			break;
		}
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

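/* The NHA_GROUP payload validated above is a flat array of struct
 * nexthop_grp.  For example, a two-path group with effective weights 2
 * and 1 would be encoded as (assuming the weight-minus-one wire
 * convention noted at nla_put_nh_group()):
 *
 *	{ .id = 1, .weight = 1, .resvd1 = 0, .resvd2 = 0 },
 *	{ .id = 2, .weight = 0, .resvd1 = 0, .resvd2 = 0 },
 */
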
static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev,
					   &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}

static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nexthop *rc = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* A nexthop is always checked for being good here;
		 * there is no sysctl gating this behavior.
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!rc)
			rc = nhge->nh;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		return nhge->nh;
	}

	return rc ? : nhg->nh_entries[0].nh;
}

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	return nhge->nh;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}

static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}

static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}

static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * the idle timer or unbalanced timer expired.
	 */

	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
	if (time_after_eq(now, idle_point)) {
		/* The bucket is idle. We _can_ migrate: unset force. */
		*force = false;
		return true;
	}

	/* Unbalanced timer of 0 means "never force". */
	if (res_table->unbalanced_timer) {
		unsigned long unb_point;

		unb_point = nh_res_table_unb_point(res_table);
		if (time_after(now, unb_point)) {
			/* The bucket is not idle, but the unbalanced timer
			 * expired. We _can_ migrate, but set force anyway,
			 * so that drivers know to ignore activity reports
			 * from the HW.
			 */
			*force = true;
			return true;
		}

		nh_res_time_set_deadline(unb_point, deadline);
	}

	nh_res_time_set_deadline(idle_point, deadline);
	return false;
}

static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
				  u16 bucket_index, bool notify,
				  bool notify_nl, bool force)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *new_nhge;
	struct netlink_ext_ack extack;
	int err;

	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
					    struct nh_grp_entry,
					    res.uw_nh_entry);
	if (WARN_ON_ONCE(!new_nhge))
		/* If this function is called, "bucket" is either not
		 * occupied, or it belongs to a next hop that is
		 * overweight. In either case, there ought to be a
		 * corresponding underweight next hop.
		 */
		return false;

	if (notify) {
		struct nh_grp_entry *old_nhge;

		old_nhge = nh_res_dereference(bucket->nh_entry);
		err = call_nexthop_res_bucket_notifiers(res_table->net,
							res_table->nhg_id,
							bucket_index, force,
							old_nhge->nh,
							new_nhge->nh, &extack);
		if (err) {
			pr_err_ratelimited("%s\n", extack._msg);
			if (!force)
				return false;
			/* It is not possible to veto a forced replacement, so
			 * just clear the hardware flags from the nexthop
			 * bucket to indicate to user space that this bucket is
			 * not correctly populated in hardware.
			 */
			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
		}
	}

	nh_res_bucket_set_nh(bucket, new_nhge);
	nh_res_bucket_set_idle(res_table, bucket);

	if (notify_nl)
		nexthop_bucket_notify(res_table, bucket_index);

	if (nh_res_nhge_is_balanced(new_nhge))
		list_del(&new_nhge->res.uw_nh_entry);
	return true;
}

#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL	(HZ / 2)

static void nh_res_table_upkeep(struct nh_res_table *res_table,
				bool notify, bool notify_nl)
{
	unsigned long now = jiffies;
	unsigned long deadline;
	u16 i;

	/* Deadline is the next time that upkeep should be run. It is the
	 * earliest time at which one of the buckets might be migrated.
	 * Start at the most pessimistic estimate: either unbalanced_timer
	 * from now, or if there is none, idle_timer from now. For each
	 * encountered time point, call nh_res_time_set_deadline() to
	 * refine the estimate.
	 */
	if (res_table->unbalanced_timer)
		deadline = now + res_table->unbalanced_timer;
	else
		deadline = now + res_table->idle_timer;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		bool force;

		if (nh_res_bucket_should_migrate(res_table, bucket,
						 &deadline, &force)) {
			if (!nh_res_bucket_migrate(res_table, i, notify,
						   notify_nl, force)) {
				unsigned long idle_point;

				/* A driver can override the migration
				 * decision if the HW reports that the
				 * bucket is actually not idle. Therefore
				 * mark the bucket as busy again and
				 * update the deadline.
				 */
				nh_res_bucket_set_busy(bucket);
				idle_point = nh_res_bucket_idle_point(res_table,
								      bucket,
								      now);
				nh_res_time_set_deadline(idle_point, &deadline);
			}
		}
	}

	/* If the group is still unbalanced, schedule the next upkeep to
	 * either the deadline computed above, or the minimum deadline,
	 * whichever comes later.
	 */
	if (!nh_res_table_is_balanced(res_table)) {
		unsigned long now = jiffies;
		unsigned long min_deadline;

		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
		if (time_before(deadline, min_deadline))
			deadline = min_deadline;

		queue_delayed_work(system_power_efficient_wq,
				   &res_table->upkeep_dw, deadline - now);
	}
}

static void nh_res_table_upkeep_dw(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct nh_res_table *res_table;

	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
	nh_res_table_upkeep(res_table, true, true);
}

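/* Implements the "synchronously cancel the DW" step of the RES_TABLE
 * write scheme described at nh_res_dereference(): once this returns,
 * only the RTNL-held caller writes to the table until upkeep is
 * rescheduled.
 */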
static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
{
	cancel_delayed_work_sync(&res_table->upkeep_dw);
}

static void nh_res_group_rebalance(struct nh_group *nhg,
				   struct nh_res_table *res_table)
{
	int prev_upper_bound = 0;
	int total = 0;
	int w = 0;
	int i;

	INIT_LIST_HEAD(&res_table->uw_nh_entries);

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
						total);
		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
		prev_upper_bound = upper_bound;

		if (nh_res_nhge_is_uw(nhge)) {
			if (list_empty(&res_table->uw_nh_entries))
				res_table->unbalanced_since = jiffies;
			list_add(&nhge->res.uw_nh_entry,
				 &res_table->uw_nh_entries);
		}
	}
}

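/* Example for nh_res_group_rebalance(): with 6 buckets and two entries
 * of weights 1 and 2 (total 3), the cumulative upper bounds are
 * DIV_ROUND_CLOSEST(6 * 1, 3) = 2 and DIV_ROUND_CLOSEST(6 * 3, 3) = 6,
 * so the entries want 2 and 4 buckets, respectively.  Entries holding
 * fewer buckets than they want go on the underweight list.
 */
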
/* Migrate buckets in res_table so that they reference NHGE's from NHG with
 * the right NH ID. Set those buckets that do not have a corresponding NHGE
 * entry in NHG as not occupied.
 */
static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
					 struct nh_group *nhg)
{
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
		bool found = false;
		int j;

		for (j = 0; j < nhg->num_nh; j++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[j];

			if (nhge->nh->id == id) {
				nh_res_bucket_set_nh(bucket, nhge);
				found = true;
				break;
			}
		}

		if (!found)
			nh_res_bucket_unset_nh(bucket);
	}
}

static void replace_nexthop_grp_res(struct nh_group *oldg,
				    struct nh_group *newg)
{
	/* For NH group replacement, the new NHG might only have a stub
	 * hash table with 0 buckets, because the number of buckets was not
	 * specified. For NH removal, oldg and newg both reference the same
	 * res_table. So in any case, in the following, we want to work
	 * with oldg->res_table.
	 */
	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);

	nh_res_table_cancel_upkeep(old_res_table);
	nh_res_table_migrate_buckets(old_res_table, newg);
	nh_res_group_rebalance(newg, old_res_table);
	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
		old_res_table->unbalanced_since = prev_unbalanced_since;
	nh_res_table_upkeep(old_res_table, true, false);
}

static void nh_hthr_group_rebalance(struct nh_group *nhg)
{
	int total = 0;
	int w = 0;
	int i;

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
		atomic_set(&nhge->hthr.upper_bound, upper_bound);
	}
}

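/* Example for nh_hthr_group_rebalance(): with weights 1 and 3 (total 4),
 * the upper bounds over the 31-bit hash space come out as
 * (1 << 31) * 1 / 4 - 1 = 0x1fffffff and (1 << 31) * 4 / 4 - 1 =
 * 0x7fffffff, so nexthop_select_path_hthr() sends roughly 25% of flows
 * to the first entry and 75% to the second.
 */
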
static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
				struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhges, *new_nhges;
	struct nexthop *nhp = nhge->nh_parent;
	struct netlink_ext_ack extack;
	struct nexthop *nh = nhge->nh;
	struct nh_group *nhg, *newg;
	int i, j, err;

	WARN_ON(!nh);

	nhg = rtnl_dereference(nhp->nh_grp);
	newg = nhg->spare;

	/* last entry, keep it visible and remove the parent */
	if (nhg->num_nh == 1) {
		remove_nexthop(net, nhp, nlinfo);
		return;
	}

	newg->has_v4 = false;
	newg->is_multipath = nhg->is_multipath;
	newg->hash_threshold = nhg->hash_threshold;
	newg->resilient = nhg->resilient;
	newg->fdb_nh = nhg->fdb_nh;
	newg->num_nh = nhg->num_nh;

	/* copy old entries to new except the one getting removed */
	nhges = nhg->nh_entries;
	new_nhges = newg->nh_entries;
	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
		struct nh_info *nhi;

		/* current nexthop getting removed */
		if (nhg->nh_entries[i].nh == nh) {
			newg->num_nh--;
			continue;
		}

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			newg->has_v4 = true;

		list_del(&nhges[i].nh_list);
		new_nhges[j].nh_parent = nhges[i].nh_parent;
		new_nhges[j].nh = nhges[i].nh;
		new_nhges[j].weight = nhges[i].weight;
		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
		j++;
	}

	if (newg->hash_threshold)
		nh_hthr_group_rebalance(newg);
	else if (newg->resilient)
		replace_nexthop_grp_res(nhg, newg);

	rcu_assign_pointer(nhp->nh_grp, newg);

	list_del(&nhge->nh_list);
	nexthop_put(nhge->nh);

	/* Removal of a NH from a resilient group is notified through
	 * bucket notifications.
	 */
	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
					     &extack);
		if (err)
			pr_err("%s\n", extack._msg);
	}

	if (nlinfo)
		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
}

static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
				       struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhge, *tmp;

	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
		remove_nh_grp_entry(net, nhge, nlinfo);

	/* make sure all see the newly published array before releasing rtnl */
	synchronize_net();
}

static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
{
	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
	struct nh_res_table *res_table;
	int i, num_nh = nhg->num_nh;

	for (i = 0; i < num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (WARN_ON(!nhge->nh))
			continue;

		list_del_init(&nhge->nh_list);
	}

	if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		nh_res_table_cancel_upkeep(res_table);
	}
}

/* not called for nexthop replace */
static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i, *tmp;
	bool do_flush = false;
	struct fib_info *fi;

	list_for_each_entry(fi, &nh->fi_list, nh_list) {
		fi->fib_flags |= RTNH_F_DEAD;
		do_flush = true;
	}
	if (do_flush)
		fib_flush(net);

	/* ip6_del_rt removes the entry from this list hence the _safe */
	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
		/* __ip6_del_rt does a release, so do a hold here */
		fib6_info_hold(f6i);
		ipv6_stub->ip6_del_rt(net, f6i,
				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
	}
}

static void __remove_nexthop(struct net *net, struct nexthop *nh,
			     struct nl_info *nlinfo)
{
	__remove_nexthop_fib(net, nh);

	if (nh->is_group) {
		remove_nexthop_group(nh, nlinfo);
	} else {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fib_nhc.nhc_dev)
			hlist_del(&nhi->dev_hash);

		remove_nexthop_from_groups(net, nh, nlinfo);
	}
}

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo)
{
	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);

	/* remove from the tree */
	rb_erase(&nh->rb_node, &net->nexthop.rb_root);

	if (nlinfo)
		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);

	__remove_nexthop(net, nh, nlinfo);
	nh_base_seq_inc(net);

	nexthop_put(nh);
}

/* if any FIB entries reference this nexthop, any dst entries
 * need to be regenerated
 */
static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
			      struct nexthop *replaced_nh)
{
	struct fib6_info *f6i;
	struct nh_group *nhg;
	int i;

	if (!list_empty(&nh->fi_list))
		rt_cache_flush(net);

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_update_sernum(net, f6i);

	/* if an IPv6 group was replaced, we have to release all old
	 * dsts to make sure all refcounts are released
	 */
	if (!replaced_nh->is_group)
		return;

	nhg = rtnl_dereference(replaced_nh->nh_grp);
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);

		if (nhi->family == AF_INET6)
			ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
	}
}

static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new, const struct nh_config *cfg,
			       struct netlink_ext_ack *extack)
{
	struct nh_res_table *tmp_table = NULL;
	struct nh_res_table *new_res_table;
	struct nh_res_table *old_res_table;
	struct nh_group *oldg, *newg;
	int i, err;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	if (newg->hash_threshold != oldg->hash_threshold) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
		return -EINVAL;
	}

	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
					     extack);
		if (err)
			return err;
	} else if (newg->resilient) {
		new_res_table = rtnl_dereference(newg->res_table);
		old_res_table = rtnl_dereference(oldg->res_table);

		/* Accept if num_nh_buckets was not given, but if it was
		 * given, demand that the value be correct.
		 */
		if (cfg->nh_grp_res_has_num_buckets &&
		    cfg->nh_grp_res_num_buckets !=
		    old_res_table->num_nh_buckets) {
			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
			return -EINVAL;
		}

		/* Emit a pre-replace notification so that listeners could veto
		 * a potentially unsupported configuration. Otherwise,
		 * individual bucket replacement notifications would need to be
		 * vetoed, which is something that should only happen if the
		 * bucket is currently active.
		 */
		err = call_nexthop_res_table_notifiers(net, new, extack);
		if (err)
			return err;

		if (cfg->nh_grp_res_has_idle_timer)
			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
		if (cfg->nh_grp_res_has_unbalanced_timer)
			old_res_table->unbalanced_timer =
				cfg->nh_grp_res_unbalanced_timer;

		replace_nexthop_grp_res(oldg, newg);

		tmp_table = new_res_table;
		rcu_assign_pointer(newg->res_table, old_res_table);
		rcu_assign_pointer(newg->spare->res_table, old_res_table);
	}

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	/* Make sure concurrent readers are not using 'oldg' anymore. */
	synchronize_net();

	if (newg->resilient) {
		rcu_assign_pointer(oldg->res_table, tmp_table);
		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
	}

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}

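/* After replace_nexthop_grp(), 'old' (the nexthop that stays in the
 * rb-tree) publishes what used to be 'new's group, while the retired
 * group is parked on 'new' and is freed along with it (see
 * nexthop_free_group()).  For a resilient group, the res_table
 * deliberately stays with 'old' so that bucket state and the upkeep
 * schedule survive the replacement.
 */
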
static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new, const struct nh_config *cfg,
			       struct netlink_ext_ack *extack)
{
	struct nh_res_table *tmp_table = NULL;
	struct nh_res_table *new_res_table;
	struct nh_res_table *old_res_table;
	struct nh_group *oldg, *newg;
	int i, err;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	if (newg->hash_threshold != oldg->hash_threshold) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
		return -EINVAL;
	}

	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
					     extack);
		if (err)
			return err;
	} else if (newg->resilient) {
		new_res_table = rtnl_dereference(newg->res_table);
		old_res_table = rtnl_dereference(oldg->res_table);

		/* Accept if num_nh_buckets was not given, but if it was
		 * given, demand that the value be correct.
		 */
		if (cfg->nh_grp_res_has_num_buckets &&
		    cfg->nh_grp_res_num_buckets !=
		    old_res_table->num_nh_buckets) {
			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
			return -EINVAL;
		}

		/* Emit a pre-replace notification so that listeners could veto
		 * a potentially unsupported configuration. Otherwise,
		 * individual bucket replacement notifications would need to be
		 * vetoed, which is something that should only happen if the
		 * bucket is currently active.
		 */
		err = call_nexthop_res_table_notifiers(net, new, extack);
		if (err)
			return err;

		if (cfg->nh_grp_res_has_idle_timer)
			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
		if (cfg->nh_grp_res_has_unbalanced_timer)
			old_res_table->unbalanced_timer =
				cfg->nh_grp_res_unbalanced_timer;

		replace_nexthop_grp_res(oldg, newg);

		tmp_table = new_res_table;
		rcu_assign_pointer(newg->res_table, old_res_table);
		rcu_assign_pointer(newg->spare->res_table, old_res_table);
	}

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	/* Make sure concurrent readers are not using 'oldg' anymore. */
	synchronize_net();

	if (newg->resilient) {
		rcu_assign_pointer(oldg->res_table, tmp_table);
		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
	}

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}
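/* Sketch of the swap performed by replace_nexthop_grp() above:
 *
 *	before:	old->nh_grp -> oldg	new->nh_grp -> newg
 *	after:	old->nh_grp -> newg	new->nh_grp -> oldg
 *
 * 'old' (the ID userspace knows) keeps its struct nexthop but now points
 * at the new entries; 'new' inherits the stale group and is released by
 * the caller, which tears down oldg with it. For resilient groups the
 * res_table deliberately stays with 'old', so bucket state survives the
 * replacement.
 */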
static void nh_group_v4_update(struct nh_group *nhg)
{
	struct nh_grp_entry *nhges;
	bool has_v4 = false;
	int i;

	nhges = nhg->nh_entries;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			has_v4 = true;
	}
	nhg->has_v4 = has_v4;
}

static int replace_nexthop_single_notify_res(struct net *net,
					     struct nh_res_table *res_table,
					     struct nexthop *old,
					     struct nh_info *oldi,
					     struct nh_info *newi,
					     struct netlink_ext_ack *extack)
{
	u32 nhg_id = res_table->nhg_id;
	int err;
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old) {
			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
								  i, true,
								  oldi, newi,
								  extack);
			if (err)
				goto err_notify;
		}
	}

	return 0;

err_notify:
	while (i-- > 0) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old)
			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
							    true, newi, oldi,
							    extack);
	}
	return err;
}

static int replace_nexthop_single_notify(struct net *net,
					 struct nexthop *group_nh,
					 struct nexthop *old,
					 struct nh_info *oldi,
					 struct nh_info *newi,
					 struct netlink_ext_ack *extack)
{
	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
	struct nh_res_table *res_table;

	if (nhg->hash_threshold) {
		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
					      group_nh, extack);
	} else if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		return replace_nexthop_single_notify_res(net, res_table,
							 old, oldi, newi,
							 extack);
	}

	return -EINVAL;
}

static int replace_nexthop_single(struct net *net, struct nexthop *old,
				  struct nexthop *new,
				  struct netlink_ext_ack *extack)
{
	u8 old_protocol, old_nh_flags;
	struct nh_info *oldi, *newi;
	struct nh_grp_entry *nhge;
	int err;

	if (new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
		return -EINVAL;
	}

	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
	if (err)
		return err;

	/* Hardware flags were set on 'old' as 'new' is not in the red-black
	 * tree. Therefore, inherit the flags from 'old' to 'new'.
	 */
	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);

	oldi = rtnl_dereference(old->nh_info);
	newi = rtnl_dereference(new->nh_info);

	newi->nh_parent = old;
	oldi->nh_parent = new;

	old_protocol = old->protocol;
	old_nh_flags = old->nh_flags;

	old->protocol = new->protocol;
	old->nh_flags = new->nh_flags;

	rcu_assign_pointer(old->nh_info, newi);
	rcu_assign_pointer(new->nh_info, oldi);

	/* Send a replace notification for all the groups using the nexthop. */
	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
						    extack);
		if (err)
			goto err_notify;
	}

	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
	 * update IPv4 indication in all the groups using the nexthop.
	 */
	if (oldi->family == AF_INET && newi->family == AF_INET6) {
		list_for_each_entry(nhge, &old->grp_list, nh_list) {
			struct nexthop *nhp = nhge->nh_parent;
			struct nh_group *nhg;

			nhg = rtnl_dereference(nhp->nh_grp);
			nh_group_v4_update(nhg);
		}
	}

	return 0;

err_notify:
	rcu_assign_pointer(new->nh_info, newi);
	rcu_assign_pointer(old->nh_info, oldi);
	old->nh_flags = old_nh_flags;
	old->protocol = old_protocol;
	oldi->nh_parent = old;
	newi->nh_parent = new;
	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
	}
	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
	return err;
}
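/* The err_notify unwind above is a mirror image of the forward walk:
 * the nh_info pointers and flags are swapped back first, then the groups
 * already notified are re-notified in reverse order with oldi/newi
 * exchanged, and a final replace notification restores 'old' in
 * listeners. Nothing is left half-switched on failure.
 */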
static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
				     struct nl_info *info)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list)) {
		struct fib_info *fi;

		/* expectation is a few fib_info per nexthop and then
		 * a lot of routes per fib_info. So mark the fib_info
		 * and then walk the fib tables once
		 */
		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = true;

		fib_info_notify_update(net, info);

		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = false;
	}

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_rt_update(net, f6i, info);
}

/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
 * linked to this nexthop and for all groups that the nexthop
 * is a member of
 */
static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
				   struct nl_info *info)
{
	struct nh_grp_entry *nhge;

	__nexthop_replace_notify(net, nh, info);

	list_for_each_entry(nhge, &nh->grp_list, nh_list)
		__nexthop_replace_notify(net, nhge->nh_parent, info);
}

static int replace_nexthop(struct net *net, struct nexthop *old,
			   struct nexthop *new, const struct nh_config *cfg,
			   struct netlink_ext_ack *extack)
{
	bool new_is_reject = false;
	struct nh_grp_entry *nhge;
	int err;

	/* check that existing FIB entries are ok with the
	 * new nexthop definition
	 */
	err = fib_check_nh_list(old, new, extack);
	if (err)
		return err;

	err = fib6_check_nh_list(old, new, extack);
	if (err)
		return err;

	if (!new->is_group) {
		struct nh_info *nhi = rtnl_dereference(new->nh_info);

		new_is_reject = nhi->reject_nh;
	}

	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		/* if new nexthop is a blackhole, any groups using this
		 * nexthop cannot have more than 1 path
		 */
		if (new_is_reject &&
		    nexthop_num_path(nhge->nh_parent) > 1) {
			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
			return -EINVAL;
		}

		err = fib_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;

		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;
	}

	if (old->is_group)
		err = replace_nexthop_grp(net, old, new, cfg, extack);
	else
		err = replace_nexthop_single(net, old, new, extack);

	if (!err) {
		nh_rt_cache_flush(net, old, new);

		__remove_nexthop(net, new, NULL);
		nexthop_put(new);
	}

	return err;
}
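/* How the nlmsg flags map onto insert_nexthop() below (illustrative; the
 * iproute2 commands shown are just the usual way these flags get set):
 *
 *	NLM_F_CREATE | NLM_F_EXCL	"ip nexthop add"	-EEXIST if id taken
 *	NLM_F_CREATE | NLM_F_REPLACE	"ip nexthop replace"	create or replace
 *	NLM_F_REPLACE alone					-ENOENT if id absent
 */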
/* called with rtnl_lock held */
static int insert_nexthop(struct net *net, struct nexthop *new_nh,
			  struct nh_config *cfg, struct netlink_ext_ack *extack)
{
	struct rb_node **pp, *parent = NULL, *next;
	struct rb_root *root = &net->nexthop.rb_root;
	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
	bool create = !!(cfg->nlflags & NLM_F_CREATE);
	u32 new_id = new_nh->id;
	int replace_notify = 0;
	int rc = -EEXIST;

	pp = &root->rb_node;
	while (1) {
		struct nexthop *nh;

		next = *pp;
		if (!next)
			break;

		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (new_id < nh->id) {
			pp = &next->rb_left;
		} else if (new_id > nh->id) {
			pp = &next->rb_right;
		} else if (replace) {
			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
			if (!rc) {
				new_nh = nh; /* send notification with old nh */
				replace_notify = 1;
			}
			goto out;
		} else {
			/* id already exists and not a replace */
			goto out;
		}
	}

	if (replace && !create) {
		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
		rc = -ENOENT;
		goto out;
	}

	if (new_nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
		struct nh_res_table *res_table;

		if (nhg->resilient) {
			res_table = rtnl_dereference(nhg->res_table);

			/* Not passing the number of buckets is OK when
			 * replacing, but not when creating a new group.
			 */
			if (!cfg->nh_grp_res_has_num_buckets) {
				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
				rc = -EINVAL;
				goto out;
			}

			nh_res_group_rebalance(nhg, res_table);

			/* Do not send bucket notifications, we do full
			 * notification below.
			 */
			nh_res_table_upkeep(res_table, false, false);
		}
	}

	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
	rb_insert_color(&new_nh->rb_node, root);

	/* The initial insertion is a full notification for hash-threshold as
	 * well as resilient groups.
	 */
	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
	if (rc)
		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);

out:
	if (!rc) {
		nh_base_seq_inc(net);
		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
		if (replace_notify &&
		    READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
	}

	return rc;
}

/* rtnl */
/* remove all nexthops tied to a device being deleted */
static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev != dev)
			continue;

		if (nhi->reject_nh &&
		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
			continue;

		remove_nexthop(net, nhi->nh_parent, NULL);
	}
}

/* rtnl; called when net namespace is deleted */
static void flush_all_nexthops(struct net *net)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	struct nexthop *nh;

	while ((node = rb_first(root))) {
		nh = rb_entry(node, struct nexthop, rb_node);
		remove_nexthop(net, nh, NULL);
		cond_resched();
	}
}
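/* NHA_GROUP carries a raw array of struct nexthop_grp entries (uapi
 * linux/nexthop.h): { __u32 id; __u8 weight; __u8 resvd1; __u16 resvd2; }.
 * The wire format encodes weight - 1, so a hypothetical two-path payload
 * { {.id = 1, .weight = 0}, {.id = 2, .weight = 2} } resolves below to
 * nexthop 1 with effective weight 1 and nexthop 2 with effective weight 3.
 */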
static struct nexthop *nexthop_create_group(struct net *net,
					    struct nh_config *cfg)
{
	struct nlattr *grps_attr = cfg->nh_grp;
	struct nexthop_grp *entry = nla_data(grps_attr);
	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
	struct nh_group *nhg;
	struct nexthop *nh;
	int err;
	int i;

	if (WARN_ON(!num_nh))
		return ERR_PTR(-EINVAL);

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nh->is_group = 1;

	nhg = nexthop_grp_alloc(num_nh);
	if (!nhg) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	/* spare group used for removals */
	nhg->spare = nexthop_grp_alloc(num_nh);
	if (!nhg->spare) {
		kfree(nhg);
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}
	nhg->spare->spare = nhg;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nexthop *nhe;
		struct nh_info *nhi;

		nhe = nexthop_find_by_id(net, entry[i].id);
		if (!nexthop_get(nhe)) {
			err = -ENOENT;
			goto out_no_nh;
		}

		nhi = rtnl_dereference(nhe->nh_info);
		if (nhi->family == AF_INET)
			nhg->has_v4 = true;

		nhg->nh_entries[i].nh = nhe;
		nhg->nh_entries[i].weight = entry[i].weight + 1;
		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
		nhg->nh_entries[i].nh_parent = nh;
	}

	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
		nhg->hash_threshold = 1;
		nhg->is_multipath = true;
	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
		struct nh_res_table *res_table;

		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
		if (!res_table) {
			err = -ENOMEM;
			goto out_no_nh;
		}

		rcu_assign_pointer(nhg->spare->res_table, res_table);
		rcu_assign_pointer(nhg->res_table, res_table);
		nhg->resilient = true;
		nhg->is_multipath = true;
	}

	WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);

	if (nhg->hash_threshold)
		nh_hthr_group_rebalance(nhg);

	if (cfg->nh_fdb)
		nhg->fdb_nh = 1;

	rcu_assign_pointer(nh->nh_grp, nhg);

	return nh;

out_no_nh:
	for (i--; i >= 0; --i) {
		list_del(&nhg->nh_entries[i].nh_list);
		nexthop_put(nhg->nh_entries[i].nh);
	}

	kfree(nhg->spare);
	kfree(nhg);
	kfree(nh);

	return ERR_PTR(err);
}
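/* Design note: the 'spare' group allocated above is what lets
 * remove_nh_grp_entry() shrink a group without allocating memory at
 * removal time, a path that is not allowed to fail. The active and spare
 * nh_group structs simply trade places on each removal or replacement.
 */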
static int nh_create_ipv4(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib_nh *fib_nh = &nhi->fib_nh;
	struct fib_config fib_cfg = {
		.fc_oif = cfg->nh_ifindex,
		.fc_gw4 = cfg->gw.ipv4,
		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
		.fc_flags = cfg->nh_flags,
		.fc_nlinfo = cfg->nlinfo,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
	int err;

	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
	if (err) {
		fib_nh_release(net, fib_nh);
		goto out;
	}

	if (nhi->fdb_nh)
		goto out;

	/* sets nh_dev if successful */
	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
	if (!err) {
		nh->nh_flags = fib_nh->fib_nh_flags;
		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
					  !fib_nh->fib_nh_scope ?
					  0 : fib_nh->fib_nh_scope - 1);
	} else {
		fib_nh_release(net, fib_nh);
	}
out:
	return err;
}

static int nh_create_ipv6(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
	struct fib6_config fib6_cfg = {
		.fc_table = l3mdev_fib_table(cfg->dev),
		.fc_ifindex = cfg->nh_ifindex,
		.fc_gateway = cfg->gw.ipv6,
		.fc_flags = cfg->nh_flags,
		.fc_nlinfo = cfg->nlinfo,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
		.fc_is_fdb = cfg->nh_fdb,
	};
	int err;

	if (!ipv6_addr_any(&cfg->gw.ipv6))
		fib6_cfg.fc_flags |= RTF_GATEWAY;

	/* sets nh_dev if successful */
	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
				      extack);
	if (err) {
		/* IPv6 is not enabled, don't call fib6_nh_release */
		if (err == -EAFNOSUPPORT)
			goto out;
		ipv6_stub->fib6_nh_release(fib6_nh);
	} else {
		nh->nh_flags = fib6_nh->fib_nh_flags;
	}
out:
	return err;
}

static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
				      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	struct nexthop *nh;
	int err = 0;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
	if (!nhi) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	nh->nh_flags = cfg->nh_flags;
	nh->net = net;

	nhi->nh_parent = nh;
	nhi->family = cfg->nh_family;
	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;

	if (cfg->nh_fdb)
		nhi->fdb_nh = 1;

	if (cfg->nh_blackhole) {
		nhi->reject_nh = 1;
		cfg->nh_ifindex = net->loopback_dev->ifindex;
	}

	switch (cfg->nh_family) {
	case AF_INET:
		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
		break;
	case AF_INET6:
		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
		break;
	}

	if (err) {
		kfree(nhi);
		kfree(nh);
		return ERR_PTR(err);
	}

	/* add the entry to the device based hash */
	if (!nhi->fdb_nh)
		nexthop_devhash_add(net, nhi);

	rcu_assign_pointer(nh->nh_info, nhi);

	return nh;
}

/* called with rtnl lock held */
static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct nexthop *nh;
	int err;

	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
		return ERR_PTR(-EINVAL);
	}

	if (!cfg->nh_id) {
		cfg->nh_id = nh_find_unused_id(net);
		if (!cfg->nh_id) {
			NL_SET_ERR_MSG(extack, "No unused id");
			return ERR_PTR(-EINVAL);
		}
	}

	if (cfg->nh_grp)
		nh = nexthop_create_group(net, cfg);
	else
		nh = nexthop_create(net, cfg, extack);

	if (IS_ERR(nh))
		return nh;

	refcount_set(&nh->refcnt, 1);
	nh->id = cfg->nh_id;
	nh->protocol = cfg->nh_protocol;
	nh->net = net;

	err = insert_nexthop(net, nh, cfg, extack);
	if (err) {
		__remove_nexthop(net, nh, NULL);
		nexthop_put(nh);
		nh = ERR_PTR(err);
	}

	return nh;
}
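/* The resilient-group timers parsed below arrive as u32 clock_t values
 * and are converted with clock_t_to_jiffies(). Worked example (assuming
 * the usual USER_HZ of 100): a netlink value of 12000 means
 * 12000 / 100 = 120 seconds, the same interval as the
 * NH_RES_DEFAULT_IDLE_TIMER fallback of 120 * HZ jiffies.
 */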
static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
			    unsigned long *timer_p, bool *has_p,
			    struct netlink_ext_ack *extack)
{
	unsigned long timer;
	u32 value;

	if (!attr) {
		*timer_p = fallback;
		*has_p = false;
		return 0;
	}

	value = nla_get_u32(attr);
	timer = clock_t_to_jiffies(value);
	if (timer == ~0UL) {
		NL_SET_ERR_MSG(extack, "Timer value too large");
		return -EINVAL;
	}

	*timer_p = timer;
	*has_p = true;
	return 0;
}

static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
	int err;

	if (res) {
		err = nla_parse_nested(tb,
				       ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
				       res, rtm_nh_res_policy_new, extack);
		if (err < 0)
			return err;
	}

	if (tb[NHA_RES_GROUP_BUCKETS]) {
		cfg->nh_grp_res_num_buckets =
			nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
		cfg->nh_grp_res_has_num_buckets = true;
		if (!cfg->nh_grp_res_num_buckets) {
			NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
			return -EINVAL;
		}
	}

	err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
			       NH_RES_DEFAULT_IDLE_TIMER,
			       &cfg->nh_grp_res_idle_timer,
			       &cfg->nh_grp_res_has_idle_timer,
			       extack);
	if (err)
		return err;

	return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
				NH_RES_DEFAULT_UNBALANCED_TIMER,
				&cfg->nh_grp_res_unbalanced_timer,
				&cfg->nh_grp_res_has_unbalanced_timer,
				extack);
}
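/* Attribute layout consumed by rtm_to_nh_config() below for a resilient
 * group (illustrative):
 *
 *	NHA_GROUP	-> array of struct nexthop_grp
 *	NHA_GROUP_TYPE	-> NEXTHOP_GRP_TYPE_RES
 *	NHA_RES_GROUP	-> nest {
 *		NHA_RES_GROUP_BUCKETS		(u16)
 *		NHA_RES_GROUP_IDLE_TIMER	(u32, clock_t)
 *		NHA_RES_GROUP_UNBALANCED_TIMER	(u32, clock_t)
 *	}
 *
 * Roughly what, e.g., "ip nexthop add id 10 group 1/2 type resilient
 * buckets 32" would produce (iproute2 syntax shown for illustration only).
 */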
static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
			    struct nlmsghdr *nlh, struct nh_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
	int err;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb,
			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
			  rtm_nh_policy_new, extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (nhm->resvd || nhm->nh_scope) {
		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
		goto out;
	}
	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
		goto out;
	}

	switch (nhm->nh_family) {
	case AF_INET:
	case AF_INET6:
		break;
	case AF_UNSPEC:
		if (tb[NHA_GROUP])
			break;
		fallthrough;
	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	memset(cfg, 0, sizeof(*cfg));
	cfg->nlflags = nlh->nlmsg_flags;
	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->nlinfo.nlh = nlh;
	cfg->nlinfo.nl_net = net;

	cfg->nh_family = nhm->nh_family;
	cfg->nh_protocol = nhm->nh_protocol;
	cfg->nh_flags = nhm->nh_flags;

	if (tb[NHA_ID])
		cfg->nh_id = nla_get_u32(tb[NHA_ID]);

	if (tb[NHA_FDB]) {
		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
			goto out;
		}
		if (nhm->nh_flags) {
			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
			goto out;
		}
		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
	}

	if (tb[NHA_GROUP]) {
		if (nhm->nh_family != AF_UNSPEC) {
			NL_SET_ERR_MSG(extack, "Invalid family for group");
			goto out;
		}
		cfg->nh_grp = tb[NHA_GROUP];

		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
		if (tb[NHA_GROUP_TYPE])
			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);

		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid group type");
			goto out;
		}
		err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
					  cfg->nh_grp_type, extack);
		if (err)
			goto out;

		if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
			err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
						       cfg, extack);

		/* no other attributes should be set */
		goto out;
	}

	if (tb[NHA_BLACKHOLE]) {
		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
			goto out;
		}

		cfg->nh_blackhole = 1;
		err = 0;
		goto out;
	}

	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
		goto out;
	}

	if (!cfg->nh_fdb && tb[NHA_OIF]) {
		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
		if (cfg->nh_ifindex)
			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);

		if (!cfg->dev) {
			NL_SET_ERR_MSG(extack, "Invalid device index");
			goto out;
		} else if (!(cfg->dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		} else if (!netif_carrier_ok(cfg->dev)) {
			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
			err = -ENETDOWN;
			goto out;
		}
	}

	err = -EINVAL;
	if (tb[NHA_GATEWAY]) {
		struct nlattr *gwa = tb[NHA_GATEWAY];

		switch (cfg->nh_family) {
		case AF_INET:
			if (nla_len(gwa) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv4 = nla_get_be32(gwa);
			break;
		case AF_INET6:
			if (nla_len(gwa) != sizeof(struct in6_addr)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
			break;
		default:
			NL_SET_ERR_MSG(extack,
				       "Unknown address family for gateway");
			goto out;
		}
	} else {
		/* device only nexthop (no gateway) */
		if (cfg->nh_flags & RTNH_F_ONLINK) {
			NL_SET_ERR_MSG(extack,
				       "ONLINK flag can not be set for nexthop without a gateway");
			goto out;
		}
	}

	if (tb[NHA_ENCAP]) {
		cfg->nh_encap = tb[NHA_ENCAP];

		if (!tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
			goto out;
		}

		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
		if (err < 0)
			goto out;

	} else if (tb[NHA_ENCAP_TYPE]) {
		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
		goto out;
	}

	err = 0;
out:
	return err;
}

/* rtnl */
static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nh_config cfg;
	struct nexthop *nh;
	int err;

	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
	if (!err) {
		nh = nexthop_add(net, &cfg, extack);
		if (IS_ERR(nh))
			err = PTR_ERR(nh);
	}

	return err;
}
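/* A well-formed RTM_GETNEXTHOP or RTM_DELNEXTHOP request is minimal: a
 * struct nhmsg whose nh_protocol, resvd, nh_scope and nh_flags fields are
 * all zero, plus a single NHA_ID attribute. The helpers below validate
 * exactly these constraints.
 */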
static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
				  struct nlattr **tb, u32 *id,
				  struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);

	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header");
		return -EINVAL;
	}

	if (!tb[NHA_ID]) {
		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
		return -EINVAL;
	}

	*id = nla_get_u32(tb[NHA_ID]);
	if (!(*id)) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
		return -EINVAL;
	}

	return 0;
}

static int nh_valid_get_del_req(const struct nlmsghdr *nlh, u32 *id,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get) - 1,
			  rtm_nh_policy_get, extack);
	if (err < 0)
		return err;

	return __nh_valid_get_del_req(nlh, tb, id, extack);
}

/* rtnl */
static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nl_info nlinfo = {
		.nlh = nlh,
		.nl_net = net,
		.portid = NETLINK_CB(skb).portid,
	};
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return -ENOENT;

	remove_nexthop(net, nh, &nlinfo);

	return 0;
}

/* rtnl */
static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *skb = NULL;
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	err = -ENOBUFS;
	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto out;

	err = -ENOENT;
	nh = nexthop_find_by_id(net, id);
	if (!nh)
		goto errout_free;

	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
	return err;
errout_free:
	kfree_skb(skb);
	goto out;
}

struct nh_dump_filter {
	u32 nh_id;
	int dev_idx;
	int master_idx;
	bool group_filter;
	bool fdb_filter;
	u32 res_bucket_nh_id;
};

static bool nh_dump_filtered(struct nexthop *nh,
			     struct nh_dump_filter *filter, u8 family)
{
	const struct net_device *dev;
	const struct nh_info *nhi;

	if (filter->group_filter && !nh->is_group)
		return true;

	if (!filter->dev_idx && !filter->master_idx && !family)
		return false;

	if (nh->is_group)
		return true;

	nhi = rtnl_dereference(nh->nh_info);
	if (family && nhi->family != family)
		return true;

	dev = nhi->fib_nhc.nhc_dev;
	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
		return true;

	if (filter->master_idx) {
		struct net_device *master;

		if (!dev)
			return true;

		master = netdev_master_upper_dev_get((struct net_device *)dev);
		if (!master || master->ifindex != filter->master_idx)
			return true;
	}

	return false;
}
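/* Example of the filtering above (illustrative): a dump carrying
 * NHA_OIF set to eth0's ifindex ("ip nexthop list dev eth0" in iproute2
 * terms) skips every group, because groups carry no device of their own;
 * only single nexthops resolved to that device are reported.
 */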
static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
			       struct nh_dump_filter *filter,
			       struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm;
	u32 idx;

	if (tb[NHA_OIF]) {
		idx = nla_get_u32(tb[NHA_OIF]);
		if (idx > INT_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid device index");
			return -EINVAL;
		}
		filter->dev_idx = idx;
	}
	if (tb[NHA_MASTER]) {
		idx = nla_get_u32(tb[NHA_MASTER]);
		if (idx > INT_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid master device index");
			return -EINVAL;
		}
		filter->master_idx = idx;
	}
	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);

	nhm = nlmsg_data(nlh);
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
		return -EINVAL;
	}

	return 0;
}

static int nh_valid_dump_req(const struct nlmsghdr *nlh,
			     struct nh_dump_filter *filter,
			     struct netlink_callback *cb)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
			  rtm_nh_policy_dump, cb->extack);
	if (err < 0)
		return err;

	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}

struct rtm_dump_nh_ctx {
	u32 idx;
};

static struct rtm_dump_nh_ctx *
rtm_dump_nh_ctx(struct netlink_callback *cb)
{
	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
	return ctx;
}

static int rtm_dump_walk_nexthops(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct rb_root *root,
				  struct rtm_dump_nh_ctx *ctx,
				  int (*nh_cb)(struct sk_buff *skb,
					       struct netlink_callback *cb,
					       struct nexthop *nh, void *data),
				  void *data)
{
	struct rb_node *node;
	int s_idx;
	int err;

	s_idx = ctx->idx;
	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		if (nh->id < s_idx)
			continue;

		ctx->idx = nh->id;
		err = nh_cb(skb, cb, nh, data);
		if (err)
			return err;
	}

	ctx->idx++;
	return 0;
}

static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
			       struct nexthop *nh, void *data)
{
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	struct nh_dump_filter *filter = data;

	if (nh_dump_filtered(nh, filter, nhm->nh_family))
		return 0;

	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
			    NETLINK_CB(cb->skb).portid,
			    cb->nlh->nlmsg_seq, NLM_F_MULTI);
}

/* rtnl */
static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct rb_root *root = &net->nexthop.rb_root;
	struct nh_dump_filter filter = {};
	int err;

	err = nh_valid_dump_req(cb->nlh, &filter, cb);
	if (err < 0)
		return err;

	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
				     &rtm_dump_nexthop_cb, &filter);
	if (err < 0) {
		if (likely(skb->len))
			err = skb->len;
	}

	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}
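/* Dump resumption (see rtm_dump_walk_nexthops() above): cb->ctx persists
 * between invocations, so ctx->idx remembers the last nexthop ID visited.
 * Returning skb->len when the buffer fills tells the netlink core to call
 * again, and the walk then resumes at the first ID >= ctx->idx.
 */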
static struct nexthop *
nexthop_find_group_resilient(struct net *net, u32 id,
			     struct netlink_ext_ack *extack)
{
	struct nh_group *nhg;
	struct nexthop *nh;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return ERR_PTR(-ENOENT);

	if (!nh->is_group) {
		NL_SET_ERR_MSG(extack, "Not a nexthop group");
		return ERR_PTR(-EINVAL);
	}

	nhg = rtnl_dereference(nh->nh_grp);
	if (!nhg->resilient) {
		NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
		return ERR_PTR(-EINVAL);
	}

	return nh;
}

static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
			      struct netlink_ext_ack *extack)
{
	u32 idx;

	if (attr) {
		idx = nla_get_u32(attr);
		if (!idx) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		*nh_id_p = idx;
	} else {
		*nh_id_p = 0;
	}

	return 0;
}

static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
				    struct nh_dump_filter *filter,
				    struct netlink_callback *cb)
{
	struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
			  rtm_nh_policy_dump_bucket, NULL);
	if (err < 0)
		return err;

	err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
	if (err)
		return err;

	if (tb[NHA_RES_BUCKET]) {
		size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;

		err = nla_parse_nested(res_tb, max,
				       tb[NHA_RES_BUCKET],
				       rtm_nh_res_bucket_policy_dump,
				       cb->extack);
		if (err < 0)
			return err;

		err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
					 &filter->res_bucket_nh_id,
					 cb->extack);
		if (err)
			return err;
	}

	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}
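/* Bucket dumps accept the same OIF/master filters as nexthop dumps, plus
 * NHA_ID to select one resilient group and a nested NHA_RES_BUCKET_NH_ID
 * to show only buckets currently pointing at one member; roughly
 * "ip nexthop bucket list id 10 nhid 1" in iproute2 terms (syntax shown
 * for illustration only).
 */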
struct rtm_dump_res_bucket_ctx {
	struct rtm_dump_nh_ctx nh;
	u16 bucket_index;
	u32 done_nh_idx; /* 1 + the index of the last fully processed NH. */
};

static struct rtm_dump_res_bucket_ctx *
rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
{
	struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
	return ctx;
}

struct rtm_dump_nexthop_bucket_data {
	struct rtm_dump_res_bucket_ctx *ctx;
	struct nh_dump_filter filter;
};

static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh,
				      struct rtm_dump_nexthop_bucket_data *dd)
{
	u32 portid = NETLINK_CB(cb->skb).portid;
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	u16 bucket_index;
	int err;

	if (dd->ctx->nh.idx < dd->ctx->done_nh_idx)
		return 0;

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	for (bucket_index = dd->ctx->bucket_index;
	     bucket_index < res_table->num_nh_buckets;
	     bucket_index++) {
		struct nh_res_bucket *bucket;
		struct nh_grp_entry *nhge;

		bucket = &res_table->nh_buckets[bucket_index];
		nhge = rtnl_dereference(bucket->nh_entry);
		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
			continue;

		if (dd->filter.res_bucket_nh_id &&
		    dd->filter.res_bucket_nh_id != nhge->nh->id)
			continue;

		dd->ctx->bucket_index = bucket_index;
		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
					 RTM_NEWNEXTHOPBUCKET, portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 cb->extack);
		if (err)
			return err;
	}

	dd->ctx->done_nh_idx = dd->ctx->nh.idx + 1;
	dd->ctx->bucket_index = 0;

	return 0;
}

static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh, void *data)
{
	struct rtm_dump_nexthop_bucket_data *dd = data;
	struct nh_group *nhg;

	if (!nh->is_group)
		return 0;

	nhg = rtnl_dereference(nh->nh_grp);
	if (!nhg->resilient)
		return 0;

	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
}

/* rtnl */
static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
	struct net *net = sock_net(skb->sk);
	struct nexthop *nh;
	int err;

	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
	if (err)
		return err;

	if (dd.filter.nh_id) {
		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
						  cb->extack);
		if (IS_ERR(nh))
			return PTR_ERR(nh);
		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
	} else {
		struct rb_root *root = &net->nexthop.rb_root;

		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
					     &rtm_dump_nexthop_bucket_cb, &dd);
	}

	if (err < 0) {
		if (likely(skb->len))
			err = skb->len;
	}

	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}
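/* The two-level context above makes bucket dumps restartable mid-group:
 * done_nh_idx records the last group that was dumped in full, and
 * bucket_index the next bucket within the current group, so a partial
 * dump of a large resilient group resumes exactly where the previous skb
 * ran out of room.
 */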
static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
					      u16 *bucket_index,
					      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
	int err;

	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
			       res, rtm_nh_res_bucket_policy_get, extack);
	if (err < 0)
		return err;

	if (!tb[NHA_RES_BUCKET_INDEX]) {
		NL_SET_ERR_MSG(extack, "Bucket index is missing");
		return -EINVAL;
	}

	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
	return 0;
}

static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
				   u32 *id, u16 *bucket_index,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
			  rtm_nh_policy_get_bucket, extack);
	if (err < 0)
		return err;

	err = __nh_valid_get_del_req(nlh, tb, id, extack);
	if (err)
		return err;

	if (!tb[NHA_RES_BUCKET]) {
		NL_SET_ERR_MSG(extack, "Bucket information is missing");
		return -EINVAL;
	}

	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
						 bucket_index, extack);
	if (err)
		return err;

	return 0;
}

/* rtnl */
static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nh_res_table *res_table;
	struct sk_buff *skb = NULL;
	struct nh_group *nhg;
	struct nexthop *nh;
	u16 bucket_index;
	int err;
	u32 id;

	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
	if (err)
		return err;

	nh = nexthop_find_group_resilient(net, id, extack);
	if (IS_ERR(nh))
		return PTR_ERR(nh);

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	if (bucket_index >= res_table->num_nh_buckets) {
		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
		return -ENOENT;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
				 bucket_index, RTM_NEWNEXTHOPBUCKET,
				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
				 0, extack);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	kfree_skb(skb);
	return err;
}

static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}

/* rtnl */
static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGE:
		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};

static int nexthops_dump(struct net *net, struct notifier_block *nb,
			 enum nexthop_event_type event_type,
			 struct netlink_ext_ack *extack)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	int err = 0;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
		if (err)
			break;
	}

	return err;
}

int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
			      struct netlink_ext_ack *extack)
{
	int err;

	rtnl_lock();
	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
	if (err)
		goto unlock;
	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
					       nb);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_nexthop_notifier);

int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
						 nb);
	if (err)
		goto unlock;
	nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_nexthop_notifier);

void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
{
	struct nexthop *nexthop;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop)
		goto out;

	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		nexthop->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		nexthop->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_set_hw_flags);

void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap)
{
	struct nh_res_table *res_table;
	struct nh_res_bucket *bucket;
	struct nexthop *nexthop;
	struct nh_group *nhg;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	if (bucket_index >= nhg->res_table->num_nh_buckets)
		goto out;

	res_table = rcu_dereference(nhg->res_table);
	bucket = &res_table->nh_buckets[bucket_index];
	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		bucket->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		bucket->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
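/* Usage sketch (hypothetical, not taken from any in-tree driver): after
 * programming a nexthop into hardware, a driver's notifier would
 * typically report offload status back with:
 *
 *	nexthop_set_hw_flags(net, nh_id, true, false);	// offload, no trap
 *
 * nexthop_bucket_set_hw_flags() plays the same per-bucket role for
 * resilient groups. Both helpers take rcu_read_lock() internally, so the
 * caller needs no extra locking.
 */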
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity)
{
	struct nh_res_table *res_table;
	struct nexthop *nexthop;
	struct nh_group *nhg;
	u16 i;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Instead of silently ignoring some buckets, demand that the sizes
	 * be the same.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (num_buckets != res_table->num_nh_buckets)
		goto out;

	for (i = 0; i < num_buckets; i++) {
		if (test_bit(i, activity))
			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);

static void __net_exit nexthop_net_exit_batch(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		flush_all_nexthops(net);
		kfree(net->nexthop.devhash);
	}
	rtnl_unlock();
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;
	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit_batch = nexthop_net_exit_batch,
};

static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
		      rtm_dump_nexthop, 0);

	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
		      rtm_dump_nexthop_bucket, 0);

	return 0;
}
subsys_initcall(nexthop_init);