1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Linux INET6 implementation 4 * FIB front-end. 5 * 6 * Authors: 7 * Pedro Roque <roque@di.fc.ul.pt> 8 */ 9 10 /* Changes: 11 * 12 * YOSHIFUJI Hideaki @USAGI 13 * reworked default router selection. 14 * - respect outgoing interface 15 * - select from (probably) reachable routers (i.e. 16 * routers in REACHABLE, STALE, DELAY or PROBE states). 17 * - always select the same router if it is (probably) 18 * reachable. otherwise, round-robin the list. 19 * Ville Nuorvala 20 * Fixed routing subtrees. 21 */ 22 23 #define pr_fmt(fmt) "IPv6: " fmt 24 25 #include <linux/capability.h> 26 #include <linux/errno.h> 27 #include <linux/export.h> 28 #include <linux/types.h> 29 #include <linux/times.h> 30 #include <linux/socket.h> 31 #include <linux/sockios.h> 32 #include <linux/net.h> 33 #include <linux/route.h> 34 #include <linux/netdevice.h> 35 #include <linux/in6.h> 36 #include <linux/mroute6.h> 37 #include <linux/init.h> 38 #include <linux/if_arp.h> 39 #include <linux/proc_fs.h> 40 #include <linux/seq_file.h> 41 #include <linux/nsproxy.h> 42 #include <linux/slab.h> 43 #include <linux/jhash.h> 44 #include <linux/siphash.h> 45 #include <net/net_namespace.h> 46 #include <net/snmp.h> 47 #include <net/ipv6.h> 48 #include <net/ip6_fib.h> 49 #include <net/ip6_route.h> 50 #include <net/ndisc.h> 51 #include <net/addrconf.h> 52 #include <net/tcp.h> 53 #include <linux/rtnetlink.h> 54 #include <net/dst.h> 55 #include <net/dst_metadata.h> 56 #include <net/xfrm.h> 57 #include <net/netevent.h> 58 #include <net/netlink.h> 59 #include <net/rtnh.h> 60 #include <net/lwtunnel.h> 61 #include <net/ip_tunnels.h> 62 #include <net/l3mdev.h> 63 #include <net/ip.h> 64 #include <linux/uaccess.h> 65 #include <linux/btf_ids.h> 66 67 #ifdef CONFIG_SYSCTL 68 #include <linux/sysctl.h> 69 #endif 70 71 static int ip6_rt_type_to_error(u8 fib6_type); 72 73 #define CREATE_TRACE_POINTS 74 #include <trace/events/fib6.h> 75 EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup); 76 #undef CREATE_TRACE_POINTS 77 78 enum rt6_nud_state { 79 RT6_NUD_FAIL_HARD = -3, 80 RT6_NUD_FAIL_PROBE = -2, 81 RT6_NUD_FAIL_DO_RR = -1, 82 RT6_NUD_SUCCEED = 1 83 }; 84 85 INDIRECT_CALLABLE_SCOPE 86 struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 87 static unsigned int ip6_default_advmss(const struct dst_entry *dst); 88 INDIRECT_CALLABLE_SCOPE 89 unsigned int ip6_mtu(const struct dst_entry *dst); 90 static struct dst_entry *ip6_negative_advice(struct dst_entry *); 91 static void ip6_dst_destroy(struct dst_entry *); 92 static void ip6_dst_ifdown(struct dst_entry *, 93 struct net_device *dev, int how); 94 static int ip6_dst_gc(struct dst_ops *ops); 95 96 static int ip6_pkt_discard(struct sk_buff *skb); 97 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); 98 static int ip6_pkt_prohibit(struct sk_buff *skb); 99 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb); 100 static void ip6_link_failure(struct sk_buff *skb); 101 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 102 struct sk_buff *skb, u32 mtu, 103 bool confirm_neigh); 104 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, 105 struct sk_buff *skb); 106 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif, 107 int strict); 108 static size_t rt6_nlmsg_size(struct fib6_info *f6i); 109 static int rt6_fill_node(struct net *net, struct sk_buff *skb, 110 struct fib6_info *rt, struct dst_entry *dst, 111 struct in6_addr 
*dest, struct in6_addr *src, 112 int iif, int type, u32 portid, u32 seq, 113 unsigned int flags); 114 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res, 115 const struct in6_addr *daddr, 116 const struct in6_addr *saddr); 117 118 #ifdef CONFIG_IPV6_ROUTE_INFO 119 static struct fib6_info *rt6_add_route_info(struct net *net, 120 const struct in6_addr *prefix, int prefixlen, 121 const struct in6_addr *gwaddr, 122 struct net_device *dev, 123 unsigned int pref); 124 static struct fib6_info *rt6_get_route_info(struct net *net, 125 const struct in6_addr *prefix, int prefixlen, 126 const struct in6_addr *gwaddr, 127 struct net_device *dev); 128 #endif 129 130 struct uncached_list { 131 spinlock_t lock; 132 struct list_head head; 133 }; 134 135 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); 136 137 void rt6_uncached_list_add(struct rt6_info *rt) 138 { 139 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); 140 141 rt->rt6i_uncached_list = ul; 142 143 spin_lock_bh(&ul->lock); 144 list_add_tail(&rt->rt6i_uncached, &ul->head); 145 spin_unlock_bh(&ul->lock); 146 } 147 148 void rt6_uncached_list_del(struct rt6_info *rt) 149 { 150 if (!list_empty(&rt->rt6i_uncached)) { 151 struct uncached_list *ul = rt->rt6i_uncached_list; 152 struct net *net = dev_net(rt->dst.dev); 153 154 spin_lock_bh(&ul->lock); 155 list_del(&rt->rt6i_uncached); 156 atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache); 157 spin_unlock_bh(&ul->lock); 158 } 159 } 160 161 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) 162 { 163 struct net_device *loopback_dev = net->loopback_dev; 164 int cpu; 165 166 if (dev == loopback_dev) 167 return; 168 169 for_each_possible_cpu(cpu) { 170 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); 171 struct rt6_info *rt; 172 173 spin_lock_bh(&ul->lock); 174 list_for_each_entry(rt, &ul->head, rt6i_uncached) { 175 struct inet6_dev *rt_idev = rt->rt6i_idev; 176 struct net_device *rt_dev = rt->dst.dev; 177 178 if (rt_idev->dev == dev) { 179 rt->rt6i_idev = in6_dev_get(loopback_dev); 180 in6_dev_put(rt_idev); 181 } 182 183 if (rt_dev == dev) { 184 rt->dst.dev = blackhole_netdev; 185 dev_hold(rt->dst.dev); 186 dev_put(rt_dev); 187 } 188 } 189 spin_unlock_bh(&ul->lock); 190 } 191 } 192 193 static inline const void *choose_neigh_daddr(const struct in6_addr *p, 194 struct sk_buff *skb, 195 const void *daddr) 196 { 197 if (!ipv6_addr_any(p)) 198 return (const void *) p; 199 else if (skb) 200 return &ipv6_hdr(skb)->daddr; 201 return daddr; 202 } 203 204 struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw, 205 struct net_device *dev, 206 struct sk_buff *skb, 207 const void *daddr) 208 { 209 struct neighbour *n; 210 211 daddr = choose_neigh_daddr(gw, skb, daddr); 212 n = __ipv6_neigh_lookup(dev, daddr); 213 if (n) 214 return n; 215 216 n = neigh_create(&nd_tbl, daddr, dev); 217 return IS_ERR(n) ? 
NULL : n; 218 } 219 220 static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst, 221 struct sk_buff *skb, 222 const void *daddr) 223 { 224 const struct rt6_info *rt = container_of(dst, struct rt6_info, dst); 225 226 return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any), 227 dst->dev, skb, daddr); 228 } 229 230 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr) 231 { 232 struct net_device *dev = dst->dev; 233 struct rt6_info *rt = (struct rt6_info *)dst; 234 235 daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr); 236 if (!daddr) 237 return; 238 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) 239 return; 240 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr)) 241 return; 242 __ipv6_confirm_neigh(dev, daddr); 243 } 244 245 static struct dst_ops ip6_dst_ops_template = { 246 .family = AF_INET6, 247 .gc = ip6_dst_gc, 248 .gc_thresh = 1024, 249 .check = ip6_dst_check, 250 .default_advmss = ip6_default_advmss, 251 .mtu = ip6_mtu, 252 .cow_metrics = dst_cow_metrics_generic, 253 .destroy = ip6_dst_destroy, 254 .ifdown = ip6_dst_ifdown, 255 .negative_advice = ip6_negative_advice, 256 .link_failure = ip6_link_failure, 257 .update_pmtu = ip6_rt_update_pmtu, 258 .redirect = rt6_do_redirect, 259 .local_out = __ip6_local_out, 260 .neigh_lookup = ip6_dst_neigh_lookup, 261 .confirm_neigh = ip6_confirm_neigh, 262 }; 263 264 static struct dst_ops ip6_dst_blackhole_ops = { 265 .family = AF_INET6, 266 .default_advmss = ip6_default_advmss, 267 .neigh_lookup = ip6_dst_neigh_lookup, 268 .check = ip6_dst_check, 269 .destroy = ip6_dst_destroy, 270 .cow_metrics = dst_cow_metrics_generic, 271 .update_pmtu = dst_blackhole_update_pmtu, 272 .redirect = dst_blackhole_redirect, 273 .mtu = dst_blackhole_mtu, 274 }; 275 276 static const u32 ip6_template_metrics[RTAX_MAX] = { 277 [RTAX_HOPLIMIT - 1] = 0, 278 }; 279 280 static const struct fib6_info fib6_null_entry_template = { 281 .fib6_flags = (RTF_REJECT | RTF_NONEXTHOP), 282 .fib6_protocol = RTPROT_KERNEL, 283 .fib6_metric = ~(u32)0, 284 .fib6_ref = REFCOUNT_INIT(1), 285 .fib6_type = RTN_UNREACHABLE, 286 .fib6_metrics = (struct dst_metrics *)&dst_default_metrics, 287 }; 288 289 static const struct rt6_info ip6_null_entry_template = { 290 .dst = { 291 .__refcnt = ATOMIC_INIT(1), 292 .__use = 1, 293 .obsolete = DST_OBSOLETE_FORCE_CHK, 294 .error = -ENETUNREACH, 295 .input = ip6_pkt_discard, 296 .output = ip6_pkt_discard_out, 297 }, 298 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 299 }; 300 301 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 302 303 static const struct rt6_info ip6_prohibit_entry_template = { 304 .dst = { 305 .__refcnt = ATOMIC_INIT(1), 306 .__use = 1, 307 .obsolete = DST_OBSOLETE_FORCE_CHK, 308 .error = -EACCES, 309 .input = ip6_pkt_prohibit, 310 .output = ip6_pkt_prohibit_out, 311 }, 312 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 313 }; 314 315 static const struct rt6_info ip6_blk_hole_entry_template = { 316 .dst = { 317 .__refcnt = ATOMIC_INIT(1), 318 .__use = 1, 319 .obsolete = DST_OBSOLETE_FORCE_CHK, 320 .error = -EINVAL, 321 .input = dst_discard, 322 .output = dst_discard_out, 323 }, 324 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 325 }; 326 327 #endif 328 329 static void rt6_info_init(struct rt6_info *rt) 330 { 331 struct dst_entry *dst = &rt->dst; 332 333 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); 334 INIT_LIST_HEAD(&rt->rt6i_uncached); 335 } 336 337 /* allocate dst with ip6_dst_ops */ 338 struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, 339 int flags) 340 { 341 
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 342 1, DST_OBSOLETE_FORCE_CHK, flags); 343 344 if (rt) { 345 rt6_info_init(rt); 346 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc); 347 } 348 349 return rt; 350 } 351 EXPORT_SYMBOL(ip6_dst_alloc); 352 353 static void ip6_dst_destroy(struct dst_entry *dst) 354 { 355 struct rt6_info *rt = (struct rt6_info *)dst; 356 struct fib6_info *from; 357 struct inet6_dev *idev; 358 359 ip_dst_metrics_put(dst); 360 rt6_uncached_list_del(rt); 361 362 idev = rt->rt6i_idev; 363 if (idev) { 364 rt->rt6i_idev = NULL; 365 in6_dev_put(idev); 366 } 367 368 from = xchg((__force struct fib6_info **)&rt->from, NULL); 369 fib6_info_release(from); 370 } 371 372 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, 373 int how) 374 { 375 struct rt6_info *rt = (struct rt6_info *)dst; 376 struct inet6_dev *idev = rt->rt6i_idev; 377 struct net_device *loopback_dev = 378 dev_net(dev)->loopback_dev; 379 380 if (idev && idev->dev != loopback_dev) { 381 struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev); 382 if (loopback_idev) { 383 rt->rt6i_idev = loopback_idev; 384 in6_dev_put(idev); 385 } 386 } 387 } 388 389 static bool __rt6_check_expired(const struct rt6_info *rt) 390 { 391 if (rt->rt6i_flags & RTF_EXPIRES) 392 return time_after(jiffies, rt->dst.expires); 393 else 394 return false; 395 } 396 397 static bool rt6_check_expired(const struct rt6_info *rt) 398 { 399 struct fib6_info *from; 400 401 from = rcu_dereference(rt->from); 402 403 if (rt->rt6i_flags & RTF_EXPIRES) { 404 if (time_after(jiffies, rt->dst.expires)) 405 return true; 406 } else if (from) { 407 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK || 408 fib6_check_expired(from); 409 } 410 return false; 411 } 412 413 void fib6_select_path(const struct net *net, struct fib6_result *res, 414 struct flowi6 *fl6, int oif, bool have_oif_match, 415 const struct sk_buff *skb, int strict) 416 { 417 struct fib6_info *sibling, *next_sibling; 418 struct fib6_info *match = res->f6i; 419 420 if (!match->nh && (!match->fib6_nsiblings || have_oif_match)) 421 goto out; 422 423 if (match->nh && have_oif_match && res->nh) 424 return; 425 426 /* We might have already computed the hash for ICMPv6 errors. In such 427 * case it will always be non-zero. Otherwise now is the time to do it. 428 */ 429 if (!fl6->mp_hash && 430 (!match->nh || nexthop_is_multipath(match->nh))) 431 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL); 432 433 if (unlikely(match->nh)) { 434 nexthop_path_fib6_result(res, fl6->mp_hash); 435 return; 436 } 437 438 if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound)) 439 goto out; 440 441 list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings, 442 fib6_siblings) { 443 const struct fib6_nh *nh = sibling->fib6_nh; 444 int nh_upper_bound; 445 446 nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound); 447 if (fl6->mp_hash > nh_upper_bound) 448 continue; 449 if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0) 450 break; 451 match = sibling; 452 break; 453 } 454 455 out: 456 res->f6i = match; 457 res->nh = match->fib6_nh; 458 } 459 460 /* 461 * Route lookup. rcu_read_lock() should be held. 
462 */ 463 464 static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh, 465 const struct in6_addr *saddr, int oif, int flags) 466 { 467 const struct net_device *dev; 468 469 if (nh->fib_nh_flags & RTNH_F_DEAD) 470 return false; 471 472 dev = nh->fib_nh_dev; 473 if (oif) { 474 if (dev->ifindex == oif) 475 return true; 476 } else { 477 if (ipv6_chk_addr(net, saddr, dev, 478 flags & RT6_LOOKUP_F_IFACE)) 479 return true; 480 } 481 482 return false; 483 } 484 485 struct fib6_nh_dm_arg { 486 struct net *net; 487 const struct in6_addr *saddr; 488 int oif; 489 int flags; 490 struct fib6_nh *nh; 491 }; 492 493 static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg) 494 { 495 struct fib6_nh_dm_arg *arg = _arg; 496 497 arg->nh = nh; 498 return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif, 499 arg->flags); 500 } 501 502 /* returns fib6_nh from nexthop or NULL */ 503 static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh, 504 struct fib6_result *res, 505 const struct in6_addr *saddr, 506 int oif, int flags) 507 { 508 struct fib6_nh_dm_arg arg = { 509 .net = net, 510 .saddr = saddr, 511 .oif = oif, 512 .flags = flags, 513 }; 514 515 if (nexthop_is_blackhole(nh)) 516 return NULL; 517 518 if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg)) 519 return arg.nh; 520 521 return NULL; 522 } 523 524 static void rt6_device_match(struct net *net, struct fib6_result *res, 525 const struct in6_addr *saddr, int oif, int flags) 526 { 527 struct fib6_info *f6i = res->f6i; 528 struct fib6_info *spf6i; 529 struct fib6_nh *nh; 530 531 if (!oif && ipv6_addr_any(saddr)) { 532 if (unlikely(f6i->nh)) { 533 nh = nexthop_fib6_nh(f6i->nh); 534 if (nexthop_is_blackhole(f6i->nh)) 535 goto out_blackhole; 536 } else { 537 nh = f6i->fib6_nh; 538 } 539 if (!(nh->fib_nh_flags & RTNH_F_DEAD)) 540 goto out; 541 } 542 543 for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) { 544 bool matched = false; 545 546 if (unlikely(spf6i->nh)) { 547 nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr, 548 oif, flags); 549 if (nh) 550 matched = true; 551 } else { 552 nh = spf6i->fib6_nh; 553 if (__rt6_device_match(net, nh, saddr, oif, flags)) 554 matched = true; 555 } 556 if (matched) { 557 res->f6i = spf6i; 558 goto out; 559 } 560 } 561 562 if (oif && flags & RT6_LOOKUP_F_IFACE) { 563 res->f6i = net->ipv6.fib6_null_entry; 564 nh = res->f6i->fib6_nh; 565 goto out; 566 } 567 568 if (unlikely(f6i->nh)) { 569 nh = nexthop_fib6_nh(f6i->nh); 570 if (nexthop_is_blackhole(f6i->nh)) 571 goto out_blackhole; 572 } else { 573 nh = f6i->fib6_nh; 574 } 575 576 if (nh->fib_nh_flags & RTNH_F_DEAD) { 577 res->f6i = net->ipv6.fib6_null_entry; 578 nh = res->f6i->fib6_nh; 579 } 580 out: 581 res->nh = nh; 582 res->fib6_type = res->f6i->fib6_type; 583 res->fib6_flags = res->f6i->fib6_flags; 584 return; 585 586 out_blackhole: 587 res->fib6_flags |= RTF_REJECT; 588 res->fib6_type = RTN_BLACKHOLE; 589 res->nh = nh; 590 } 591 592 #ifdef CONFIG_IPV6_ROUTER_PREF 593 struct __rt6_probe_work { 594 struct work_struct work; 595 struct in6_addr target; 596 struct net_device *dev; 597 }; 598 599 static void rt6_probe_deferred(struct work_struct *w) 600 { 601 struct in6_addr mcaddr; 602 struct __rt6_probe_work *work = 603 container_of(w, struct __rt6_probe_work, work); 604 605 addrconf_addr_solict_mult(&work->target, &mcaddr); 606 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0); 607 dev_put(work->dev); 608 kfree(work); 609 } 610 611 static void rt6_probe(struct fib6_nh *fib6_nh) 612 { 613 struct 
__rt6_probe_work *work = NULL; 614 const struct in6_addr *nh_gw; 615 unsigned long last_probe; 616 struct neighbour *neigh; 617 struct net_device *dev; 618 struct inet6_dev *idev; 619 620 /* 621 * Okay, this does not seem to be appropriate 622 * for now, however, we need to check if it 623 * is really so; aka Router Reachability Probing. 624 * 625 * Router Reachability Probe MUST be rate-limited 626 * to no more than one per minute. 627 */ 628 if (!fib6_nh->fib_nh_gw_family) 629 return; 630 631 nh_gw = &fib6_nh->fib_nh_gw6; 632 dev = fib6_nh->fib_nh_dev; 633 rcu_read_lock_bh(); 634 last_probe = READ_ONCE(fib6_nh->last_probe); 635 idev = __in6_dev_get(dev); 636 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw); 637 if (neigh) { 638 if (neigh->nud_state & NUD_VALID) 639 goto out; 640 641 write_lock(&neigh->lock); 642 if (!(neigh->nud_state & NUD_VALID) && 643 time_after(jiffies, 644 neigh->updated + idev->cnf.rtr_probe_interval)) { 645 work = kmalloc(sizeof(*work), GFP_ATOMIC); 646 if (work) 647 __neigh_set_probe_once(neigh); 648 } 649 write_unlock(&neigh->lock); 650 } else if (time_after(jiffies, last_probe + 651 idev->cnf.rtr_probe_interval)) { 652 work = kmalloc(sizeof(*work), GFP_ATOMIC); 653 } 654 655 if (!work || cmpxchg(&fib6_nh->last_probe, 656 last_probe, jiffies) != last_probe) { 657 kfree(work); 658 } else { 659 INIT_WORK(&work->work, rt6_probe_deferred); 660 work->target = *nh_gw; 661 dev_hold(dev); 662 work->dev = dev; 663 schedule_work(&work->work); 664 } 665 666 out: 667 rcu_read_unlock_bh(); 668 } 669 #else 670 static inline void rt6_probe(struct fib6_nh *fib6_nh) 671 { 672 } 673 #endif 674 675 /* 676 * Default Router Selection (RFC 2461 6.3.6) 677 */ 678 static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh) 679 { 680 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD; 681 struct neighbour *neigh; 682 683 rcu_read_lock_bh(); 684 neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev, 685 &fib6_nh->fib_nh_gw6); 686 if (neigh) { 687 read_lock(&neigh->lock); 688 if (neigh->nud_state & NUD_VALID) 689 ret = RT6_NUD_SUCCEED; 690 #ifdef CONFIG_IPV6_ROUTER_PREF 691 else if (!(neigh->nud_state & NUD_FAILED)) 692 ret = RT6_NUD_SUCCEED; 693 else 694 ret = RT6_NUD_FAIL_PROBE; 695 #endif 696 read_unlock(&neigh->lock); 697 } else { 698 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ? 
699 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR; 700 } 701 rcu_read_unlock_bh(); 702 703 return ret; 704 } 705 706 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif, 707 int strict) 708 { 709 int m = 0; 710 711 if (!oif || nh->fib_nh_dev->ifindex == oif) 712 m = 2; 713 714 if (!m && (strict & RT6_LOOKUP_F_IFACE)) 715 return RT6_NUD_FAIL_HARD; 716 #ifdef CONFIG_IPV6_ROUTER_PREF 717 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2; 718 #endif 719 if ((strict & RT6_LOOKUP_F_REACHABLE) && 720 !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) { 721 int n = rt6_check_neigh(nh); 722 if (n < 0) 723 return n; 724 } 725 return m; 726 } 727 728 static bool find_match(struct fib6_nh *nh, u32 fib6_flags, 729 int oif, int strict, int *mpri, bool *do_rr) 730 { 731 bool match_do_rr = false; 732 bool rc = false; 733 int m; 734 735 if (nh->fib_nh_flags & RTNH_F_DEAD) 736 goto out; 737 738 if (ip6_ignore_linkdown(nh->fib_nh_dev) && 739 nh->fib_nh_flags & RTNH_F_LINKDOWN && 740 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE)) 741 goto out; 742 743 m = rt6_score_route(nh, fib6_flags, oif, strict); 744 if (m == RT6_NUD_FAIL_DO_RR) { 745 match_do_rr = true; 746 m = 0; /* lowest valid score */ 747 } else if (m == RT6_NUD_FAIL_HARD) { 748 goto out; 749 } 750 751 if (strict & RT6_LOOKUP_F_REACHABLE) 752 rt6_probe(nh); 753 754 /* note that m can be RT6_NUD_FAIL_PROBE at this point */ 755 if (m > *mpri) { 756 *do_rr = match_do_rr; 757 *mpri = m; 758 rc = true; 759 } 760 out: 761 return rc; 762 } 763 764 struct fib6_nh_frl_arg { 765 u32 flags; 766 int oif; 767 int strict; 768 int *mpri; 769 bool *do_rr; 770 struct fib6_nh *nh; 771 }; 772 773 static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg) 774 { 775 struct fib6_nh_frl_arg *arg = _arg; 776 777 arg->nh = nh; 778 return find_match(nh, arg->flags, arg->oif, arg->strict, 779 arg->mpri, arg->do_rr); 780 } 781 782 static void __find_rr_leaf(struct fib6_info *f6i_start, 783 struct fib6_info *nomatch, u32 metric, 784 struct fib6_result *res, struct fib6_info **cont, 785 int oif, int strict, bool *do_rr, int *mpri) 786 { 787 struct fib6_info *f6i; 788 789 for (f6i = f6i_start; 790 f6i && f6i != nomatch; 791 f6i = rcu_dereference(f6i->fib6_next)) { 792 bool matched = false; 793 struct fib6_nh *nh; 794 795 if (cont && f6i->fib6_metric != metric) { 796 *cont = f6i; 797 return; 798 } 799 800 if (fib6_check_expired(f6i)) 801 continue; 802 803 if (unlikely(f6i->nh)) { 804 struct fib6_nh_frl_arg arg = { 805 .flags = f6i->fib6_flags, 806 .oif = oif, 807 .strict = strict, 808 .mpri = mpri, 809 .do_rr = do_rr 810 }; 811 812 if (nexthop_is_blackhole(f6i->nh)) { 813 res->fib6_flags = RTF_REJECT; 814 res->fib6_type = RTN_BLACKHOLE; 815 res->f6i = f6i; 816 res->nh = nexthop_fib6_nh(f6i->nh); 817 return; 818 } 819 if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match, 820 &arg)) { 821 matched = true; 822 nh = arg.nh; 823 } 824 } else { 825 nh = f6i->fib6_nh; 826 if (find_match(nh, f6i->fib6_flags, oif, strict, 827 mpri, do_rr)) 828 matched = true; 829 } 830 if (matched) { 831 res->f6i = f6i; 832 res->nh = nh; 833 res->fib6_flags = f6i->fib6_flags; 834 res->fib6_type = f6i->fib6_type; 835 } 836 } 837 } 838 839 static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf, 840 struct fib6_info *rr_head, int oif, int strict, 841 bool *do_rr, struct fib6_result *res) 842 { 843 u32 metric = rr_head->fib6_metric; 844 struct fib6_info *cont = NULL; 845 int mpri = -1; 846 847 __find_rr_leaf(rr_head, NULL, metric, res, &cont, 848 oif, strict, do_rr, 
&mpri); 849 850 __find_rr_leaf(leaf, rr_head, metric, res, &cont, 851 oif, strict, do_rr, &mpri); 852 853 if (res->f6i || !cont) 854 return; 855 856 __find_rr_leaf(cont, NULL, metric, res, NULL, 857 oif, strict, do_rr, &mpri); 858 } 859 860 static void rt6_select(struct net *net, struct fib6_node *fn, int oif, 861 struct fib6_result *res, int strict) 862 { 863 struct fib6_info *leaf = rcu_dereference(fn->leaf); 864 struct fib6_info *rt0; 865 bool do_rr = false; 866 int key_plen; 867 868 /* make sure this function or its helpers sets f6i */ 869 res->f6i = NULL; 870 871 if (!leaf || leaf == net->ipv6.fib6_null_entry) 872 goto out; 873 874 rt0 = rcu_dereference(fn->rr_ptr); 875 if (!rt0) 876 rt0 = leaf; 877 878 /* Double check to make sure fn is not an intermediate node 879 * and fn->leaf does not points to its child's leaf 880 * (This might happen if all routes under fn are deleted from 881 * the tree and fib6_repair_tree() is called on the node.) 882 */ 883 key_plen = rt0->fib6_dst.plen; 884 #ifdef CONFIG_IPV6_SUBTREES 885 if (rt0->fib6_src.plen) 886 key_plen = rt0->fib6_src.plen; 887 #endif 888 if (fn->fn_bit != key_plen) 889 goto out; 890 891 find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res); 892 if (do_rr) { 893 struct fib6_info *next = rcu_dereference(rt0->fib6_next); 894 895 /* no entries matched; do round-robin */ 896 if (!next || next->fib6_metric != rt0->fib6_metric) 897 next = leaf; 898 899 if (next != rt0) { 900 spin_lock_bh(&leaf->fib6_table->tb6_lock); 901 /* make sure next is not being deleted from the tree */ 902 if (next->fib6_node) 903 rcu_assign_pointer(fn->rr_ptr, next); 904 spin_unlock_bh(&leaf->fib6_table->tb6_lock); 905 } 906 } 907 908 out: 909 if (!res->f6i) { 910 res->f6i = net->ipv6.fib6_null_entry; 911 res->nh = res->f6i->fib6_nh; 912 res->fib6_flags = res->f6i->fib6_flags; 913 res->fib6_type = res->f6i->fib6_type; 914 } 915 } 916 917 static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res) 918 { 919 return (res->f6i->fib6_flags & RTF_NONEXTHOP) || 920 res->nh->fib_nh_gw_family; 921 } 922 923 #ifdef CONFIG_IPV6_ROUTE_INFO 924 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, 925 const struct in6_addr *gwaddr) 926 { 927 struct net *net = dev_net(dev); 928 struct route_info *rinfo = (struct route_info *) opt; 929 struct in6_addr prefix_buf, *prefix; 930 unsigned int pref; 931 unsigned long lifetime; 932 struct fib6_info *rt; 933 934 if (len < sizeof(struct route_info)) { 935 return -EINVAL; 936 } 937 938 /* Sanity check for prefix_len and length */ 939 if (rinfo->length > 3) { 940 return -EINVAL; 941 } else if (rinfo->prefix_len > 128) { 942 return -EINVAL; 943 } else if (rinfo->prefix_len > 64) { 944 if (rinfo->length < 2) { 945 return -EINVAL; 946 } 947 } else if (rinfo->prefix_len > 0) { 948 if (rinfo->length < 1) { 949 return -EINVAL; 950 } 951 } 952 953 pref = rinfo->route_pref; 954 if (pref == ICMPV6_ROUTER_PREF_INVALID) 955 return -EINVAL; 956 957 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ); 958 959 if (rinfo->length == 3) 960 prefix = (struct in6_addr *)rinfo->prefix; 961 else { 962 /* this function is safe */ 963 ipv6_addr_prefix(&prefix_buf, 964 (struct in6_addr *)rinfo->prefix, 965 rinfo->prefix_len); 966 prefix = &prefix_buf; 967 } 968 969 if (rinfo->prefix_len == 0) 970 rt = rt6_get_dflt_router(net, gwaddr, dev); 971 else 972 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, 973 gwaddr, dev); 974 975 if (rt && !lifetime) { 976 ip6_del_rt(net, rt, false); 977 rt = NULL; 978 } 979 980 if (!rt && lifetime) 981 
rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, 982 dev, pref); 983 else if (rt) 984 rt->fib6_flags = RTF_ROUTEINFO | 985 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); 986 987 if (rt) { 988 if (!addrconf_finite_timeout(lifetime)) 989 fib6_clean_expires(rt); 990 else 991 fib6_set_expires(rt, jiffies + HZ * lifetime); 992 993 fib6_info_release(rt); 994 } 995 return 0; 996 } 997 #endif 998 999 /* 1000 * Misc support functions 1001 */ 1002 1003 /* called with rcu_lock held */ 1004 static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res) 1005 { 1006 struct net_device *dev = res->nh->fib_nh_dev; 1007 1008 if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) { 1009 /* for copies of local routes, dst->dev needs to be the 1010 * device if it is a master device, the master device if 1011 * device is enslaved, and the loopback as the default 1012 */ 1013 if (netif_is_l3_slave(dev) && 1014 !rt6_need_strict(&res->f6i->fib6_dst.addr)) 1015 dev = l3mdev_master_dev_rcu(dev); 1016 else if (!netif_is_l3_master(dev)) 1017 dev = dev_net(dev)->loopback_dev; 1018 /* last case is netif_is_l3_master(dev) is true in which 1019 * case we want dev returned to be dev 1020 */ 1021 } 1022 1023 return dev; 1024 } 1025 1026 static const int fib6_prop[RTN_MAX + 1] = { 1027 [RTN_UNSPEC] = 0, 1028 [RTN_UNICAST] = 0, 1029 [RTN_LOCAL] = 0, 1030 [RTN_BROADCAST] = 0, 1031 [RTN_ANYCAST] = 0, 1032 [RTN_MULTICAST] = 0, 1033 [RTN_BLACKHOLE] = -EINVAL, 1034 [RTN_UNREACHABLE] = -EHOSTUNREACH, 1035 [RTN_PROHIBIT] = -EACCES, 1036 [RTN_THROW] = -EAGAIN, 1037 [RTN_NAT] = -EINVAL, 1038 [RTN_XRESOLVE] = -EINVAL, 1039 }; 1040 1041 static int ip6_rt_type_to_error(u8 fib6_type) 1042 { 1043 return fib6_prop[fib6_type]; 1044 } 1045 1046 static unsigned short fib6_info_dst_flags(struct fib6_info *rt) 1047 { 1048 unsigned short flags = 0; 1049 1050 if (rt->dst_nocount) 1051 flags |= DST_NOCOUNT; 1052 if (rt->dst_nopolicy) 1053 flags |= DST_NOPOLICY; 1054 1055 return flags; 1056 } 1057 1058 static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type) 1059 { 1060 rt->dst.error = ip6_rt_type_to_error(fib6_type); 1061 1062 switch (fib6_type) { 1063 case RTN_BLACKHOLE: 1064 rt->dst.output = dst_discard_out; 1065 rt->dst.input = dst_discard; 1066 break; 1067 case RTN_PROHIBIT: 1068 rt->dst.output = ip6_pkt_prohibit_out; 1069 rt->dst.input = ip6_pkt_prohibit; 1070 break; 1071 case RTN_THROW: 1072 case RTN_UNREACHABLE: 1073 default: 1074 rt->dst.output = ip6_pkt_discard_out; 1075 rt->dst.input = ip6_pkt_discard; 1076 break; 1077 } 1078 } 1079 1080 static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res) 1081 { 1082 struct fib6_info *f6i = res->f6i; 1083 1084 if (res->fib6_flags & RTF_REJECT) { 1085 ip6_rt_init_dst_reject(rt, res->fib6_type); 1086 return; 1087 } 1088 1089 rt->dst.error = 0; 1090 rt->dst.output = ip6_output; 1091 1092 if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) { 1093 rt->dst.input = ip6_input; 1094 } else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) { 1095 rt->dst.input = ip6_mc_input; 1096 } else { 1097 rt->dst.input = ip6_forward; 1098 } 1099 1100 if (res->nh->fib_nh_lws) { 1101 rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws); 1102 lwtunnel_set_redirect(&rt->dst); 1103 } 1104 1105 rt->dst.lastuse = jiffies; 1106 } 1107 1108 /* Caller must already hold reference to @from */ 1109 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) 1110 { 1111 rt->rt6i_flags &= ~RTF_EXPIRES; 1112 rcu_assign_pointer(rt->from, 
from); 1113 ip_dst_init_metrics(&rt->dst, from->fib6_metrics); 1114 } 1115 1116 /* Caller must already hold reference to f6i in result */ 1117 static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res) 1118 { 1119 const struct fib6_nh *nh = res->nh; 1120 const struct net_device *dev = nh->fib_nh_dev; 1121 struct fib6_info *f6i = res->f6i; 1122 1123 ip6_rt_init_dst(rt, res); 1124 1125 rt->rt6i_dst = f6i->fib6_dst; 1126 rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL; 1127 rt->rt6i_flags = res->fib6_flags; 1128 if (nh->fib_nh_gw_family) { 1129 rt->rt6i_gateway = nh->fib_nh_gw6; 1130 rt->rt6i_flags |= RTF_GATEWAY; 1131 } 1132 rt6_set_from(rt, f6i); 1133 #ifdef CONFIG_IPV6_SUBTREES 1134 rt->rt6i_src = f6i->fib6_src; 1135 #endif 1136 } 1137 1138 static struct fib6_node* fib6_backtrack(struct fib6_node *fn, 1139 struct in6_addr *saddr) 1140 { 1141 struct fib6_node *pn, *sn; 1142 while (1) { 1143 if (fn->fn_flags & RTN_TL_ROOT) 1144 return NULL; 1145 pn = rcu_dereference(fn->parent); 1146 sn = FIB6_SUBTREE(pn); 1147 if (sn && sn != fn) 1148 fn = fib6_node_lookup(sn, NULL, saddr); 1149 else 1150 fn = pn; 1151 if (fn->fn_flags & RTN_RTINFO) 1152 return fn; 1153 } 1154 } 1155 1156 static bool ip6_hold_safe(struct net *net, struct rt6_info **prt) 1157 { 1158 struct rt6_info *rt = *prt; 1159 1160 if (dst_hold_safe(&rt->dst)) 1161 return true; 1162 if (net) { 1163 rt = net->ipv6.ip6_null_entry; 1164 dst_hold(&rt->dst); 1165 } else { 1166 rt = NULL; 1167 } 1168 *prt = rt; 1169 return false; 1170 } 1171 1172 /* called with rcu_lock held */ 1173 static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res) 1174 { 1175 struct net_device *dev = res->nh->fib_nh_dev; 1176 struct fib6_info *f6i = res->f6i; 1177 unsigned short flags; 1178 struct rt6_info *nrt; 1179 1180 if (!fib6_info_hold_safe(f6i)) 1181 goto fallback; 1182 1183 flags = fib6_info_dst_flags(f6i); 1184 nrt = ip6_dst_alloc(dev_net(dev), dev, flags); 1185 if (!nrt) { 1186 fib6_info_release(f6i); 1187 goto fallback; 1188 } 1189 1190 ip6_rt_copy_init(nrt, res); 1191 return nrt; 1192 1193 fallback: 1194 nrt = dev_net(dev)->ipv6.ip6_null_entry; 1195 dst_hold(&nrt->dst); 1196 return nrt; 1197 } 1198 1199 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net, 1200 struct fib6_table *table, 1201 struct flowi6 *fl6, 1202 const struct sk_buff *skb, 1203 int flags) 1204 { 1205 struct fib6_result res = {}; 1206 struct fib6_node *fn; 1207 struct rt6_info *rt; 1208 1209 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) 1210 flags &= ~RT6_LOOKUP_F_IFACE; 1211 1212 rcu_read_lock(); 1213 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 1214 restart: 1215 res.f6i = rcu_dereference(fn->leaf); 1216 if (!res.f6i) 1217 res.f6i = net->ipv6.fib6_null_entry; 1218 else 1219 rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif, 1220 flags); 1221 1222 if (res.f6i == net->ipv6.fib6_null_entry) { 1223 fn = fib6_backtrack(fn, &fl6->saddr); 1224 if (fn) 1225 goto restart; 1226 1227 rt = net->ipv6.ip6_null_entry; 1228 dst_hold(&rt->dst); 1229 goto out; 1230 } else if (res.fib6_flags & RTF_REJECT) { 1231 goto do_create; 1232 } 1233 1234 fib6_select_path(net, &res, fl6, fl6->flowi6_oif, 1235 fl6->flowi6_oif != 0, skb, flags); 1236 1237 /* Search through exception table */ 1238 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr); 1239 if (rt) { 1240 if (ip6_hold_safe(net, &rt)) 1241 dst_use_noref(&rt->dst, jiffies); 1242 } else { 1243 do_create: 1244 rt = ip6_create_rt_rcu(&res); 1245 } 1246 1247 out: 
1248 trace_fib6_table_lookup(net, &res, table, fl6); 1249 1250 rcu_read_unlock(); 1251 1252 return rt; 1253 } 1254 1255 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, 1256 const struct sk_buff *skb, int flags) 1257 { 1258 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup); 1259 } 1260 EXPORT_SYMBOL_GPL(ip6_route_lookup); 1261 1262 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, 1263 const struct in6_addr *saddr, int oif, 1264 const struct sk_buff *skb, int strict) 1265 { 1266 struct flowi6 fl6 = { 1267 .flowi6_oif = oif, 1268 .daddr = *daddr, 1269 }; 1270 struct dst_entry *dst; 1271 int flags = strict ? RT6_LOOKUP_F_IFACE : 0; 1272 1273 if (saddr) { 1274 memcpy(&fl6.saddr, saddr, sizeof(*saddr)); 1275 flags |= RT6_LOOKUP_F_HAS_SADDR; 1276 } 1277 1278 dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup); 1279 if (dst->error == 0) 1280 return (struct rt6_info *) dst; 1281 1282 dst_release(dst); 1283 1284 return NULL; 1285 } 1286 EXPORT_SYMBOL(rt6_lookup); 1287 1288 /* ip6_ins_rt is called with FREE table->tb6_lock. 1289 * It takes new route entry, the addition fails by any reason the 1290 * route is released. 1291 * Caller must hold dst before calling it. 1292 */ 1293 1294 static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info, 1295 struct netlink_ext_ack *extack) 1296 { 1297 int err; 1298 struct fib6_table *table; 1299 1300 table = rt->fib6_table; 1301 spin_lock_bh(&table->tb6_lock); 1302 err = fib6_add(&table->tb6_root, rt, info, extack); 1303 spin_unlock_bh(&table->tb6_lock); 1304 1305 return err; 1306 } 1307 1308 int ip6_ins_rt(struct net *net, struct fib6_info *rt) 1309 { 1310 struct nl_info info = { .nl_net = net, }; 1311 1312 return __ip6_ins_rt(rt, &info, NULL); 1313 } 1314 1315 static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res, 1316 const struct in6_addr *daddr, 1317 const struct in6_addr *saddr) 1318 { 1319 struct fib6_info *f6i = res->f6i; 1320 struct net_device *dev; 1321 struct rt6_info *rt; 1322 1323 /* 1324 * Clone the route. 
1325 */ 1326 1327 if (!fib6_info_hold_safe(f6i)) 1328 return NULL; 1329 1330 dev = ip6_rt_get_dev_rcu(res); 1331 rt = ip6_dst_alloc(dev_net(dev), dev, 0); 1332 if (!rt) { 1333 fib6_info_release(f6i); 1334 return NULL; 1335 } 1336 1337 ip6_rt_copy_init(rt, res); 1338 rt->rt6i_flags |= RTF_CACHE; 1339 rt->rt6i_dst.addr = *daddr; 1340 rt->rt6i_dst.plen = 128; 1341 1342 if (!rt6_is_gw_or_nonexthop(res)) { 1343 if (f6i->fib6_dst.plen != 128 && 1344 ipv6_addr_equal(&f6i->fib6_dst.addr, daddr)) 1345 rt->rt6i_flags |= RTF_ANYCAST; 1346 #ifdef CONFIG_IPV6_SUBTREES 1347 if (rt->rt6i_src.plen && saddr) { 1348 rt->rt6i_src.addr = *saddr; 1349 rt->rt6i_src.plen = 128; 1350 } 1351 #endif 1352 } 1353 1354 return rt; 1355 } 1356 1357 static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res) 1358 { 1359 struct fib6_info *f6i = res->f6i; 1360 unsigned short flags = fib6_info_dst_flags(f6i); 1361 struct net_device *dev; 1362 struct rt6_info *pcpu_rt; 1363 1364 if (!fib6_info_hold_safe(f6i)) 1365 return NULL; 1366 1367 rcu_read_lock(); 1368 dev = ip6_rt_get_dev_rcu(res); 1369 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT); 1370 rcu_read_unlock(); 1371 if (!pcpu_rt) { 1372 fib6_info_release(f6i); 1373 return NULL; 1374 } 1375 ip6_rt_copy_init(pcpu_rt, res); 1376 pcpu_rt->rt6i_flags |= RTF_PCPU; 1377 1378 if (f6i->nh) 1379 pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev)); 1380 1381 return pcpu_rt; 1382 } 1383 1384 static bool rt6_is_valid(const struct rt6_info *rt6) 1385 { 1386 return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev)); 1387 } 1388 1389 /* It should be called with rcu_read_lock() acquired */ 1390 static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res) 1391 { 1392 struct rt6_info *pcpu_rt; 1393 1394 pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu); 1395 1396 if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) { 1397 struct rt6_info *prev, **p; 1398 1399 p = this_cpu_ptr(res->nh->rt6i_pcpu); 1400 prev = xchg(p, NULL); 1401 if (prev) { 1402 dst_dev_put(&prev->dst); 1403 dst_release(&prev->dst); 1404 } 1405 1406 pcpu_rt = NULL; 1407 } 1408 1409 return pcpu_rt; 1410 } 1411 1412 static struct rt6_info *rt6_make_pcpu_route(struct net *net, 1413 const struct fib6_result *res) 1414 { 1415 struct rt6_info *pcpu_rt, *prev, **p; 1416 1417 pcpu_rt = ip6_rt_pcpu_alloc(res); 1418 if (!pcpu_rt) 1419 return NULL; 1420 1421 p = this_cpu_ptr(res->nh->rt6i_pcpu); 1422 prev = cmpxchg(p, NULL, pcpu_rt); 1423 BUG_ON(prev); 1424 1425 if (res->f6i->fib6_destroying) { 1426 struct fib6_info *from; 1427 1428 from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL); 1429 fib6_info_release(from); 1430 } 1431 1432 return pcpu_rt; 1433 } 1434 1435 /* exception hash table implementation 1436 */ 1437 static DEFINE_SPINLOCK(rt6_exception_lock); 1438 1439 /* Remove rt6_ex from hash table and free the memory 1440 * Caller must hold rt6_exception_lock 1441 */ 1442 static void rt6_remove_exception(struct rt6_exception_bucket *bucket, 1443 struct rt6_exception *rt6_ex) 1444 { 1445 struct fib6_info *from; 1446 struct net *net; 1447 1448 if (!bucket || !rt6_ex) 1449 return; 1450 1451 net = dev_net(rt6_ex->rt6i->dst.dev); 1452 net->ipv6.rt6_stats->fib_rt_cache--; 1453 1454 /* purge completely the exception to allow releasing the held resources: 1455 * some [sk] cache may keep the dst around for unlimited time 1456 */ 1457 from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL); 1458 fib6_info_release(from); 1459 dst_dev_put(&rt6_ex->rt6i->dst); 1460 1461 
hlist_del_rcu(&rt6_ex->hlist); 1462 dst_release(&rt6_ex->rt6i->dst); 1463 kfree_rcu(rt6_ex, rcu); 1464 WARN_ON_ONCE(!bucket->depth); 1465 bucket->depth--; 1466 } 1467 1468 /* Remove oldest rt6_ex in bucket and free the memory 1469 * Caller must hold rt6_exception_lock 1470 */ 1471 static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket) 1472 { 1473 struct rt6_exception *rt6_ex, *oldest = NULL; 1474 1475 if (!bucket) 1476 return; 1477 1478 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { 1479 if (!oldest || time_before(rt6_ex->stamp, oldest->stamp)) 1480 oldest = rt6_ex; 1481 } 1482 rt6_remove_exception(bucket, oldest); 1483 } 1484 1485 static u32 rt6_exception_hash(const struct in6_addr *dst, 1486 const struct in6_addr *src) 1487 { 1488 static siphash_key_t rt6_exception_key __read_mostly; 1489 struct { 1490 struct in6_addr dst; 1491 struct in6_addr src; 1492 } __aligned(SIPHASH_ALIGNMENT) combined = { 1493 .dst = *dst, 1494 }; 1495 u64 val; 1496 1497 net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key)); 1498 1499 #ifdef CONFIG_IPV6_SUBTREES 1500 if (src) 1501 combined.src = *src; 1502 #endif 1503 val = siphash(&combined, sizeof(combined), &rt6_exception_key); 1504 1505 return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); 1506 } 1507 1508 /* Helper function to find the cached rt in the hash table 1509 * and update bucket pointer to point to the bucket for this 1510 * (daddr, saddr) pair 1511 * Caller must hold rt6_exception_lock 1512 */ 1513 static struct rt6_exception * 1514 __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket, 1515 const struct in6_addr *daddr, 1516 const struct in6_addr *saddr) 1517 { 1518 struct rt6_exception *rt6_ex; 1519 u32 hval; 1520 1521 if (!(*bucket) || !daddr) 1522 return NULL; 1523 1524 hval = rt6_exception_hash(daddr, saddr); 1525 *bucket += hval; 1526 1527 hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) { 1528 struct rt6_info *rt6 = rt6_ex->rt6i; 1529 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr); 1530 1531 #ifdef CONFIG_IPV6_SUBTREES 1532 if (matched && saddr) 1533 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr); 1534 #endif 1535 if (matched) 1536 return rt6_ex; 1537 } 1538 return NULL; 1539 } 1540 1541 /* Helper function to find the cached rt in the hash table 1542 * and update bucket pointer to point to the bucket for this 1543 * (daddr, saddr) pair 1544 * Caller must hold rcu_read_lock() 1545 */ 1546 static struct rt6_exception * 1547 __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket, 1548 const struct in6_addr *daddr, 1549 const struct in6_addr *saddr) 1550 { 1551 struct rt6_exception *rt6_ex; 1552 u32 hval; 1553 1554 WARN_ON_ONCE(!rcu_read_lock_held()); 1555 1556 if (!(*bucket) || !daddr) 1557 return NULL; 1558 1559 hval = rt6_exception_hash(daddr, saddr); 1560 *bucket += hval; 1561 1562 hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) { 1563 struct rt6_info *rt6 = rt6_ex->rt6i; 1564 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr); 1565 1566 #ifdef CONFIG_IPV6_SUBTREES 1567 if (matched && saddr) 1568 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr); 1569 #endif 1570 if (matched) 1571 return rt6_ex; 1572 } 1573 return NULL; 1574 } 1575 1576 static unsigned int fib6_mtu(const struct fib6_result *res) 1577 { 1578 const struct fib6_nh *nh = res->nh; 1579 unsigned int mtu; 1580 1581 if (res->f6i->fib6_pmtu) { 1582 mtu = res->f6i->fib6_pmtu; 1583 } else { 1584 struct net_device *dev = nh->fib_nh_dev; 1585 struct inet6_dev *idev; 1586 1587 
rcu_read_lock(); 1588 idev = __in6_dev_get(dev); 1589 mtu = idev->cnf.mtu6; 1590 rcu_read_unlock(); 1591 } 1592 1593 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); 1594 1595 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu); 1596 } 1597 1598 #define FIB6_EXCEPTION_BUCKET_FLUSHED 0x1UL 1599 1600 /* used when the flushed bit is not relevant, only access to the bucket 1601 * (ie., all bucket users except rt6_insert_exception); 1602 * 1603 * called under rcu lock; sometimes called with rt6_exception_lock held 1604 */ 1605 static 1606 struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh, 1607 spinlock_t *lock) 1608 { 1609 struct rt6_exception_bucket *bucket; 1610 1611 if (lock) 1612 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket, 1613 lockdep_is_held(lock)); 1614 else 1615 bucket = rcu_dereference(nh->rt6i_exception_bucket); 1616 1617 /* remove bucket flushed bit if set */ 1618 if (bucket) { 1619 unsigned long p = (unsigned long)bucket; 1620 1621 p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED; 1622 bucket = (struct rt6_exception_bucket *)p; 1623 } 1624 1625 return bucket; 1626 } 1627 1628 static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket) 1629 { 1630 unsigned long p = (unsigned long)bucket; 1631 1632 return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED); 1633 } 1634 1635 /* called with rt6_exception_lock held */ 1636 static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh, 1637 spinlock_t *lock) 1638 { 1639 struct rt6_exception_bucket *bucket; 1640 unsigned long p; 1641 1642 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket, 1643 lockdep_is_held(lock)); 1644 1645 p = (unsigned long)bucket; 1646 p |= FIB6_EXCEPTION_BUCKET_FLUSHED; 1647 bucket = (struct rt6_exception_bucket *)p; 1648 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket); 1649 } 1650 1651 static int rt6_insert_exception(struct rt6_info *nrt, 1652 const struct fib6_result *res) 1653 { 1654 struct net *net = dev_net(nrt->dst.dev); 1655 struct rt6_exception_bucket *bucket; 1656 struct fib6_info *f6i = res->f6i; 1657 struct in6_addr *src_key = NULL; 1658 struct rt6_exception *rt6_ex; 1659 struct fib6_nh *nh = res->nh; 1660 int err = 0; 1661 1662 spin_lock_bh(&rt6_exception_lock); 1663 1664 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket, 1665 lockdep_is_held(&rt6_exception_lock)); 1666 if (!bucket) { 1667 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket), 1668 GFP_ATOMIC); 1669 if (!bucket) { 1670 err = -ENOMEM; 1671 goto out; 1672 } 1673 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket); 1674 } else if (fib6_nh_excptn_bucket_flushed(bucket)) { 1675 err = -EINVAL; 1676 goto out; 1677 } 1678 1679 #ifdef CONFIG_IPV6_SUBTREES 1680 /* fib6_src.plen != 0 indicates f6i is in subtree 1681 * and exception table is indexed by a hash of 1682 * both fib6_dst and fib6_src. 1683 * Otherwise, the exception table is indexed by 1684 * a hash of only fib6_dst. 1685 */ 1686 if (f6i->fib6_src.plen) 1687 src_key = &nrt->rt6i_src.addr; 1688 #endif 1689 /* rt6_mtu_change() might lower mtu on f6i. 1690 * Only insert this exception route if its mtu 1691 * is less than f6i's mtu value. 
1692 */ 1693 if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) { 1694 err = -EINVAL; 1695 goto out; 1696 } 1697 1698 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr, 1699 src_key); 1700 if (rt6_ex) 1701 rt6_remove_exception(bucket, rt6_ex); 1702 1703 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC); 1704 if (!rt6_ex) { 1705 err = -ENOMEM; 1706 goto out; 1707 } 1708 rt6_ex->rt6i = nrt; 1709 rt6_ex->stamp = jiffies; 1710 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain); 1711 bucket->depth++; 1712 net->ipv6.rt6_stats->fib_rt_cache++; 1713 1714 if (bucket->depth > FIB6_MAX_DEPTH) 1715 rt6_exception_remove_oldest(bucket); 1716 1717 out: 1718 spin_unlock_bh(&rt6_exception_lock); 1719 1720 /* Update fn->fn_sernum to invalidate all cached dst */ 1721 if (!err) { 1722 spin_lock_bh(&f6i->fib6_table->tb6_lock); 1723 fib6_update_sernum(net, f6i); 1724 spin_unlock_bh(&f6i->fib6_table->tb6_lock); 1725 fib6_force_start_gc(net); 1726 } 1727 1728 return err; 1729 } 1730 1731 static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from) 1732 { 1733 struct rt6_exception_bucket *bucket; 1734 struct rt6_exception *rt6_ex; 1735 struct hlist_node *tmp; 1736 int i; 1737 1738 spin_lock_bh(&rt6_exception_lock); 1739 1740 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock); 1741 if (!bucket) 1742 goto out; 1743 1744 /* Prevent rt6_insert_exception() to recreate the bucket list */ 1745 if (!from) 1746 fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock); 1747 1748 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 1749 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) { 1750 if (!from || 1751 rcu_access_pointer(rt6_ex->rt6i->from) == from) 1752 rt6_remove_exception(bucket, rt6_ex); 1753 } 1754 WARN_ON_ONCE(!from && bucket->depth); 1755 bucket++; 1756 } 1757 out: 1758 spin_unlock_bh(&rt6_exception_lock); 1759 } 1760 1761 static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg) 1762 { 1763 struct fib6_info *f6i = arg; 1764 1765 fib6_nh_flush_exceptions(nh, f6i); 1766 1767 return 0; 1768 } 1769 1770 void rt6_flush_exceptions(struct fib6_info *f6i) 1771 { 1772 if (f6i->nh) 1773 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions, 1774 f6i); 1775 else 1776 fib6_nh_flush_exceptions(f6i->fib6_nh, f6i); 1777 } 1778 1779 /* Find cached rt in the hash table inside passed in rt 1780 * Caller has to hold rcu_read_lock() 1781 */ 1782 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res, 1783 const struct in6_addr *daddr, 1784 const struct in6_addr *saddr) 1785 { 1786 const struct in6_addr *src_key = NULL; 1787 struct rt6_exception_bucket *bucket; 1788 struct rt6_exception *rt6_ex; 1789 struct rt6_info *ret = NULL; 1790 1791 #ifdef CONFIG_IPV6_SUBTREES 1792 /* fib6i_src.plen != 0 indicates f6i is in subtree 1793 * and exception table is indexed by a hash of 1794 * both fib6_dst and fib6_src. 1795 * However, the src addr used to create the hash 1796 * might not be exactly the passed in saddr which 1797 * is a /128 addr from the flow. 1798 * So we need to use f6i->fib6_src to redo lookup 1799 * if the passed in saddr does not find anything. 1800 * (See the logic in ip6_rt_cache_alloc() on how 1801 * rt->rt6i_src is updated.) 
1802 */ 1803 if (res->f6i->fib6_src.plen) 1804 src_key = saddr; 1805 find_ex: 1806 #endif 1807 bucket = fib6_nh_get_excptn_bucket(res->nh, NULL); 1808 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key); 1809 1810 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i)) 1811 ret = rt6_ex->rt6i; 1812 1813 #ifdef CONFIG_IPV6_SUBTREES 1814 /* Use fib6_src as src_key and redo lookup */ 1815 if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) { 1816 src_key = &res->f6i->fib6_src.addr; 1817 goto find_ex; 1818 } 1819 #endif 1820 1821 return ret; 1822 } 1823 1824 /* Remove the passed in cached rt from the hash table that contains it */ 1825 static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen, 1826 const struct rt6_info *rt) 1827 { 1828 const struct in6_addr *src_key = NULL; 1829 struct rt6_exception_bucket *bucket; 1830 struct rt6_exception *rt6_ex; 1831 int err; 1832 1833 if (!rcu_access_pointer(nh->rt6i_exception_bucket)) 1834 return -ENOENT; 1835 1836 spin_lock_bh(&rt6_exception_lock); 1837 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock); 1838 1839 #ifdef CONFIG_IPV6_SUBTREES 1840 /* rt6i_src.plen != 0 indicates 'from' is in subtree 1841 * and exception table is indexed by a hash of 1842 * both rt6i_dst and rt6i_src. 1843 * Otherwise, the exception table is indexed by 1844 * a hash of only rt6i_dst. 1845 */ 1846 if (plen) 1847 src_key = &rt->rt6i_src.addr; 1848 #endif 1849 rt6_ex = __rt6_find_exception_spinlock(&bucket, 1850 &rt->rt6i_dst.addr, 1851 src_key); 1852 if (rt6_ex) { 1853 rt6_remove_exception(bucket, rt6_ex); 1854 err = 0; 1855 } else { 1856 err = -ENOENT; 1857 } 1858 1859 spin_unlock_bh(&rt6_exception_lock); 1860 return err; 1861 } 1862 1863 struct fib6_nh_excptn_arg { 1864 struct rt6_info *rt; 1865 int plen; 1866 }; 1867 1868 static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg) 1869 { 1870 struct fib6_nh_excptn_arg *arg = _arg; 1871 int err; 1872 1873 err = fib6_nh_remove_exception(nh, arg->plen, arg->rt); 1874 if (err == 0) 1875 return 1; 1876 1877 return 0; 1878 } 1879 1880 static int rt6_remove_exception_rt(struct rt6_info *rt) 1881 { 1882 struct fib6_info *from; 1883 1884 from = rcu_dereference(rt->from); 1885 if (!from || !(rt->rt6i_flags & RTF_CACHE)) 1886 return -EINVAL; 1887 1888 if (from->nh) { 1889 struct fib6_nh_excptn_arg arg = { 1890 .rt = rt, 1891 .plen = from->fib6_src.plen 1892 }; 1893 int rc; 1894 1895 /* rc = 1 means an entry was found */ 1896 rc = nexthop_for_each_fib6_nh(from->nh, 1897 rt6_nh_remove_exception_rt, 1898 &arg); 1899 return rc ? 0 : -ENOENT; 1900 } 1901 1902 return fib6_nh_remove_exception(from->fib6_nh, 1903 from->fib6_src.plen, rt); 1904 } 1905 1906 /* Find rt6_ex which contains the passed in rt cache and 1907 * refresh its stamp 1908 */ 1909 static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen, 1910 const struct rt6_info *rt) 1911 { 1912 const struct in6_addr *src_key = NULL; 1913 struct rt6_exception_bucket *bucket; 1914 struct rt6_exception *rt6_ex; 1915 1916 bucket = fib6_nh_get_excptn_bucket(nh, NULL); 1917 #ifdef CONFIG_IPV6_SUBTREES 1918 /* rt6i_src.plen != 0 indicates 'from' is in subtree 1919 * and exception table is indexed by a hash of 1920 * both rt6i_dst and rt6i_src. 1921 * Otherwise, the exception table is indexed by 1922 * a hash of only rt6i_dst. 
1923 */ 1924 if (plen) 1925 src_key = &rt->rt6i_src.addr; 1926 #endif 1927 rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key); 1928 if (rt6_ex) 1929 rt6_ex->stamp = jiffies; 1930 } 1931 1932 struct fib6_nh_match_arg { 1933 const struct net_device *dev; 1934 const struct in6_addr *gw; 1935 struct fib6_nh *match; 1936 }; 1937 1938 /* determine if fib6_nh has given device and gateway */ 1939 static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg) 1940 { 1941 struct fib6_nh_match_arg *arg = _arg; 1942 1943 if (arg->dev != nh->fib_nh_dev || 1944 (arg->gw && !nh->fib_nh_gw_family) || 1945 (!arg->gw && nh->fib_nh_gw_family) || 1946 (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6))) 1947 return 0; 1948 1949 arg->match = nh; 1950 1951 /* found a match, break the loop */ 1952 return 1; 1953 } 1954 1955 static void rt6_update_exception_stamp_rt(struct rt6_info *rt) 1956 { 1957 struct fib6_info *from; 1958 struct fib6_nh *fib6_nh; 1959 1960 rcu_read_lock(); 1961 1962 from = rcu_dereference(rt->from); 1963 if (!from || !(rt->rt6i_flags & RTF_CACHE)) 1964 goto unlock; 1965 1966 if (from->nh) { 1967 struct fib6_nh_match_arg arg = { 1968 .dev = rt->dst.dev, 1969 .gw = &rt->rt6i_gateway, 1970 }; 1971 1972 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg); 1973 1974 if (!arg.match) 1975 goto unlock; 1976 fib6_nh = arg.match; 1977 } else { 1978 fib6_nh = from->fib6_nh; 1979 } 1980 fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt); 1981 unlock: 1982 rcu_read_unlock(); 1983 } 1984 1985 static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev, 1986 struct rt6_info *rt, int mtu) 1987 { 1988 /* If the new MTU is lower than the route PMTU, this new MTU will be the 1989 * lowest MTU in the path: always allow updating the route PMTU to 1990 * reflect PMTU decreases. 1991 * 1992 * If the new MTU is higher, and the route PMTU is equal to the local 1993 * MTU, this means the old MTU is the lowest in the path, so allow 1994 * updating it: if other nodes now have lower MTUs, PMTU discovery will 1995 * handle this. 1996 */ 1997 1998 if (dst_mtu(&rt->dst) >= mtu) 1999 return true; 2000 2001 if (dst_mtu(&rt->dst) == idev->cnf.mtu6) 2002 return true; 2003 2004 return false; 2005 } 2006 2007 static void rt6_exceptions_update_pmtu(struct inet6_dev *idev, 2008 const struct fib6_nh *nh, int mtu) 2009 { 2010 struct rt6_exception_bucket *bucket; 2011 struct rt6_exception *rt6_ex; 2012 int i; 2013 2014 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock); 2015 if (!bucket) 2016 return; 2017 2018 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 2019 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { 2020 struct rt6_info *entry = rt6_ex->rt6i; 2021 2022 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected 2023 * route), the metrics of its rt->from have already 2024 * been updated. 
2025 */ 2026 if (dst_metric_raw(&entry->dst, RTAX_MTU) && 2027 rt6_mtu_change_route_allowed(idev, entry, mtu)) 2028 dst_metric_set(&entry->dst, RTAX_MTU, mtu); 2029 } 2030 bucket++; 2031 } 2032 } 2033 2034 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE) 2035 2036 static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh, 2037 const struct in6_addr *gateway) 2038 { 2039 struct rt6_exception_bucket *bucket; 2040 struct rt6_exception *rt6_ex; 2041 struct hlist_node *tmp; 2042 int i; 2043 2044 if (!rcu_access_pointer(nh->rt6i_exception_bucket)) 2045 return; 2046 2047 spin_lock_bh(&rt6_exception_lock); 2048 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock); 2049 if (bucket) { 2050 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 2051 hlist_for_each_entry_safe(rt6_ex, tmp, 2052 &bucket->chain, hlist) { 2053 struct rt6_info *entry = rt6_ex->rt6i; 2054 2055 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) == 2056 RTF_CACHE_GATEWAY && 2057 ipv6_addr_equal(gateway, 2058 &entry->rt6i_gateway)) { 2059 rt6_remove_exception(bucket, rt6_ex); 2060 } 2061 } 2062 bucket++; 2063 } 2064 } 2065 2066 spin_unlock_bh(&rt6_exception_lock); 2067 } 2068 2069 static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket, 2070 struct rt6_exception *rt6_ex, 2071 struct fib6_gc_args *gc_args, 2072 unsigned long now) 2073 { 2074 struct rt6_info *rt = rt6_ex->rt6i; 2075 2076 /* we are pruning and obsoleting aged-out and non gateway exceptions 2077 * even if others have still references to them, so that on next 2078 * dst_check() such references can be dropped. 2079 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when 2080 * expired, independently from their aging, as per RFC 8201 section 4 2081 */ 2082 if (!(rt->rt6i_flags & RTF_EXPIRES)) { 2083 if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) { 2084 RT6_TRACE("aging clone %p\n", rt); 2085 rt6_remove_exception(bucket, rt6_ex); 2086 return; 2087 } 2088 } else if (time_after(jiffies, rt->dst.expires)) { 2089 RT6_TRACE("purging expired route %p\n", rt); 2090 rt6_remove_exception(bucket, rt6_ex); 2091 return; 2092 } 2093 2094 if (rt->rt6i_flags & RTF_GATEWAY) { 2095 struct neighbour *neigh; 2096 2097 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); 2098 2099 if (!(neigh && (neigh->flags & NTF_ROUTER))) { 2100 RT6_TRACE("purging route %p via non-router but gateway\n", 2101 rt); 2102 rt6_remove_exception(bucket, rt6_ex); 2103 return; 2104 } 2105 } 2106 2107 gc_args->more++; 2108 } 2109 2110 static void fib6_nh_age_exceptions(const struct fib6_nh *nh, 2111 struct fib6_gc_args *gc_args, 2112 unsigned long now) 2113 { 2114 struct rt6_exception_bucket *bucket; 2115 struct rt6_exception *rt6_ex; 2116 struct hlist_node *tmp; 2117 int i; 2118 2119 if (!rcu_access_pointer(nh->rt6i_exception_bucket)) 2120 return; 2121 2122 rcu_read_lock_bh(); 2123 spin_lock(&rt6_exception_lock); 2124 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock); 2125 if (bucket) { 2126 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 2127 hlist_for_each_entry_safe(rt6_ex, tmp, 2128 &bucket->chain, hlist) { 2129 rt6_age_examine_exception(bucket, rt6_ex, 2130 gc_args, now); 2131 } 2132 bucket++; 2133 } 2134 } 2135 spin_unlock(&rt6_exception_lock); 2136 rcu_read_unlock_bh(); 2137 } 2138 2139 struct fib6_nh_age_excptn_arg { 2140 struct fib6_gc_args *gc_args; 2141 unsigned long now; 2142 }; 2143 2144 static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg) 2145 { 2146 struct fib6_nh_age_excptn_arg *arg = _arg; 2147 2148 
fib6_nh_age_exceptions(nh, arg->gc_args, arg->now); 2149 return 0; 2150 } 2151 2152 void rt6_age_exceptions(struct fib6_info *f6i, 2153 struct fib6_gc_args *gc_args, 2154 unsigned long now) 2155 { 2156 if (f6i->nh) { 2157 struct fib6_nh_age_excptn_arg arg = { 2158 .gc_args = gc_args, 2159 .now = now 2160 }; 2161 2162 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions, 2163 &arg); 2164 } else { 2165 fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now); 2166 } 2167 } 2168 2169 /* must be called with rcu lock held */ 2170 int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif, 2171 struct flowi6 *fl6, struct fib6_result *res, int strict) 2172 { 2173 struct fib6_node *fn, *saved_fn; 2174 2175 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 2176 saved_fn = fn; 2177 2178 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) 2179 oif = 0; 2180 2181 redo_rt6_select: 2182 rt6_select(net, fn, oif, res, strict); 2183 if (res->f6i == net->ipv6.fib6_null_entry) { 2184 fn = fib6_backtrack(fn, &fl6->saddr); 2185 if (fn) 2186 goto redo_rt6_select; 2187 else if (strict & RT6_LOOKUP_F_REACHABLE) { 2188 /* also consider unreachable routes */ 2189 strict &= ~RT6_LOOKUP_F_REACHABLE; 2190 fn = saved_fn; 2191 goto redo_rt6_select; 2192 } 2193 } 2194 2195 trace_fib6_table_lookup(net, res, table, fl6); 2196 2197 return 0; 2198 } 2199 2200 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, 2201 int oif, struct flowi6 *fl6, 2202 const struct sk_buff *skb, int flags) 2203 { 2204 struct fib6_result res = {}; 2205 struct rt6_info *rt = NULL; 2206 int strict = 0; 2207 2208 WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) && 2209 !rcu_read_lock_held()); 2210 2211 strict |= flags & RT6_LOOKUP_F_IFACE; 2212 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE; 2213 if (net->ipv6.devconf_all->forwarding == 0) 2214 strict |= RT6_LOOKUP_F_REACHABLE; 2215 2216 rcu_read_lock(); 2217 2218 fib6_table_lookup(net, table, oif, fl6, &res, strict); 2219 if (res.f6i == net->ipv6.fib6_null_entry) 2220 goto out; 2221 2222 fib6_select_path(net, &res, fl6, oif, false, skb, strict); 2223 2224 /* Search through the exception table */ 2225 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr); 2226 if (rt) { 2227 goto out; 2228 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) && 2229 !res.nh->fib_nh_gw_family)) { 2230 /* Create a RTF_CACHE clone which will not be 2231 * owned by the fib6 tree. It is for the special case where 2232 * the daddr in the skb during the neighbor look-up is different 2233 * from the fl6->daddr used to look up the route here. 2234 */ 2235 rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL); 2236 2237 if (rt) { 2238 /* 1 refcnt is taken during ip6_rt_cache_alloc(). 2239 * As rt6_uncached_list_add() does not consume refcnt, 2240 * this refcnt is always returned to the caller even 2241 * if the caller sets the RT6_LOOKUP_F_DST_NOREF flag.
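 * Put differently, even a RT6_LOOKUP_F_DST_NOREF caller owns this
 * reference and is expected to drop it (e.g. with ip6_rt_put()) once
 * it is done with the clone.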
2242 */ 2243 rt6_uncached_list_add(rt); 2244 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache); 2245 rcu_read_unlock(); 2246 2247 return rt; 2248 } 2249 } else { 2250 /* Get a percpu copy */ 2251 local_bh_disable(); 2252 rt = rt6_get_pcpu_route(&res); 2253 2254 if (!rt) 2255 rt = rt6_make_pcpu_route(net, &res); 2256 2257 local_bh_enable(); 2258 } 2259 out: 2260 if (!rt) 2261 rt = net->ipv6.ip6_null_entry; 2262 if (!(flags & RT6_LOOKUP_F_DST_NOREF)) 2263 ip6_hold_safe(net, &rt); 2264 rcu_read_unlock(); 2265 2266 return rt; 2267 } 2268 EXPORT_SYMBOL_GPL(ip6_pol_route); 2269 2270 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net, 2271 struct fib6_table *table, 2272 struct flowi6 *fl6, 2273 const struct sk_buff *skb, 2274 int flags) 2275 { 2276 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags); 2277 } 2278 2279 struct dst_entry *ip6_route_input_lookup(struct net *net, 2280 struct net_device *dev, 2281 struct flowi6 *fl6, 2282 const struct sk_buff *skb, 2283 int flags) 2284 { 2285 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG) 2286 flags |= RT6_LOOKUP_F_IFACE; 2287 2288 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input); 2289 } 2290 EXPORT_SYMBOL_GPL(ip6_route_input_lookup); 2291 2292 static void ip6_multipath_l3_keys(const struct sk_buff *skb, 2293 struct flow_keys *keys, 2294 struct flow_keys *flkeys) 2295 { 2296 const struct ipv6hdr *outer_iph = ipv6_hdr(skb); 2297 const struct ipv6hdr *key_iph = outer_iph; 2298 struct flow_keys *_flkeys = flkeys; 2299 const struct ipv6hdr *inner_iph; 2300 const struct icmp6hdr *icmph; 2301 struct ipv6hdr _inner_iph; 2302 struct icmp6hdr _icmph; 2303 2304 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6)) 2305 goto out; 2306 2307 icmph = skb_header_pointer(skb, skb_transport_offset(skb), 2308 sizeof(_icmph), &_icmph); 2309 if (!icmph) 2310 goto out; 2311 2312 if (!icmpv6_is_err(icmph->icmp6_type)) 2313 goto out; 2314 2315 inner_iph = skb_header_pointer(skb, 2316 skb_transport_offset(skb) + sizeof(*icmph), 2317 sizeof(_inner_iph), &_inner_iph); 2318 if (!inner_iph) 2319 goto out; 2320 2321 key_iph = inner_iph; 2322 _flkeys = NULL; 2323 out: 2324 if (_flkeys) { 2325 keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src; 2326 keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst; 2327 keys->tags.flow_label = _flkeys->tags.flow_label; 2328 keys->basic.ip_proto = _flkeys->basic.ip_proto; 2329 } else { 2330 keys->addrs.v6addrs.src = key_iph->saddr; 2331 keys->addrs.v6addrs.dst = key_iph->daddr; 2332 keys->tags.flow_label = ip6_flowlabel(key_iph); 2333 keys->basic.ip_proto = key_iph->nexthdr; 2334 } 2335 } 2336 2337 static u32 rt6_multipath_custom_hash_outer(const struct net *net, 2338 const struct sk_buff *skb, 2339 bool *p_has_inner) 2340 { 2341 u32 hash_fields = ip6_multipath_hash_fields(net); 2342 struct flow_keys keys, hash_keys; 2343 2344 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK)) 2345 return 0; 2346 2347 memset(&hash_keys, 0, sizeof(hash_keys)); 2348 skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP); 2349 2350 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2351 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) 2352 hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src; 2353 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) 2354 hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst; 2355 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO) 2356 hash_keys.basic.ip_proto = keys.basic.ip_proto; 2357 if (hash_fields & 
FIB_MULTIPATH_HASH_FIELD_FLOWLABEL) 2358 hash_keys.tags.flow_label = keys.tags.flow_label; 2359 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) 2360 hash_keys.ports.src = keys.ports.src; 2361 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT) 2362 hash_keys.ports.dst = keys.ports.dst; 2363 2364 *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION); 2365 return flow_hash_from_keys(&hash_keys); 2366 } 2367 2368 static u32 rt6_multipath_custom_hash_inner(const struct net *net, 2369 const struct sk_buff *skb, 2370 bool has_inner) 2371 { 2372 u32 hash_fields = ip6_multipath_hash_fields(net); 2373 struct flow_keys keys, hash_keys; 2374 2375 /* We assume the packet carries an encapsulation, but if none was 2376 * encountered during dissection of the outer flow, then there is no 2377 * point in calling the flow dissector again. 2378 */ 2379 if (!has_inner) 2380 return 0; 2381 2382 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)) 2383 return 0; 2384 2385 memset(&hash_keys, 0, sizeof(hash_keys)); 2386 skb_flow_dissect_flow_keys(skb, &keys, 0); 2387 2388 if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION)) 2389 return 0; 2390 2391 if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2392 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2393 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) 2394 hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; 2395 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) 2396 hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; 2397 } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2398 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2399 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) 2400 hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src; 2401 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) 2402 hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst; 2403 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL) 2404 hash_keys.tags.flow_label = keys.tags.flow_label; 2405 } 2406 2407 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO) 2408 hash_keys.basic.ip_proto = keys.basic.ip_proto; 2409 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT) 2410 hash_keys.ports.src = keys.ports.src; 2411 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT) 2412 hash_keys.ports.dst = keys.ports.dst; 2413 2414 return flow_hash_from_keys(&hash_keys); 2415 } 2416 2417 static u32 rt6_multipath_custom_hash_skb(const struct net *net, 2418 const struct sk_buff *skb) 2419 { 2420 u32 mhash, mhash_inner; 2421 bool has_inner = true; 2422 2423 mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner); 2424 mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner); 2425 2426 return jhash_2words(mhash, mhash_inner, 0); 2427 } 2428 2429 static u32 rt6_multipath_custom_hash_fl6(const struct net *net, 2430 const struct flowi6 *fl6) 2431 { 2432 u32 hash_fields = ip6_multipath_hash_fields(net); 2433 struct flow_keys hash_keys; 2434 2435 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK)) 2436 return 0; 2437 2438 memset(&hash_keys, 0, sizeof(hash_keys)); 2439 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2440 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) 2441 hash_keys.addrs.v6addrs.src = fl6->saddr; 2442 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) 2443 hash_keys.addrs.v6addrs.dst = fl6->daddr; 2444 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO) 2445 hash_keys.basic.ip_proto = fl6->flowi6_proto; 2446 if (hash_fields & 
FIB_MULTIPATH_HASH_FIELD_FLOWLABEL) 2447 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6); 2448 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) 2449 hash_keys.ports.src = fl6->fl6_sport; 2450 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT) 2451 hash_keys.ports.dst = fl6->fl6_dport; 2452 2453 return flow_hash_from_keys(&hash_keys); 2454 } 2455 2456 /* if skb is set it will be used and fl6 can be NULL */ 2457 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6, 2458 const struct sk_buff *skb, struct flow_keys *flkeys) 2459 { 2460 struct flow_keys hash_keys; 2461 u32 mhash = 0; 2462 2463 switch (ip6_multipath_hash_policy(net)) { 2464 case 0: 2465 memset(&hash_keys, 0, sizeof(hash_keys)); 2466 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2467 if (skb) { 2468 ip6_multipath_l3_keys(skb, &hash_keys, flkeys); 2469 } else { 2470 hash_keys.addrs.v6addrs.src = fl6->saddr; 2471 hash_keys.addrs.v6addrs.dst = fl6->daddr; 2472 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6); 2473 hash_keys.basic.ip_proto = fl6->flowi6_proto; 2474 } 2475 mhash = flow_hash_from_keys(&hash_keys); 2476 break; 2477 case 1: 2478 if (skb) { 2479 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP; 2480 struct flow_keys keys; 2481 2482 /* short-circuit if we already have L4 hash present */ 2483 if (skb->l4_hash) 2484 return skb_get_hash_raw(skb) >> 1; 2485 2486 memset(&hash_keys, 0, sizeof(hash_keys)); 2487 2488 if (!flkeys) { 2489 skb_flow_dissect_flow_keys(skb, &keys, flag); 2490 flkeys = &keys; 2491 } 2492 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2493 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src; 2494 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst; 2495 hash_keys.ports.src = flkeys->ports.src; 2496 hash_keys.ports.dst = flkeys->ports.dst; 2497 hash_keys.basic.ip_proto = flkeys->basic.ip_proto; 2498 } else { 2499 memset(&hash_keys, 0, sizeof(hash_keys)); 2500 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2501 hash_keys.addrs.v6addrs.src = fl6->saddr; 2502 hash_keys.addrs.v6addrs.dst = fl6->daddr; 2503 hash_keys.ports.src = fl6->fl6_sport; 2504 hash_keys.ports.dst = fl6->fl6_dport; 2505 hash_keys.basic.ip_proto = fl6->flowi6_proto; 2506 } 2507 mhash = flow_hash_from_keys(&hash_keys); 2508 break; 2509 case 2: 2510 memset(&hash_keys, 0, sizeof(hash_keys)); 2511 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2512 if (skb) { 2513 struct flow_keys keys; 2514 2515 if (!flkeys) { 2516 skb_flow_dissect_flow_keys(skb, &keys, 0); 2517 flkeys = &keys; 2518 } 2519 2520 /* Inner can be v4 or v6 */ 2521 if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2522 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2523 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src; 2524 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst; 2525 } else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2526 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2527 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src; 2528 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst; 2529 hash_keys.tags.flow_label = flkeys->tags.flow_label; 2530 hash_keys.basic.ip_proto = flkeys->basic.ip_proto; 2531 } else { 2532 /* Same as case 0 */ 2533 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2534 ip6_multipath_l3_keys(skb, &hash_keys, flkeys); 2535 } 2536 } else { 2537 /* Same as case 0 */ 2538 hash_keys.control.addr_type = 
FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2539 hash_keys.addrs.v6addrs.src = fl6->saddr; 2540 hash_keys.addrs.v6addrs.dst = fl6->daddr; 2541 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6); 2542 hash_keys.basic.ip_proto = fl6->flowi6_proto; 2543 } 2544 mhash = flow_hash_from_keys(&hash_keys); 2545 break; 2546 case 3: 2547 if (skb) 2548 mhash = rt6_multipath_custom_hash_skb(net, skb); 2549 else 2550 mhash = rt6_multipath_custom_hash_fl6(net, fl6); 2551 break; 2552 } 2553 2554 return mhash >> 1; 2555 } 2556 2557 /* Called with rcu held */ 2558 void ip6_route_input(struct sk_buff *skb) 2559 { 2560 const struct ipv6hdr *iph = ipv6_hdr(skb); 2561 struct net *net = dev_net(skb->dev); 2562 int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF; 2563 struct ip_tunnel_info *tun_info; 2564 struct flowi6 fl6 = { 2565 .flowi6_iif = skb->dev->ifindex, 2566 .daddr = iph->daddr, 2567 .saddr = iph->saddr, 2568 .flowlabel = ip6_flowinfo(iph), 2569 .flowi6_mark = skb->mark, 2570 .flowi6_proto = iph->nexthdr, 2571 }; 2572 struct flow_keys *flkeys = NULL, _flkeys; 2573 2574 tun_info = skb_tunnel_info(skb); 2575 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX)) 2576 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id; 2577 2578 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys)) 2579 flkeys = &_flkeys; 2580 2581 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6)) 2582 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys); 2583 skb_dst_drop(skb); 2584 skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev, 2585 &fl6, skb, flags)); 2586 } 2587 2588 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net, 2589 struct fib6_table *table, 2590 struct flowi6 *fl6, 2591 const struct sk_buff *skb, 2592 int flags) 2593 { 2594 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags); 2595 } 2596 2597 struct dst_entry *ip6_route_output_flags_noref(struct net *net, 2598 const struct sock *sk, 2599 struct flowi6 *fl6, int flags) 2600 { 2601 bool any_src; 2602 2603 if (ipv6_addr_type(&fl6->daddr) & 2604 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) { 2605 struct dst_entry *dst; 2606 2607 /* This function does not take refcnt on the dst */ 2608 dst = l3mdev_link_scope_lookup(net, fl6); 2609 if (dst) 2610 return dst; 2611 } 2612 2613 fl6->flowi6_iif = LOOPBACK_IFINDEX; 2614 2615 flags |= RT6_LOOKUP_F_DST_NOREF; 2616 any_src = ipv6_addr_any(&fl6->saddr); 2617 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || 2618 (fl6->flowi6_oif && any_src)) 2619 flags |= RT6_LOOKUP_F_IFACE; 2620 2621 if (!any_src) 2622 flags |= RT6_LOOKUP_F_HAS_SADDR; 2623 else if (sk) 2624 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs); 2625 2626 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output); 2627 } 2628 EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref); 2629 2630 struct dst_entry *ip6_route_output_flags(struct net *net, 2631 const struct sock *sk, 2632 struct flowi6 *fl6, 2633 int flags) 2634 { 2635 struct dst_entry *dst; 2636 struct rt6_info *rt6; 2637 2638 rcu_read_lock(); 2639 dst = ip6_route_output_flags_noref(net, sk, fl6, flags); 2640 rt6 = (struct rt6_info *)dst; 2641 /* For dst cached in uncached_list, refcnt is already taken. 
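 * For any other dst a reference is taken here; if dst_hold_safe()
 * fails because the refcount has already dropped to zero, fall back
 * to the null entry instead.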
*/ 2642 if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) { 2643 dst = &net->ipv6.ip6_null_entry->dst; 2644 dst_hold(dst); 2645 } 2646 rcu_read_unlock(); 2647 2648 return dst; 2649 } 2650 EXPORT_SYMBOL_GPL(ip6_route_output_flags); 2651 2652 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig) 2653 { 2654 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig; 2655 struct net_device *loopback_dev = net->loopback_dev; 2656 struct dst_entry *new = NULL; 2657 2658 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1, 2659 DST_OBSOLETE_DEAD, 0); 2660 if (rt) { 2661 rt6_info_init(rt); 2662 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc); 2663 2664 new = &rt->dst; 2665 new->__use = 1; 2666 new->input = dst_discard; 2667 new->output = dst_discard_out; 2668 2669 dst_copy_metrics(new, &ort->dst); 2670 2671 rt->rt6i_idev = in6_dev_get(loopback_dev); 2672 rt->rt6i_gateway = ort->rt6i_gateway; 2673 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU; 2674 2675 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); 2676 #ifdef CONFIG_IPV6_SUBTREES 2677 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key)); 2678 #endif 2679 } 2680 2681 dst_release(dst_orig); 2682 return new ? new : ERR_PTR(-ENOMEM); 2683 } 2684 2685 /* 2686 * Destination cache support functions 2687 */ 2688 2689 static bool fib6_check(struct fib6_info *f6i, u32 cookie) 2690 { 2691 u32 rt_cookie = 0; 2692 2693 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie) 2694 return false; 2695 2696 if (fib6_check_expired(f6i)) 2697 return false; 2698 2699 return true; 2700 } 2701 2702 static struct dst_entry *rt6_check(struct rt6_info *rt, 2703 struct fib6_info *from, 2704 u32 cookie) 2705 { 2706 u32 rt_cookie = 0; 2707 2708 if (!from || !fib6_get_cookie_safe(from, &rt_cookie) || 2709 rt_cookie != cookie) 2710 return NULL; 2711 2712 if (rt6_check_expired(rt)) 2713 return NULL; 2714 2715 return &rt->dst; 2716 } 2717 2718 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, 2719 struct fib6_info *from, 2720 u32 cookie) 2721 { 2722 if (!__rt6_check_expired(rt) && 2723 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && 2724 fib6_check(from, cookie)) 2725 return &rt->dst; 2726 else 2727 return NULL; 2728 } 2729 2730 INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst, 2731 u32 cookie) 2732 { 2733 struct dst_entry *dst_ret; 2734 struct fib6_info *from; 2735 struct rt6_info *rt; 2736 2737 rt = container_of(dst, struct rt6_info, dst); 2738 2739 if (rt->sernum) 2740 return rt6_is_valid(rt) ? dst : NULL; 2741 2742 rcu_read_lock(); 2743 2744 /* All IPV6 dsts are created with ->obsolete set to the value 2745 * DST_OBSOLETE_FORCE_CHK which forces validation calls down 2746 * into this function always. 
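 * The cookie is, in essence, the fib6 tree serial number sampled when
 * the dst was handed out; any tree change bumps the sernum, so a stale
 * dst fails the checks below and its user has to do a fresh lookup.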
2747 */ 2748 2749 from = rcu_dereference(rt->from); 2750 2751 if (from && (rt->rt6i_flags & RTF_PCPU || 2752 unlikely(!list_empty(&rt->rt6i_uncached)))) 2753 dst_ret = rt6_dst_from_check(rt, from, cookie); 2754 else 2755 dst_ret = rt6_check(rt, from, cookie); 2756 2757 rcu_read_unlock(); 2758 2759 return dst_ret; 2760 } 2761 EXPORT_INDIRECT_CALLABLE(ip6_dst_check); 2762 2763 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) 2764 { 2765 struct rt6_info *rt = (struct rt6_info *) dst; 2766 2767 if (rt) { 2768 if (rt->rt6i_flags & RTF_CACHE) { 2769 rcu_read_lock(); 2770 if (rt6_check_expired(rt)) { 2771 rt6_remove_exception_rt(rt); 2772 dst = NULL; 2773 } 2774 rcu_read_unlock(); 2775 } else { 2776 dst_release(dst); 2777 dst = NULL; 2778 } 2779 } 2780 return dst; 2781 } 2782 2783 static void ip6_link_failure(struct sk_buff *skb) 2784 { 2785 struct rt6_info *rt; 2786 2787 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); 2788 2789 rt = (struct rt6_info *) skb_dst(skb); 2790 if (rt) { 2791 rcu_read_lock(); 2792 if (rt->rt6i_flags & RTF_CACHE) { 2793 rt6_remove_exception_rt(rt); 2794 } else { 2795 struct fib6_info *from; 2796 struct fib6_node *fn; 2797 2798 from = rcu_dereference(rt->from); 2799 if (from) { 2800 fn = rcu_dereference(from->fib6_node); 2801 if (fn && (rt->rt6i_flags & RTF_DEFAULT)) 2802 fn->fn_sernum = -1; 2803 } 2804 } 2805 rcu_read_unlock(); 2806 } 2807 } 2808 2809 static void rt6_update_expires(struct rt6_info *rt0, int timeout) 2810 { 2811 if (!(rt0->rt6i_flags & RTF_EXPIRES)) { 2812 struct fib6_info *from; 2813 2814 rcu_read_lock(); 2815 from = rcu_dereference(rt0->from); 2816 if (from) 2817 rt0->dst.expires = from->expires; 2818 rcu_read_unlock(); 2819 } 2820 2821 dst_set_expires(&rt0->dst, timeout); 2822 rt0->rt6i_flags |= RTF_EXPIRES; 2823 } 2824 2825 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu) 2826 { 2827 struct net *net = dev_net(rt->dst.dev); 2828 2829 dst_metric_set(&rt->dst, RTAX_MTU, mtu); 2830 rt->rt6i_flags |= RTF_MODIFIED; 2831 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); 2832 } 2833 2834 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) 2835 { 2836 return !(rt->rt6i_flags & RTF_CACHE) && 2837 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from)); 2838 } 2839 2840 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, 2841 const struct ipv6hdr *iph, u32 mtu, 2842 bool confirm_neigh) 2843 { 2844 const struct in6_addr *daddr, *saddr; 2845 struct rt6_info *rt6 = (struct rt6_info *)dst; 2846 2847 /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU) 2848 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it. 
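 * (IPv6 routers never fragment packets in flight, so Path MTU
 * discovery, per RFC 8201, is the only way to cope with a smaller
 * MTU further down the path.)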
2849 * [see also comment in rt6_mtu_change_route()] 2850 */ 2851 2852 if (iph) { 2853 daddr = &iph->daddr; 2854 saddr = &iph->saddr; 2855 } else if (sk) { 2856 daddr = &sk->sk_v6_daddr; 2857 saddr = &inet6_sk(sk)->saddr; 2858 } else { 2859 daddr = NULL; 2860 saddr = NULL; 2861 } 2862 2863 if (confirm_neigh) 2864 dst_confirm_neigh(dst, daddr); 2865 2866 if (mtu < IPV6_MIN_MTU) 2867 return; 2868 if (mtu >= dst_mtu(dst)) 2869 return; 2870 2871 if (!rt6_cache_allowed_for_pmtu(rt6)) { 2872 rt6_do_update_pmtu(rt6, mtu); 2873 /* update rt6_ex->stamp for cache */ 2874 if (rt6->rt6i_flags & RTF_CACHE) 2875 rt6_update_exception_stamp_rt(rt6); 2876 } else if (daddr) { 2877 struct fib6_result res = {}; 2878 struct rt6_info *nrt6; 2879 2880 rcu_read_lock(); 2881 res.f6i = rcu_dereference(rt6->from); 2882 if (!res.f6i) 2883 goto out_unlock; 2884 2885 res.fib6_flags = res.f6i->fib6_flags; 2886 res.fib6_type = res.f6i->fib6_type; 2887 2888 if (res.f6i->nh) { 2889 struct fib6_nh_match_arg arg = { 2890 .dev = dst->dev, 2891 .gw = &rt6->rt6i_gateway, 2892 }; 2893 2894 nexthop_for_each_fib6_nh(res.f6i->nh, 2895 fib6_nh_find_match, &arg); 2896 2897 /* fib6_info uses a nexthop that does not have fib6_nh 2898 * using the dst->dev + gw. Should be impossible. 2899 */ 2900 if (!arg.match) 2901 goto out_unlock; 2902 2903 res.nh = arg.match; 2904 } else { 2905 res.nh = res.f6i->fib6_nh; 2906 } 2907 2908 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr); 2909 if (nrt6) { 2910 rt6_do_update_pmtu(nrt6, mtu); 2911 if (rt6_insert_exception(nrt6, &res)) 2912 dst_release_immediate(&nrt6->dst); 2913 } 2914 out_unlock: 2915 rcu_read_unlock(); 2916 } 2917 } 2918 2919 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 2920 struct sk_buff *skb, u32 mtu, 2921 bool confirm_neigh) 2922 { 2923 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu, 2924 confirm_neigh); 2925 } 2926 2927 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, 2928 int oif, u32 mark, kuid_t uid) 2929 { 2930 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data; 2931 struct dst_entry *dst; 2932 struct flowi6 fl6 = { 2933 .flowi6_oif = oif, 2934 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark), 2935 .daddr = iph->daddr, 2936 .saddr = iph->saddr, 2937 .flowlabel = ip6_flowinfo(iph), 2938 .flowi6_uid = uid, 2939 }; 2940 2941 dst = ip6_route_output(net, NULL, &fl6); 2942 if (!dst->error) 2943 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true); 2944 dst_release(dst); 2945 } 2946 EXPORT_SYMBOL_GPL(ip6_update_pmtu); 2947 2948 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) 2949 { 2950 int oif = sk->sk_bound_dev_if; 2951 struct dst_entry *dst; 2952 2953 if (!oif && skb->dev) 2954 oif = l3mdev_master_ifindex(skb->dev); 2955 2956 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid); 2957 2958 dst = __sk_dst_get(sk); 2959 if (!dst || !dst->obsolete || 2960 dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) 2961 return; 2962 2963 bh_lock_sock(sk); 2964 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) 2965 ip6_datagram_dst_update(sk, false); 2966 bh_unlock_sock(sk); 2967 } 2968 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu); 2969 2970 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst, 2971 const struct flowi6 *fl6) 2972 { 2973 #ifdef CONFIG_IPV6_SUBTREES 2974 struct ipv6_pinfo *np = inet6_sk(sk); 2975 #endif 2976 2977 ip6_dst_store(sk, dst, 2978 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ? 
&sk->sk_v6_daddr : NULL, 2980 #ifdef CONFIG_IPV6_SUBTREES 2981 ipv6_addr_equal(&fl6->saddr, &np->saddr) ? 2982 &np->saddr : 2983 #endif 2984 NULL); 2985 } 2986 2987 static bool ip6_redirect_nh_match(const struct fib6_result *res, 2988 struct flowi6 *fl6, 2989 const struct in6_addr *gw, 2990 struct rt6_info **ret) 2991 { 2992 const struct fib6_nh *nh = res->nh; 2993 2994 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family || 2995 fl6->flowi6_oif != nh->fib_nh_dev->ifindex) 2996 return false; 2997 2998 /* rt_cache's gateway might be different from its 'parent' 2999 * in the case of an IP redirect. 3000 * So we keep searching in the exception table if the gateway 3001 * is different. 3002 */ 3003 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) { 3004 struct rt6_info *rt_cache; 3005 3006 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr); 3007 if (rt_cache && 3008 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) { 3009 *ret = rt_cache; 3010 return true; 3011 } 3012 return false; 3013 } 3014 return true; 3015 } 3016 3017 struct fib6_nh_rd_arg { 3018 struct fib6_result *res; 3019 struct flowi6 *fl6; 3020 const struct in6_addr *gw; 3021 struct rt6_info **ret; 3022 }; 3023 3024 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg) 3025 { 3026 struct fib6_nh_rd_arg *arg = _arg; 3027 3028 arg->res->nh = nh; 3029 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret); 3030 } 3031 3032 /* Handle redirects */ 3033 struct ip6rd_flowi { 3034 struct flowi6 fl6; 3035 struct in6_addr gateway; 3036 }; 3037 3038 INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net, 3039 struct fib6_table *table, 3040 struct flowi6 *fl6, 3041 const struct sk_buff *skb, 3042 int flags) 3043 { 3044 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6; 3045 struct rt6_info *ret = NULL; 3046 struct fib6_result res = {}; 3047 struct fib6_nh_rd_arg arg = { 3048 .res = &res, 3049 .fl6 = fl6, 3050 .gw = &rdfl->gateway, 3051 .ret = &ret 3052 }; 3053 struct fib6_info *rt; 3054 struct fib6_node *fn; 3055 3056 /* l3mdev_update_flow overrides oif if the device is enslaved; in 3057 * this case we must match on the real ingress device, so reset it. 3058 */ 3059 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) 3060 fl6->flowi6_oif = skb->dev->ifindex; 3061 3062 /* Get the "current" route for this destination and 3063 * check if the redirect has come from an appropriate router. 3064 * 3065 * RFC 4861 specifies that redirects should only be 3066 * accepted if they come from the nexthop to the target. 3067 * Due to the way the routes are chosen, this notion 3068 * is a bit fuzzy and one might need to check all possible 3069 * routes.
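 * With ECMP, for instance, the redirect may legitimately arrive from
 * any of the candidate nexthops, so each one is tested with
 * ip6_redirect_nh_match() below.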
3070 */ 3071 3072 rcu_read_lock(); 3073 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 3074 restart: 3075 for_each_fib6_node_rt_rcu(fn) { 3076 res.f6i = rt; 3077 if (fib6_check_expired(rt)) 3078 continue; 3079 if (rt->fib6_flags & RTF_REJECT) 3080 break; 3081 if (unlikely(rt->nh)) { 3082 if (nexthop_is_blackhole(rt->nh)) 3083 continue; 3084 /* on match, res->nh is filled in and potentially ret */ 3085 if (nexthop_for_each_fib6_nh(rt->nh, 3086 fib6_nh_redirect_match, 3087 &arg)) 3088 goto out; 3089 } else { 3090 res.nh = rt->fib6_nh; 3091 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway, 3092 &ret)) 3093 goto out; 3094 } 3095 } 3096 3097 if (!rt) 3098 rt = net->ipv6.fib6_null_entry; 3099 else if (rt->fib6_flags & RTF_REJECT) { 3100 ret = net->ipv6.ip6_null_entry; 3101 goto out; 3102 } 3103 3104 if (rt == net->ipv6.fib6_null_entry) { 3105 fn = fib6_backtrack(fn, &fl6->saddr); 3106 if (fn) 3107 goto restart; 3108 } 3109 3110 res.f6i = rt; 3111 res.nh = rt->fib6_nh; 3112 out: 3113 if (ret) { 3114 ip6_hold_safe(net, &ret); 3115 } else { 3116 res.fib6_flags = res.f6i->fib6_flags; 3117 res.fib6_type = res.f6i->fib6_type; 3118 ret = ip6_create_rt_rcu(&res); 3119 } 3120 3121 rcu_read_unlock(); 3122 3123 trace_fib6_table_lookup(net, &res, table, fl6); 3124 return ret; 3125 } 3126 3127 static struct dst_entry *ip6_route_redirect(struct net *net, 3128 const struct flowi6 *fl6, 3129 const struct sk_buff *skb, 3130 const struct in6_addr *gateway) 3131 { 3132 int flags = RT6_LOOKUP_F_HAS_SADDR; 3133 struct ip6rd_flowi rdfl; 3134 3135 rdfl.fl6 = *fl6; 3136 rdfl.gateway = *gateway; 3137 3138 return fib6_rule_lookup(net, &rdfl.fl6, skb, 3139 flags, __ip6_route_redirect); 3140 } 3141 3142 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, 3143 kuid_t uid) 3144 { 3145 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data; 3146 struct dst_entry *dst; 3147 struct flowi6 fl6 = { 3148 .flowi6_iif = LOOPBACK_IFINDEX, 3149 .flowi6_oif = oif, 3150 .flowi6_mark = mark, 3151 .daddr = iph->daddr, 3152 .saddr = iph->saddr, 3153 .flowlabel = ip6_flowinfo(iph), 3154 .flowi6_uid = uid, 3155 }; 3156 3157 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr); 3158 rt6_do_redirect(dst, NULL, skb); 3159 dst_release(dst); 3160 } 3161 EXPORT_SYMBOL_GPL(ip6_redirect); 3162 3163 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif) 3164 { 3165 const struct ipv6hdr *iph = ipv6_hdr(skb); 3166 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb); 3167 struct dst_entry *dst; 3168 struct flowi6 fl6 = { 3169 .flowi6_iif = LOOPBACK_IFINDEX, 3170 .flowi6_oif = oif, 3171 .daddr = msg->dest, 3172 .saddr = iph->daddr, 3173 .flowi6_uid = sock_net_uid(net, NULL), 3174 }; 3175 3176 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr); 3177 rt6_do_redirect(dst, NULL, skb); 3178 dst_release(dst); 3179 } 3180 3181 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) 3182 { 3183 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark, 3184 sk->sk_uid); 3185 } 3186 EXPORT_SYMBOL_GPL(ip6_sk_redirect); 3187 3188 static unsigned int ip6_default_advmss(const struct dst_entry *dst) 3189 { 3190 struct net_device *dev = dst->dev; 3191 unsigned int mtu = dst_mtu(dst); 3192 struct net *net = dev_net(dev); 3193 3194 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr); 3195 3196 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss) 3197 mtu = net->ipv6.sysctl.ip6_rt_min_advmss; 3198 3199 /* 3200 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and 3201 *
corresponding MSS is IPV6_MAXPLEN - tcp_header_size. 3202 * IPV6_MAXPLEN is also valid and means: "any MSS, 3203 * rely only on pmtu discovery" 3204 */ 3205 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr)) 3206 mtu = IPV6_MAXPLEN; 3207 return mtu; 3208 } 3209 3210 INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst) 3211 { 3212 struct inet6_dev *idev; 3213 unsigned int mtu; 3214 3215 mtu = dst_metric_raw(dst, RTAX_MTU); 3216 if (mtu) 3217 goto out; 3218 3219 mtu = IPV6_MIN_MTU; 3220 3221 rcu_read_lock(); 3222 idev = __in6_dev_get(dst->dev); 3223 if (idev) 3224 mtu = idev->cnf.mtu6; 3225 rcu_read_unlock(); 3226 3227 out: 3228 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); 3229 3230 return mtu - lwtunnel_headroom(dst->lwtstate, mtu); 3231 } 3232 EXPORT_INDIRECT_CALLABLE(ip6_mtu); 3233 3234 /* MTU selection: 3235 * 1. mtu on route is locked - use it 3236 * 2. mtu from nexthop exception 3237 * 3. mtu from egress device 3238 * 3239 * based on ip6_dst_mtu_forward and exception logic of 3240 * rt6_find_cached_rt; called with rcu_read_lock 3241 */ 3242 u32 ip6_mtu_from_fib6(const struct fib6_result *res, 3243 const struct in6_addr *daddr, 3244 const struct in6_addr *saddr) 3245 { 3246 const struct fib6_nh *nh = res->nh; 3247 struct fib6_info *f6i = res->f6i; 3248 struct inet6_dev *idev; 3249 struct rt6_info *rt; 3250 u32 mtu = 0; 3251 3252 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) { 3253 mtu = f6i->fib6_pmtu; 3254 if (mtu) 3255 goto out; 3256 } 3257 3258 rt = rt6_find_cached_rt(res, daddr, saddr); 3259 if (unlikely(rt)) { 3260 mtu = dst_metric_raw(&rt->dst, RTAX_MTU); 3261 } else { 3262 struct net_device *dev = nh->fib_nh_dev; 3263 3264 mtu = IPV6_MIN_MTU; 3265 idev = __in6_dev_get(dev); 3266 if (idev && idev->cnf.mtu6 > mtu) 3267 mtu = idev->cnf.mtu6; 3268 } 3269 3270 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); 3271 out: 3272 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu); 3273 } 3274 3275 struct dst_entry *icmp6_dst_alloc(struct net_device *dev, 3276 struct flowi6 *fl6) 3277 { 3278 struct dst_entry *dst; 3279 struct rt6_info *rt; 3280 struct inet6_dev *idev = in6_dev_get(dev); 3281 struct net *net = dev_net(dev); 3282 3283 if (unlikely(!idev)) 3284 return ERR_PTR(-ENODEV); 3285 3286 rt = ip6_dst_alloc(net, dev, 0); 3287 if (unlikely(!rt)) { 3288 in6_dev_put(idev); 3289 dst = ERR_PTR(-ENOMEM); 3290 goto out; 3291 } 3292 3293 rt->dst.input = ip6_input; 3294 rt->dst.output = ip6_output; 3295 rt->rt6i_gateway = fl6->daddr; 3296 rt->rt6i_dst.addr = fl6->daddr; 3297 rt->rt6i_dst.plen = 128; 3298 rt->rt6i_idev = idev; 3299 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0); 3300 3301 /* Add this dst into uncached_list so that rt6_disable_ip() can 3302 * do proper release of the net_device 3303 */ 3304 rt6_uncached_list_add(rt); 3305 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache); 3306 3307 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0); 3308 3309 out: 3310 return dst; 3311 } 3312 3313 static int ip6_dst_gc(struct dst_ops *ops) 3314 { 3315 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops); 3316 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval; 3317 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size; 3318 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity; 3319 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout; 3320 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc; 3321 int entries; 3322 3323 entries = dst_entries_get_fast(ops); 3324 if (entries > rt_max_size) 3325 entries = dst_entries_get_slow(ops); 3326 3327 if 
(time_after(rt_last_gc + rt_min_interval, jiffies) && 3328 entries <= rt_max_size) 3329 goto out; 3330 3331 net->ipv6.ip6_rt_gc_expire++; 3332 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true); 3333 entries = dst_entries_get_slow(ops); 3334 if (entries < ops->gc_thresh) 3335 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1; 3336 out: 3337 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity; 3338 return entries > rt_max_size; 3339 } 3340 3341 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg, 3342 const struct in6_addr *gw_addr, u32 tbid, 3343 int flags, struct fib6_result *res) 3344 { 3345 struct flowi6 fl6 = { 3346 .flowi6_oif = cfg->fc_ifindex, 3347 .daddr = *gw_addr, 3348 .saddr = cfg->fc_prefsrc, 3349 }; 3350 struct fib6_table *table; 3351 int err; 3352 3353 table = fib6_get_table(net, tbid); 3354 if (!table) 3355 return -EINVAL; 3356 3357 if (!ipv6_addr_any(&cfg->fc_prefsrc)) 3358 flags |= RT6_LOOKUP_F_HAS_SADDR; 3359 3360 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE; 3361 3362 err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags); 3363 if (!err && res->f6i != net->ipv6.fib6_null_entry) 3364 fib6_select_path(net, res, &fl6, cfg->fc_ifindex, 3365 cfg->fc_ifindex != 0, NULL, flags); 3366 3367 return err; 3368 } 3369 3370 static int ip6_route_check_nh_onlink(struct net *net, 3371 struct fib6_config *cfg, 3372 const struct net_device *dev, 3373 struct netlink_ext_ack *extack) 3374 { 3375 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; 3376 const struct in6_addr *gw_addr = &cfg->fc_gateway; 3377 struct fib6_result res = {}; 3378 int err; 3379 3380 err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res); 3381 if (!err && !(res.fib6_flags & RTF_REJECT) && 3382 /* ignore match if it is the default route */ 3383 !ipv6_addr_any(&res.f6i->fib6_dst.addr) && 3384 (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) { 3385 NL_SET_ERR_MSG(extack, 3386 "Nexthop has invalid gateway or device mismatch"); 3387 err = -EINVAL; 3388 } 3389 3390 return err; 3391 } 3392 3393 static int ip6_route_check_nh(struct net *net, 3394 struct fib6_config *cfg, 3395 struct net_device **_dev, 3396 struct inet6_dev **idev) 3397 { 3398 const struct in6_addr *gw_addr = &cfg->fc_gateway; 3399 struct net_device *dev = _dev ? *_dev : NULL; 3400 int flags = RT6_LOOKUP_F_IFACE; 3401 struct fib6_result res = {}; 3402 int err = -EHOSTUNREACH; 3403 3404 if (cfg->fc_table) { 3405 err = ip6_nh_lookup_table(net, cfg, gw_addr, 3406 cfg->fc_table, flags, &res); 3407 /* The route to gw_addr must not itself require a gateway or 3408 * resolve to a reject route. If a device is given, it must match the result.
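 * In other words the gateway itself must be on-link: reachable
 * through a connected route rather than via yet another gateway.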
3409 */ 3410 if (err || res.fib6_flags & RTF_REJECT || 3411 res.nh->fib_nh_gw_family || 3412 (dev && dev != res.nh->fib_nh_dev)) 3413 err = -EHOSTUNREACH; 3414 } 3415 3416 if (err < 0) { 3417 struct flowi6 fl6 = { 3418 .flowi6_oif = cfg->fc_ifindex, 3419 .daddr = *gw_addr, 3420 }; 3421 3422 err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags); 3423 if (err || res.fib6_flags & RTF_REJECT || 3424 res.nh->fib_nh_gw_family) 3425 err = -EHOSTUNREACH; 3426 3427 if (err) 3428 return err; 3429 3430 fib6_select_path(net, &res, &fl6, cfg->fc_ifindex, 3431 cfg->fc_ifindex != 0, NULL, flags); 3432 } 3433 3434 err = 0; 3435 if (dev) { 3436 if (dev != res.nh->fib_nh_dev) 3437 err = -EHOSTUNREACH; 3438 } else { 3439 *_dev = dev = res.nh->fib_nh_dev; 3440 dev_hold(dev); 3441 *idev = in6_dev_get(dev); 3442 } 3443 3444 return err; 3445 } 3446 3447 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg, 3448 struct net_device **_dev, struct inet6_dev **idev, 3449 struct netlink_ext_ack *extack) 3450 { 3451 const struct in6_addr *gw_addr = &cfg->fc_gateway; 3452 int gwa_type = ipv6_addr_type(gw_addr); 3453 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true; 3454 const struct net_device *dev = *_dev; 3455 bool need_addr_check = !dev; 3456 int err = -EINVAL; 3457 3458 /* if gw_addr is local we will fail to detect this in case the 3459 * address is still TENTATIVE (DAD in progress). rt6_lookup() 3460 * will return the already-added prefix route via the interface 3461 * that the prefix route was assigned to, which might be non-loopback. 3462 */ 3463 if (dev && 3464 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) { 3465 NL_SET_ERR_MSG(extack, "Gateway can not be a local address"); 3466 goto out; 3467 } 3468 3469 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) { 3470 /* IPv6 strictly inhibits using non-link-local 3471 * addresses as a nexthop address. 3472 * Otherwise, the router will not be able to send redirects. 3473 * It is very good, but in some (rare!) circumstances 3474 * (SIT, PtP, NBMA NOARP links) it is handy to allow 3475 * some exceptions. --ANK 3476 * We allow IPv4-mapped nexthops to support RFC 4798-style 3477 * addressing. 3478 */ 3479 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) { 3480 NL_SET_ERR_MSG(extack, "Invalid gateway address"); 3481 goto out; 3482 } 3483 3484 rcu_read_lock(); 3485 3486 if (cfg->fc_flags & RTNH_F_ONLINK) 3487 err = ip6_route_check_nh_onlink(net, cfg, dev, extack); 3488 else 3489 err = ip6_route_check_nh(net, cfg, _dev, idev); 3490 3491 rcu_read_unlock(); 3492 3493 if (err) 3494 goto out; 3495 } 3496 3497 /* reload in case the device was changed */ 3498 dev = *_dev; 3499 3500 err = -EINVAL; 3501 if (!dev) { 3502 NL_SET_ERR_MSG(extack, "Egress device not specified"); 3503 goto out; 3504 } else if (dev->flags & IFF_LOOPBACK) { 3505 NL_SET_ERR_MSG(extack, 3506 "Egress device can not be loopback device for this route"); 3507 goto out; 3508 } 3509 3510 /* if we did not check gw_addr above, do so now that the 3511 * egress device has been resolved. 3512
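 * (ip6_route_check_nh() above may have picked the device for us, in
 * which case the earlier device-based check was skipped.)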
3512 */ 3513 if (need_addr_check && 3514 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) { 3515 NL_SET_ERR_MSG(extack, "Gateway can not be a local address"); 3516 goto out; 3517 } 3518 3519 err = 0; 3520 out: 3521 return err; 3522 } 3523 3524 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type) 3525 { 3526 if ((flags & RTF_REJECT) || 3527 (dev && (dev->flags & IFF_LOOPBACK) && 3528 !(addr_type & IPV6_ADDR_LOOPBACK) && 3529 !(flags & (RTF_ANYCAST | RTF_LOCAL)))) 3530 return true; 3531 3532 return false; 3533 } 3534 3535 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, 3536 struct fib6_config *cfg, gfp_t gfp_flags, 3537 struct netlink_ext_ack *extack) 3538 { 3539 struct net_device *dev = NULL; 3540 struct inet6_dev *idev = NULL; 3541 int addr_type; 3542 int err; 3543 3544 fib6_nh->fib_nh_family = AF_INET6; 3545 #ifdef CONFIG_IPV6_ROUTER_PREF 3546 fib6_nh->last_probe = jiffies; 3547 #endif 3548 if (cfg->fc_is_fdb) { 3549 fib6_nh->fib_nh_gw6 = cfg->fc_gateway; 3550 fib6_nh->fib_nh_gw_family = AF_INET6; 3551 return 0; 3552 } 3553 3554 err = -ENODEV; 3555 if (cfg->fc_ifindex) { 3556 dev = dev_get_by_index(net, cfg->fc_ifindex); 3557 if (!dev) 3558 goto out; 3559 idev = in6_dev_get(dev); 3560 if (!idev) 3561 goto out; 3562 } 3563 3564 if (cfg->fc_flags & RTNH_F_ONLINK) { 3565 if (!dev) { 3566 NL_SET_ERR_MSG(extack, 3567 "Nexthop device required for onlink"); 3568 goto out; 3569 } 3570 3571 if (!(dev->flags & IFF_UP)) { 3572 NL_SET_ERR_MSG(extack, "Nexthop device is not up"); 3573 err = -ENETDOWN; 3574 goto out; 3575 } 3576 3577 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK; 3578 } 3579 3580 fib6_nh->fib_nh_weight = 1; 3581 3582 /* We cannot add true routes via loopback here, 3583 * they would result in kernel looping; promote them to reject routes 3584 */ 3585 addr_type = ipv6_addr_type(&cfg->fc_dst); 3586 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) { 3587 /* hold loopback dev/idev if we haven't done so. 
*/ 3588 if (dev != net->loopback_dev) { 3589 if (dev) { 3590 dev_put(dev); 3591 in6_dev_put(idev); 3592 } 3593 dev = net->loopback_dev; 3594 dev_hold(dev); 3595 idev = in6_dev_get(dev); 3596 if (!idev) { 3597 err = -ENODEV; 3598 goto out; 3599 } 3600 } 3601 goto pcpu_alloc; 3602 } 3603 3604 if (cfg->fc_flags & RTF_GATEWAY) { 3605 err = ip6_validate_gw(net, cfg, &dev, &idev, extack); 3606 if (err) 3607 goto out; 3608 3609 fib6_nh->fib_nh_gw6 = cfg->fc_gateway; 3610 fib6_nh->fib_nh_gw_family = AF_INET6; 3611 } 3612 3613 err = -ENODEV; 3614 if (!dev) 3615 goto out; 3616 3617 if (idev->cnf.disable_ipv6) { 3618 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device"); 3619 err = -EACCES; 3620 goto out; 3621 } 3622 3623 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) { 3624 NL_SET_ERR_MSG(extack, "Nexthop device is not up"); 3625 err = -ENETDOWN; 3626 goto out; 3627 } 3628 3629 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) && 3630 !netif_carrier_ok(dev)) 3631 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN; 3632 3633 err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap, 3634 cfg->fc_encap_type, cfg, gfp_flags, extack); 3635 if (err) 3636 goto out; 3637 3638 pcpu_alloc: 3639 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags); 3640 if (!fib6_nh->rt6i_pcpu) { 3641 err = -ENOMEM; 3642 goto out; 3643 } 3644 3645 fib6_nh->fib_nh_dev = dev; 3646 fib6_nh->fib_nh_oif = dev->ifindex; 3647 err = 0; 3648 out: 3649 if (idev) 3650 in6_dev_put(idev); 3651 3652 if (err) { 3653 lwtstate_put(fib6_nh->fib_nh_lws); 3654 fib6_nh->fib_nh_lws = NULL; 3655 if (dev) 3656 dev_put(dev); 3657 } 3658 3659 return err; 3660 } 3661 3662 void fib6_nh_release(struct fib6_nh *fib6_nh) 3663 { 3664 struct rt6_exception_bucket *bucket; 3665 3666 rcu_read_lock(); 3667 3668 fib6_nh_flush_exceptions(fib6_nh, NULL); 3669 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL); 3670 if (bucket) { 3671 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL); 3672 kfree(bucket); 3673 } 3674 3675 rcu_read_unlock(); 3676 3677 if (fib6_nh->rt6i_pcpu) { 3678 int cpu; 3679 3680 for_each_possible_cpu(cpu) { 3681 struct rt6_info **ppcpu_rt; 3682 struct rt6_info *pcpu_rt; 3683 3684 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu); 3685 pcpu_rt = *ppcpu_rt; 3686 if (pcpu_rt) { 3687 dst_dev_put(&pcpu_rt->dst); 3688 dst_release(&pcpu_rt->dst); 3689 *ppcpu_rt = NULL; 3690 } 3691 } 3692 3693 free_percpu(fib6_nh->rt6i_pcpu); 3694 } 3695 3696 fib_nh_common_release(&fib6_nh->nh_common); 3697 } 3698 3699 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, 3700 gfp_t gfp_flags, 3701 struct netlink_ext_ack *extack) 3702 { 3703 struct net *net = cfg->fc_nlinfo.nl_net; 3704 struct fib6_info *rt = NULL; 3705 struct nexthop *nh = NULL; 3706 struct fib6_table *table; 3707 struct fib6_nh *fib6_nh; 3708 int err = -EINVAL; 3709 int addr_type; 3710 3711 /* RTF_PCPU is an internal flag; can not be set by userspace */ 3712 if (cfg->fc_flags & RTF_PCPU) { 3713 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU"); 3714 goto out; 3715 } 3716 3717 /* RTF_CACHE is an internal flag; can not be set by userspace */ 3718 if (cfg->fc_flags & RTF_CACHE) { 3719 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE"); 3720 goto out; 3721 } 3722 3723 if (cfg->fc_type > RTN_MAX) { 3724 NL_SET_ERR_MSG(extack, "Invalid route type"); 3725 goto out; 3726 } 3727 3728 if (cfg->fc_dst_len > 128) { 3729 NL_SET_ERR_MSG(extack, "Invalid prefix length"); 3730 goto out; 3731 } 3732 if (cfg->fc_src_len > 128) { 3733 NL_SET_ERR_MSG(extack, 
"Invalid source address length"); 3734 goto out; 3735 } 3736 #ifndef CONFIG_IPV6_SUBTREES 3737 if (cfg->fc_src_len) { 3738 NL_SET_ERR_MSG(extack, 3739 "Specifying source address requires IPV6_SUBTREES to be enabled"); 3740 goto out; 3741 } 3742 #endif 3743 if (cfg->fc_nh_id) { 3744 nh = nexthop_find_by_id(net, cfg->fc_nh_id); 3745 if (!nh) { 3746 NL_SET_ERR_MSG(extack, "Nexthop id does not exist"); 3747 goto out; 3748 } 3749 err = fib6_check_nexthop(nh, cfg, extack); 3750 if (err) 3751 goto out; 3752 } 3753 3754 err = -ENOBUFS; 3755 if (cfg->fc_nlinfo.nlh && 3756 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) { 3757 table = fib6_get_table(net, cfg->fc_table); 3758 if (!table) { 3759 pr_warn("NLM_F_CREATE should be specified when creating new route\n"); 3760 table = fib6_new_table(net, cfg->fc_table); 3761 } 3762 } else { 3763 table = fib6_new_table(net, cfg->fc_table); 3764 } 3765 3766 if (!table) 3767 goto out; 3768 3769 err = -ENOMEM; 3770 rt = fib6_info_alloc(gfp_flags, !nh); 3771 if (!rt) 3772 goto out; 3773 3774 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len, 3775 extack); 3776 if (IS_ERR(rt->fib6_metrics)) { 3777 err = PTR_ERR(rt->fib6_metrics); 3778 /* Do not leave garbage there. */ 3779 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics; 3780 goto out_free; 3781 } 3782 3783 if (cfg->fc_flags & RTF_ADDRCONF) 3784 rt->dst_nocount = true; 3785 3786 if (cfg->fc_flags & RTF_EXPIRES) 3787 fib6_set_expires(rt, jiffies + 3788 clock_t_to_jiffies(cfg->fc_expires)); 3789 else 3790 fib6_clean_expires(rt); 3791 3792 if (cfg->fc_protocol == RTPROT_UNSPEC) 3793 cfg->fc_protocol = RTPROT_BOOT; 3794 rt->fib6_protocol = cfg->fc_protocol; 3795 3796 rt->fib6_table = table; 3797 rt->fib6_metric = cfg->fc_metric; 3798 rt->fib6_type = cfg->fc_type ? 
: RTN_UNICAST; 3799 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY; 3800 3801 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); 3802 rt->fib6_dst.plen = cfg->fc_dst_len; 3803 3804 #ifdef CONFIG_IPV6_SUBTREES 3805 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len); 3806 rt->fib6_src.plen = cfg->fc_src_len; 3807 #endif 3808 if (nh) { 3809 if (rt->fib6_src.plen) { 3810 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); 3811 goto out_free; 3812 } 3813 if (!nexthop_get(nh)) { 3814 NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); 3815 goto out_free; 3816 } 3817 rt->nh = nh; 3818 fib6_nh = nexthop_fib6_nh(rt->nh); 3819 } else { 3820 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack); 3821 if (err) 3822 goto out; 3823 3824 fib6_nh = rt->fib6_nh; 3825 3826 /* We cannot add true routes via loopback here, they would 3827 * result in kernel looping; promote them to reject routes 3828 */ 3829 addr_type = ipv6_addr_type(&cfg->fc_dst); 3830 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev, 3831 addr_type)) 3832 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP; 3833 } 3834 3835 if (!ipv6_addr_any(&cfg->fc_prefsrc)) { 3836 struct net_device *dev = fib6_nh->fib_nh_dev; 3837 3838 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) { 3839 NL_SET_ERR_MSG(extack, "Invalid source address"); 3840 err = -EINVAL; 3841 goto out; 3842 } 3843 rt->fib6_prefsrc.addr = cfg->fc_prefsrc; 3844 rt->fib6_prefsrc.plen = 128; 3845 } else 3846 rt->fib6_prefsrc.plen = 0; 3847 3848 return rt; 3849 out: 3850 fib6_info_release(rt); 3851 return ERR_PTR(err); 3852 out_free: 3853 ip_fib_metrics_put(rt->fib6_metrics); 3854 kfree(rt); 3855 return ERR_PTR(err); 3856 } 3857 3858 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, 3859 struct netlink_ext_ack *extack) 3860 { 3861 struct fib6_info *rt; 3862 int err; 3863 3864 rt = ip6_route_info_create(cfg, gfp_flags, extack); 3865 if (IS_ERR(rt)) 3866 return PTR_ERR(rt); 3867 3868 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack); 3869 fib6_info_release(rt); 3870 3871 return err; 3872 } 3873 3874 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info) 3875 { 3876 struct net *net = info->nl_net; 3877 struct fib6_table *table; 3878 int err; 3879 3880 if (rt == net->ipv6.fib6_null_entry) { 3881 err = -ENOENT; 3882 goto out; 3883 } 3884 3885 table = rt->fib6_table; 3886 spin_lock_bh(&table->tb6_lock); 3887 err = fib6_del(rt, info); 3888 spin_unlock_bh(&table->tb6_lock); 3889 3890 out: 3891 fib6_info_release(rt); 3892 return err; 3893 } 3894 3895 int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify) 3896 { 3897 struct nl_info info = { 3898 .nl_net = net, 3899 .skip_notify = skip_notify 3900 }; 3901 3902 return __ip6_del_rt(rt, &info); 3903 } 3904 3905 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg) 3906 { 3907 struct nl_info *info = &cfg->fc_nlinfo; 3908 struct net *net = info->nl_net; 3909 struct sk_buff *skb = NULL; 3910 struct fib6_table *table; 3911 int err = -ENOENT; 3912 3913 if (rt == net->ipv6.fib6_null_entry) 3914 goto out_put; 3915 table = rt->fib6_table; 3916 spin_lock_bh(&table->tb6_lock); 3917 3918 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) { 3919 struct fib6_info *sibling, *next_sibling; 3920 struct fib6_node *fn; 3921 3922 /* prefer to send a single notification with all hops */ 3923 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 3924 if (skb) { 3925 u32 seq = info->nlh ? 
info->nlh->nlmsg_seq : 0; 3926 3927 if (rt6_fill_node(net, skb, rt, NULL, 3928 NULL, NULL, 0, RTM_DELROUTE, 3929 info->portid, seq, 0) < 0) { 3930 kfree_skb(skb); 3931 skb = NULL; 3932 } else 3933 info->skip_notify = 1; 3934 } 3935 3936 /* 'rt' points to the first sibling route. If it is not the 3937 * leaf, then we do not need to send a notification. Otherwise, 3938 * we need to check if the last sibling has a next route or not 3939 * and emit a replace or delete notification, respectively. 3940 */ 3941 info->skip_notify_kernel = 1; 3942 fn = rcu_dereference_protected(rt->fib6_node, 3943 lockdep_is_held(&table->tb6_lock)); 3944 if (rcu_access_pointer(fn->leaf) == rt) { 3945 struct fib6_info *last_sibling, *replace_rt; 3946 3947 last_sibling = list_last_entry(&rt->fib6_siblings, 3948 struct fib6_info, 3949 fib6_siblings); 3950 replace_rt = rcu_dereference_protected( 3951 last_sibling->fib6_next, 3952 lockdep_is_held(&table->tb6_lock)); 3953 if (replace_rt) 3954 call_fib6_entry_notifiers_replace(net, 3955 replace_rt); 3956 else 3957 call_fib6_multipath_entry_notifiers(net, 3958 FIB_EVENT_ENTRY_DEL, 3959 rt, rt->fib6_nsiblings, 3960 NULL); 3961 } 3962 list_for_each_entry_safe(sibling, next_sibling, 3963 &rt->fib6_siblings, 3964 fib6_siblings) { 3965 err = fib6_del(sibling, info); 3966 if (err) 3967 goto out_unlock; 3968 } 3969 } 3970 3971 err = fib6_del(rt, info); 3972 out_unlock: 3973 spin_unlock_bh(&table->tb6_lock); 3974 out_put: 3975 fib6_info_release(rt); 3976 3977 if (skb) { 3978 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 3979 info->nlh, gfp_any()); 3980 } 3981 return err; 3982 } 3983 3984 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg) 3985 { 3986 int rc = -ESRCH; 3987 3988 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex) 3989 goto out; 3990 3991 if (cfg->fc_flags & RTF_GATEWAY && 3992 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) 3993 goto out; 3994 3995 rc = rt6_remove_exception_rt(rt); 3996 out: 3997 return rc; 3998 } 3999 4000 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt, 4001 struct fib6_nh *nh) 4002 { 4003 struct fib6_result res = { 4004 .f6i = rt, 4005 .nh = nh, 4006 }; 4007 struct rt6_info *rt_cache; 4008 4009 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src); 4010 if (rt_cache) 4011 return __ip6_del_cached_rt(rt_cache, cfg); 4012 4013 return 0; 4014 } 4015 4016 struct fib6_nh_del_cached_rt_arg { 4017 struct fib6_config *cfg; 4018 struct fib6_info *f6i; 4019 }; 4020 4021 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg) 4022 { 4023 struct fib6_nh_del_cached_rt_arg *arg = _arg; 4024 int rc; 4025 4026 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh); 4027 return rc != -ESRCH ? 
rc : 0; 4028 } 4029 4030 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i) 4031 { 4032 struct fib6_nh_del_cached_rt_arg arg = { 4033 .cfg = cfg, 4034 .f6i = f6i 4035 }; 4036 4037 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg); 4038 } 4039 4040 static int ip6_route_del(struct fib6_config *cfg, 4041 struct netlink_ext_ack *extack) 4042 { 4043 struct fib6_table *table; 4044 struct fib6_info *rt; 4045 struct fib6_node *fn; 4046 int err = -ESRCH; 4047 4048 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table); 4049 if (!table) { 4050 NL_SET_ERR_MSG(extack, "FIB table does not exist"); 4051 return err; 4052 } 4053 4054 rcu_read_lock(); 4055 4056 fn = fib6_locate(&table->tb6_root, 4057 &cfg->fc_dst, cfg->fc_dst_len, 4058 &cfg->fc_src, cfg->fc_src_len, 4059 !(cfg->fc_flags & RTF_CACHE)); 4060 4061 if (fn) { 4062 for_each_fib6_node_rt_rcu(fn) { 4063 struct fib6_nh *nh; 4064 4065 if (rt->nh && cfg->fc_nh_id && 4066 rt->nh->id != cfg->fc_nh_id) 4067 continue; 4068 4069 if (cfg->fc_flags & RTF_CACHE) { 4070 int rc = 0; 4071 4072 if (rt->nh) { 4073 rc = ip6_del_cached_rt_nh(cfg, rt); 4074 } else if (cfg->fc_nh_id) { 4075 continue; 4076 } else { 4077 nh = rt->fib6_nh; 4078 rc = ip6_del_cached_rt(cfg, rt, nh); 4079 } 4080 if (rc != -ESRCH) { 4081 rcu_read_unlock(); 4082 return rc; 4083 } 4084 continue; 4085 } 4086 4087 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric) 4088 continue; 4089 if (cfg->fc_protocol && 4090 cfg->fc_protocol != rt->fib6_protocol) 4091 continue; 4092 4093 if (rt->nh) { 4094 if (!fib6_info_hold_safe(rt)) 4095 continue; 4096 rcu_read_unlock(); 4097 4098 return __ip6_del_rt(rt, &cfg->fc_nlinfo); 4099 } 4100 if (cfg->fc_nh_id) 4101 continue; 4102 4103 nh = rt->fib6_nh; 4104 if (cfg->fc_ifindex && 4105 (!nh->fib_nh_dev || 4106 nh->fib_nh_dev->ifindex != cfg->fc_ifindex)) 4107 continue; 4108 if (cfg->fc_flags & RTF_GATEWAY && 4109 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6)) 4110 continue; 4111 if (!fib6_info_hold_safe(rt)) 4112 continue; 4113 rcu_read_unlock(); 4114 4115 /* if gateway was specified only delete the one hop */ 4116 if (cfg->fc_flags & RTF_GATEWAY) 4117 return __ip6_del_rt(rt, &cfg->fc_nlinfo); 4118 4119 return __ip6_del_rt_siblings(rt, cfg); 4120 } 4121 } 4122 rcu_read_unlock(); 4123 4124 return err; 4125 } 4126 4127 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) 4128 { 4129 struct netevent_redirect netevent; 4130 struct rt6_info *rt, *nrt = NULL; 4131 struct fib6_result res = {}; 4132 struct ndisc_options ndopts; 4133 struct inet6_dev *in6_dev; 4134 struct neighbour *neigh; 4135 struct rd_msg *msg; 4136 int optlen, on_link; 4137 u8 *lladdr; 4138 4139 optlen = skb_tail_pointer(skb) - skb_transport_header(skb); 4140 optlen -= sizeof(*msg); 4141 4142 if (optlen < 0) { 4143 net_dbg_ratelimited("rt6_do_redirect: packet too short\n"); 4144 return; 4145 } 4146 4147 msg = (struct rd_msg *)icmp6_hdr(skb); 4148 4149 if (ipv6_addr_is_multicast(&msg->dest)) { 4150 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n"); 4151 return; 4152 } 4153 4154 on_link = 0; 4155 if (ipv6_addr_equal(&msg->dest, &msg->target)) { 4156 on_link = 1; 4157 } else if (ipv6_addr_type(&msg->target) != 4158 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { 4159 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n"); 4160 return; 4161 } 4162 4163 in6_dev = __in6_dev_get(skb->dev); 4164 if (!in6_dev) 4165 return; 4166 if (in6_dev->cnf.forwarding || 
!in6_dev->cnf.accept_redirects) 4167 return; 4168 4169 /* RFC2461 8.1: 4170 * The IP source address of the Redirect MUST be the same as the current 4171 * first-hop router for the specified ICMP Destination Address. 4172 */ 4173 4174 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) { 4175 net_dbg_ratelimited("rt6_redirect: invalid ND options\n"); 4176 return; 4177 } 4178 4179 lladdr = NULL; 4180 if (ndopts.nd_opts_tgt_lladdr) { 4181 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, 4182 skb->dev); 4183 if (!lladdr) { 4184 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n"); 4185 return; 4186 } 4187 } 4188 4189 rt = (struct rt6_info *) dst; 4190 if (rt->rt6i_flags & RTF_REJECT) { 4191 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n"); 4192 return; 4193 } 4194 4195 /* Redirect received -> path was valid. 4196 * Look, redirects are sent only in response to data packets, 4197 * so that this nexthop apparently is reachable. --ANK 4198 */ 4199 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr); 4200 4201 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1); 4202 if (!neigh) 4203 return; 4204 4205 /* 4206 * We have finally decided to accept it. 4207 */ 4208 4209 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE, 4210 NEIGH_UPDATE_F_WEAK_OVERRIDE| 4211 NEIGH_UPDATE_F_OVERRIDE| 4212 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER| 4213 NEIGH_UPDATE_F_ISROUTER)), 4214 NDISC_REDIRECT, &ndopts); 4215 4216 rcu_read_lock(); 4217 res.f6i = rcu_dereference(rt->from); 4218 if (!res.f6i) 4219 goto out; 4220 4221 if (res.f6i->nh) { 4222 struct fib6_nh_match_arg arg = { 4223 .dev = dst->dev, 4224 .gw = &rt->rt6i_gateway, 4225 }; 4226 4227 nexthop_for_each_fib6_nh(res.f6i->nh, 4228 fib6_nh_find_match, &arg); 4229 4230 /* fib6_info uses a nexthop that does not have fib6_nh 4231 * using the dst->dev. Should be impossible 4232 */ 4233 if (!arg.match) 4234 goto out; 4235 res.nh = arg.match; 4236 } else { 4237 res.nh = res.f6i->fib6_nh; 4238 } 4239 4240 res.fib6_flags = res.f6i->fib6_flags; 4241 res.fib6_type = res.f6i->fib6_type; 4242 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL); 4243 if (!nrt) 4244 goto out; 4245 4246 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE; 4247 if (on_link) 4248 nrt->rt6i_flags &= ~RTF_GATEWAY; 4249 4250 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 4251 4252 /* rt6_insert_exception() will take care of duplicated exceptions */ 4253 if (rt6_insert_exception(nrt, &res)) { 4254 dst_release_immediate(&nrt->dst); 4255 goto out; 4256 } 4257 4258 netevent.old = &rt->dst; 4259 netevent.new = &nrt->dst; 4260 netevent.daddr = &msg->dest; 4261 netevent.neigh = neigh; 4262 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 4263 4264 out: 4265 rcu_read_unlock(); 4266 neigh_release(neigh); 4267 } 4268 4269 #ifdef CONFIG_IPV6_ROUTE_INFO 4270 static struct fib6_info *rt6_get_route_info(struct net *net, 4271 const struct in6_addr *prefix, int prefixlen, 4272 const struct in6_addr *gwaddr, 4273 struct net_device *dev) 4274 { 4275 u32 tb_id = l3mdev_fib_table(dev) ? 
: RT6_TABLE_INFO; 4276 int ifindex = dev->ifindex; 4277 struct fib6_node *fn; 4278 struct fib6_info *rt = NULL; 4279 struct fib6_table *table; 4280 4281 table = fib6_get_table(net, tb_id); 4282 if (!table) 4283 return NULL; 4284 4285 rcu_read_lock(); 4286 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true); 4287 if (!fn) 4288 goto out; 4289 4290 for_each_fib6_node_rt_rcu(fn) { 4291 /* these routes do not use nexthops */ 4292 if (rt->nh) 4293 continue; 4294 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex) 4295 continue; 4296 if (!(rt->fib6_flags & RTF_ROUTEINFO) || 4297 !rt->fib6_nh->fib_nh_gw_family) 4298 continue; 4299 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr)) 4300 continue; 4301 if (!fib6_info_hold_safe(rt)) 4302 continue; 4303 break; 4304 } 4305 out: 4306 rcu_read_unlock(); 4307 return rt; 4308 } 4309 4310 static struct fib6_info *rt6_add_route_info(struct net *net, 4311 const struct in6_addr *prefix, int prefixlen, 4312 const struct in6_addr *gwaddr, 4313 struct net_device *dev, 4314 unsigned int pref) 4315 { 4316 struct fib6_config cfg = { 4317 .fc_metric = IP6_RT_PRIO_USER, 4318 .fc_ifindex = dev->ifindex, 4319 .fc_dst_len = prefixlen, 4320 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 4321 RTF_UP | RTF_PREF(pref), 4322 .fc_protocol = RTPROT_RA, 4323 .fc_type = RTN_UNICAST, 4324 .fc_nlinfo.portid = 0, 4325 .fc_nlinfo.nlh = NULL, 4326 .fc_nlinfo.nl_net = net, 4327 }; 4328 4329 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO; 4330 cfg.fc_dst = *prefix; 4331 cfg.fc_gateway = *gwaddr; 4332 4333 /* We should treat it as a default route if prefix length is 0. */ 4334 if (!prefixlen) 4335 cfg.fc_flags |= RTF_DEFAULT; 4336 4337 ip6_route_add(&cfg, GFP_ATOMIC, NULL); 4338 4339 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); 4340 } 4341 #endif 4342 4343 struct fib6_info *rt6_get_dflt_router(struct net *net, 4344 const struct in6_addr *addr, 4345 struct net_device *dev) 4346 { 4347 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT; 4348 struct fib6_info *rt; 4349 struct fib6_table *table; 4350 4351 table = fib6_get_table(net, tb_id); 4352 if (!table) 4353 return NULL; 4354 4355 rcu_read_lock(); 4356 for_each_fib6_node_rt_rcu(&table->tb6_root) { 4357 struct fib6_nh *nh; 4358 4359 /* RA routes do not use nexthops */ 4360 if (rt->nh) 4361 continue; 4362 4363 nh = rt->fib6_nh; 4364 if (dev == nh->fib_nh_dev && 4365 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && 4366 ipv6_addr_equal(&nh->fib_nh_gw6, addr)) 4367 break; 4368 } 4369 if (rt && !fib6_info_hold_safe(rt)) 4370 rt = NULL; 4371 rcu_read_unlock(); 4372 return rt; 4373 } 4374 4375 struct fib6_info *rt6_add_dflt_router(struct net *net, 4376 const struct in6_addr *gwaddr, 4377 struct net_device *dev, 4378 unsigned int pref, 4379 u32 defrtr_usr_metric) 4380 { 4381 struct fib6_config cfg = { 4382 .fc_table = l3mdev_fib_table(dev) ? 
: RT6_TABLE_DFLT, 4383 .fc_metric = defrtr_usr_metric, 4384 .fc_ifindex = dev->ifindex, 4385 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 4386 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 4387 .fc_protocol = RTPROT_RA, 4388 .fc_type = RTN_UNICAST, 4389 .fc_nlinfo.portid = 0, 4390 .fc_nlinfo.nlh = NULL, 4391 .fc_nlinfo.nl_net = net, 4392 }; 4393 4394 cfg.fc_gateway = *gwaddr; 4395 4396 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) { 4397 struct fib6_table *table; 4398 4399 table = fib6_get_table(dev_net(dev), cfg.fc_table); 4400 if (table) 4401 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER; 4402 } 4403 4404 return rt6_get_dflt_router(net, gwaddr, dev); 4405 } 4406 4407 static void __rt6_purge_dflt_routers(struct net *net, 4408 struct fib6_table *table) 4409 { 4410 struct fib6_info *rt; 4411 4412 restart: 4413 rcu_read_lock(); 4414 for_each_fib6_node_rt_rcu(&table->tb6_root) { 4415 struct net_device *dev = fib6_info_nh_dev(rt); 4416 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL; 4417 4418 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) && 4419 (!idev || idev->cnf.accept_ra != 2) && 4420 fib6_info_hold_safe(rt)) { 4421 rcu_read_unlock(); 4422 ip6_del_rt(net, rt, false); 4423 goto restart; 4424 } 4425 } 4426 rcu_read_unlock(); 4427 4428 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER; 4429 } 4430 4431 void rt6_purge_dflt_routers(struct net *net) 4432 { 4433 struct fib6_table *table; 4434 struct hlist_head *head; 4435 unsigned int h; 4436 4437 rcu_read_lock(); 4438 4439 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { 4440 head = &net->ipv6.fib_table_hash[h]; 4441 hlist_for_each_entry_rcu(table, head, tb6_hlist) { 4442 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER) 4443 __rt6_purge_dflt_routers(net, table); 4444 } 4445 } 4446 4447 rcu_read_unlock(); 4448 } 4449 4450 static void rtmsg_to_fib6_config(struct net *net, 4451 struct in6_rtmsg *rtmsg, 4452 struct fib6_config *cfg) 4453 { 4454 *cfg = (struct fib6_config){ 4455 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ? 4456 : RT6_TABLE_MAIN, 4457 .fc_ifindex = rtmsg->rtmsg_ifindex, 4458 .fc_metric = rtmsg->rtmsg_metric ? 
: IP6_RT_PRIO_USER, 4459 .fc_expires = rtmsg->rtmsg_info, 4460 .fc_dst_len = rtmsg->rtmsg_dst_len, 4461 .fc_src_len = rtmsg->rtmsg_src_len, 4462 .fc_flags = rtmsg->rtmsg_flags, 4463 .fc_type = rtmsg->rtmsg_type, 4464 4465 .fc_nlinfo.nl_net = net, 4466 4467 .fc_dst = rtmsg->rtmsg_dst, 4468 .fc_src = rtmsg->rtmsg_src, 4469 .fc_gateway = rtmsg->rtmsg_gateway, 4470 }; 4471 } 4472 4473 int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg) 4474 { 4475 struct fib6_config cfg; 4476 int err; 4477 4478 if (cmd != SIOCADDRT && cmd != SIOCDELRT) 4479 return -EINVAL; 4480 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 4481 return -EPERM; 4482 4483 rtmsg_to_fib6_config(net, rtmsg, &cfg); 4484 4485 rtnl_lock(); 4486 switch (cmd) { 4487 case SIOCADDRT: 4488 err = ip6_route_add(&cfg, GFP_KERNEL, NULL); 4489 break; 4490 case SIOCDELRT: 4491 err = ip6_route_del(&cfg, NULL); 4492 break; 4493 } 4494 rtnl_unlock(); 4495 return err; 4496 } 4497 4498 /* 4499 * Drop the packet on the floor 4500 */ 4501 4502 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes) 4503 { 4504 struct dst_entry *dst = skb_dst(skb); 4505 struct net *net = dev_net(dst->dev); 4506 struct inet6_dev *idev; 4507 int type; 4508 4509 if (netif_is_l3_master(skb->dev) && 4510 dst->dev == net->loopback_dev) 4511 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif)); 4512 else 4513 idev = ip6_dst_idev(dst); 4514 4515 switch (ipstats_mib_noroutes) { 4516 case IPSTATS_MIB_INNOROUTES: 4517 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); 4518 if (type == IPV6_ADDR_ANY) { 4519 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); 4520 break; 4521 } 4522 fallthrough; 4523 case IPSTATS_MIB_OUTNOROUTES: 4524 IP6_INC_STATS(net, idev, ipstats_mib_noroutes); 4525 break; 4526 } 4527 4528 /* Start over by dropping the dst for l3mdev case */ 4529 if (netif_is_l3_master(skb->dev)) 4530 skb_dst_drop(skb); 4531 4532 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0); 4533 kfree_skb(skb); 4534 return 0; 4535 } 4536 4537 static int ip6_pkt_discard(struct sk_buff *skb) 4538 { 4539 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES); 4540 } 4541 4542 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) 4543 { 4544 skb->dev = skb_dst(skb)->dev; 4545 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); 4546 } 4547 4548 static int ip6_pkt_prohibit(struct sk_buff *skb) 4549 { 4550 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); 4551 } 4552 4553 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb) 4554 { 4555 skb->dev = skb_dst(skb)->dev; 4556 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); 4557 } 4558 4559 /* 4560 * Allocate a dst for local (unicast / anycast) address. 4561 */ 4562 4563 struct fib6_info *addrconf_f6i_alloc(struct net *net, 4564 struct inet6_dev *idev, 4565 const struct in6_addr *addr, 4566 bool anycast, gfp_t gfp_flags) 4567 { 4568 struct fib6_config cfg = { 4569 .fc_table = l3mdev_fib_table(idev->dev) ? 
: RT6_TABLE_LOCAL, 4570 .fc_ifindex = idev->dev->ifindex, 4571 .fc_flags = RTF_UP | RTF_NONEXTHOP, 4572 .fc_dst = *addr, 4573 .fc_dst_len = 128, 4574 .fc_protocol = RTPROT_KERNEL, 4575 .fc_nlinfo.nl_net = net, 4576 .fc_ignore_dev_down = true, 4577 }; 4578 struct fib6_info *f6i; 4579 4580 if (anycast) { 4581 cfg.fc_type = RTN_ANYCAST; 4582 cfg.fc_flags |= RTF_ANYCAST; 4583 } else { 4584 cfg.fc_type = RTN_LOCAL; 4585 cfg.fc_flags |= RTF_LOCAL; 4586 } 4587 4588 f6i = ip6_route_info_create(&cfg, gfp_flags, NULL); 4589 if (!IS_ERR(f6i)) 4590 f6i->dst_nocount = true; 4591 return f6i; 4592 } 4593 4594 /* remove deleted ip from prefsrc entries */ 4595 struct arg_dev_net_ip { 4596 struct net_device *dev; 4597 struct net *net; 4598 struct in6_addr *addr; 4599 }; 4600 4601 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg) 4602 { 4603 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev; 4604 struct net *net = ((struct arg_dev_net_ip *)arg)->net; 4605 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr; 4606 4607 if (!rt->nh && 4608 ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) && 4609 rt != net->ipv6.fib6_null_entry && 4610 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) { 4611 spin_lock_bh(&rt6_exception_lock); 4612 /* remove prefsrc entry */ 4613 rt->fib6_prefsrc.plen = 0; 4614 spin_unlock_bh(&rt6_exception_lock); 4615 } 4616 return 0; 4617 } 4618 4619 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp) 4620 { 4621 struct net *net = dev_net(ifp->idev->dev); 4622 struct arg_dev_net_ip adni = { 4623 .dev = ifp->idev->dev, 4624 .net = net, 4625 .addr = &ifp->addr, 4626 }; 4627 fib6_clean_all(net, fib6_remove_prefsrc, &adni); 4628 } 4629 4630 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT) 4631 4632 /* Remove routers and update dst entries when a gateway turns into a host. */ 4633 static int fib6_clean_tohost(struct fib6_info *rt, void *arg) 4634 { 4635 struct in6_addr *gateway = (struct in6_addr *)arg; 4636 struct fib6_nh *nh; 4637 4638 /* RA routes do not use nexthops */ 4639 if (rt->nh) 4640 return 0; 4641 4642 nh = rt->fib6_nh; 4643 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) && 4644 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6)) 4645 return -1; 4646 4647 /* Further clean up cached routes in exception table. 4648 * This is needed because a cached route may have a different 4649 * gateway than its 'parent' in the case of an ip redirect.
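 * For example (addresses illustrative): after a redirect moves the
 * first hop for 2001:db8::1, the cached clone's rt6i_gateway no
 * longer matches the parent's fib_nh_gw6, so the gateway check
 * above alone would miss it.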
4650 */ 4651 fib6_nh_exceptions_clean_tohost(nh, gateway); 4652 4653 return 0; 4654 } 4655 4656 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway) 4657 { 4658 fib6_clean_all(net, fib6_clean_tohost, gateway); 4659 } 4660 4661 struct arg_netdev_event { 4662 const struct net_device *dev; 4663 union { 4664 unsigned char nh_flags; 4665 unsigned long event; 4666 }; 4667 }; 4668 4669 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt) 4670 { 4671 struct fib6_info *iter; 4672 struct fib6_node *fn; 4673 4674 fn = rcu_dereference_protected(rt->fib6_node, 4675 lockdep_is_held(&rt->fib6_table->tb6_lock)); 4676 iter = rcu_dereference_protected(fn->leaf, 4677 lockdep_is_held(&rt->fib6_table->tb6_lock)); 4678 while (iter) { 4679 if (iter->fib6_metric == rt->fib6_metric && 4680 rt6_qualify_for_ecmp(iter)) 4681 return iter; 4682 iter = rcu_dereference_protected(iter->fib6_next, 4683 lockdep_is_held(&rt->fib6_table->tb6_lock)); 4684 } 4685 4686 return NULL; 4687 } 4688 4689 /* only called for fib entries with builtin fib6_nh */ 4690 static bool rt6_is_dead(const struct fib6_info *rt) 4691 { 4692 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD || 4693 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN && 4694 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev))) 4695 return true; 4696 4697 return false; 4698 } 4699 4700 static int rt6_multipath_total_weight(const struct fib6_info *rt) 4701 { 4702 struct fib6_info *iter; 4703 int total = 0; 4704 4705 if (!rt6_is_dead(rt)) 4706 total += rt->fib6_nh->fib_nh_weight; 4707 4708 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) { 4709 if (!rt6_is_dead(iter)) 4710 total += iter->fib6_nh->fib_nh_weight; 4711 } 4712 4713 return total; 4714 } 4715 4716 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total) 4717 { 4718 int upper_bound = -1; 4719 4720 if (!rt6_is_dead(rt)) { 4721 *weight += rt->fib6_nh->fib_nh_weight; 4722 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31, 4723 total) - 1; 4724 } 4725 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound); 4726 } 4727 4728 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total) 4729 { 4730 struct fib6_info *iter; 4731 int weight = 0; 4732 4733 rt6_upper_bound_set(rt, &weight, total); 4734 4735 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 4736 rt6_upper_bound_set(iter, &weight, total); 4737 } 4738 4739 void rt6_multipath_rebalance(struct fib6_info *rt) 4740 { 4741 struct fib6_info *first; 4742 int total; 4743 4744 /* In case the entire multipath route was marked for flushing, 4745 * then there is no need to rebalance upon the removal of every 4746 * sibling route. 4747 */ 4748 if (!rt->fib6_nsiblings || rt->should_flush) 4749 return; 4750 4751 /* During lookup routes are evaluated in order, so we need to 4752 * make sure upper bounds are assigned from the first sibling 4753 * onwards. 
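 * A worked example of the math in rt6_upper_bound_set(): with live
 * sibling weights 1, 2 and 1 (total 4), the cumulative upper bounds
 * become 2^31/4 - 1, 3 * 2^31/4 - 1 and 2^31 - 1 (0x1fffffff,
 * 0x5fffffff and 0x7fffffff), so a flow hash compared against them
 * selects the nexthops in a 1:2:1 ratio.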
4754 */ 4755 first = rt6_multipath_first_sibling(rt); 4756 if (WARN_ON_ONCE(!first)) 4757 return; 4758 4759 total = rt6_multipath_total_weight(first); 4760 rt6_multipath_upper_bound_set(first, total); 4761 } 4762 4763 static int fib6_ifup(struct fib6_info *rt, void *p_arg) 4764 { 4765 const struct arg_netdev_event *arg = p_arg; 4766 struct net *net = dev_net(arg->dev); 4767 4768 if (rt != net->ipv6.fib6_null_entry && !rt->nh && 4769 rt->fib6_nh->fib_nh_dev == arg->dev) { 4770 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags; 4771 fib6_update_sernum_upto_root(net, rt); 4772 rt6_multipath_rebalance(rt); 4773 } 4774 4775 return 0; 4776 } 4777 4778 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags) 4779 { 4780 struct arg_netdev_event arg = { 4781 .dev = dev, 4782 { 4783 .nh_flags = nh_flags, 4784 }, 4785 }; 4786 4787 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev)) 4788 arg.nh_flags |= RTNH_F_LINKDOWN; 4789 4790 fib6_clean_all(dev_net(dev), fib6_ifup, &arg); 4791 } 4792 4793 /* only called for fib entries with inline fib6_nh */ 4794 static bool rt6_multipath_uses_dev(const struct fib6_info *rt, 4795 const struct net_device *dev) 4796 { 4797 struct fib6_info *iter; 4798 4799 if (rt->fib6_nh->fib_nh_dev == dev) 4800 return true; 4801 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 4802 if (iter->fib6_nh->fib_nh_dev == dev) 4803 return true; 4804 4805 return false; 4806 } 4807 4808 static void rt6_multipath_flush(struct fib6_info *rt) 4809 { 4810 struct fib6_info *iter; 4811 4812 rt->should_flush = 1; 4813 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 4814 iter->should_flush = 1; 4815 } 4816 4817 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt, 4818 const struct net_device *down_dev) 4819 { 4820 struct fib6_info *iter; 4821 unsigned int dead = 0; 4822 4823 if (rt->fib6_nh->fib_nh_dev == down_dev || 4824 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD) 4825 dead++; 4826 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 4827 if (iter->fib6_nh->fib_nh_dev == down_dev || 4828 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD) 4829 dead++; 4830 4831 return dead; 4832 } 4833 4834 static void rt6_multipath_nh_flags_set(struct fib6_info *rt, 4835 const struct net_device *dev, 4836 unsigned char nh_flags) 4837 { 4838 struct fib6_info *iter; 4839 4840 if (rt->fib6_nh->fib_nh_dev == dev) 4841 rt->fib6_nh->fib_nh_flags |= nh_flags; 4842 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 4843 if (iter->fib6_nh->fib_nh_dev == dev) 4844 iter->fib6_nh->fib_nh_flags |= nh_flags; 4845 } 4846 4847 /* called with write lock held for table with rt */ 4848 static int fib6_ifdown(struct fib6_info *rt, void *p_arg) 4849 { 4850 const struct arg_netdev_event *arg = p_arg; 4851 const struct net_device *dev = arg->dev; 4852 struct net *net = dev_net(dev); 4853 4854 if (rt == net->ipv6.fib6_null_entry || rt->nh) 4855 return 0; 4856 4857 switch (arg->event) { 4858 case NETDEV_UNREGISTER: 4859 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0; 4860 case NETDEV_DOWN: 4861 if (rt->should_flush) 4862 return -1; 4863 if (!rt->fib6_nsiblings) 4864 return rt->fib6_nh->fib_nh_dev == dev ? 
-1 : 0; 4865 if (rt6_multipath_uses_dev(rt, dev)) { 4866 unsigned int count; 4867 4868 count = rt6_multipath_dead_count(rt, dev); 4869 if (rt->fib6_nsiblings + 1 == count) { 4870 rt6_multipath_flush(rt); 4871 return -1; 4872 } 4873 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD | 4874 RTNH_F_LINKDOWN); 4875 fib6_update_sernum(net, rt); 4876 rt6_multipath_rebalance(rt); 4877 } 4878 return -2; 4879 case NETDEV_CHANGE: 4880 if (rt->fib6_nh->fib_nh_dev != dev || 4881 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) 4882 break; 4883 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN; 4884 rt6_multipath_rebalance(rt); 4885 break; 4886 } 4887 4888 return 0; 4889 } 4890 4891 void rt6_sync_down_dev(struct net_device *dev, unsigned long event) 4892 { 4893 struct arg_netdev_event arg = { 4894 .dev = dev, 4895 { 4896 .event = event, 4897 }, 4898 }; 4899 struct net *net = dev_net(dev); 4900 4901 if (net->ipv6.sysctl.skip_notify_on_dev_down) 4902 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg); 4903 else 4904 fib6_clean_all(net, fib6_ifdown, &arg); 4905 } 4906 4907 void rt6_disable_ip(struct net_device *dev, unsigned long event) 4908 { 4909 rt6_sync_down_dev(dev, event); 4910 rt6_uncached_list_flush_dev(dev_net(dev), dev); 4911 neigh_ifdown(&nd_tbl, dev); 4912 } 4913 4914 struct rt6_mtu_change_arg { 4915 struct net_device *dev; 4916 unsigned int mtu; 4917 struct fib6_info *f6i; 4918 }; 4919 4920 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg) 4921 { 4922 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg; 4923 struct fib6_info *f6i = arg->f6i; 4924 4925 /* For administrative MTU increase, there is no way to discover 4926 * IPv6 PMTU increase, so PMTU increase should be updated here. 4927 * Since RFC 1981 doesn't include administrative MTU increase, 4928 * updating the PMTU on increase is a MUST (e.g. for jumbo frames). 4929 */ 4930 if (nh->fib_nh_dev == arg->dev) { 4931 struct inet6_dev *idev = __in6_dev_get(arg->dev); 4932 u32 mtu = f6i->fib6_pmtu; 4933 4934 if (mtu >= arg->mtu || 4935 (mtu < arg->mtu && mtu == idev->cnf.mtu6)) 4936 fib6_metric_set(f6i, RTAX_MTU, arg->mtu); 4937 4938 spin_lock_bh(&rt6_exception_lock); 4939 rt6_exceptions_update_pmtu(idev, nh, arg->mtu); 4940 spin_unlock_bh(&rt6_exception_lock); 4941 } 4942 4943 return 0; 4944 } 4945 4946 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg) 4947 { 4948 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; 4949 struct inet6_dev *idev; 4950 4951 /* In IPv6, PMTU discovery is not optional, 4952 so the RTAX_MTU lock cannot disable it. 4953 We still use this lock to block changes 4954 caused by addrconf/ndisc.
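 For example, a route installed with "ip -6 route add ... mtu
 lock 1280" (illustrative command) has RTAX_MTU locked, and the
 fib6_metric_locked() check below leaves it untouched.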
4955 */ 4956 4957 idev = __in6_dev_get(arg->dev); 4958 if (!idev) 4959 return 0; 4960 4961 if (fib6_metric_locked(f6i, RTAX_MTU)) 4962 return 0; 4963 4964 arg->f6i = f6i; 4965 if (f6i->nh) { 4966 /* fib6_nh_mtu_change only returns 0, so this is safe */ 4967 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change, 4968 arg); 4969 } 4970 4971 return fib6_nh_mtu_change(f6i->fib6_nh, arg); 4972 } 4973 4974 void rt6_mtu_change(struct net_device *dev, unsigned int mtu) 4975 { 4976 struct rt6_mtu_change_arg arg = { 4977 .dev = dev, 4978 .mtu = mtu, 4979 }; 4980 4981 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg); 4982 } 4983 4984 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { 4985 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 }, 4986 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, 4987 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) }, 4988 [RTA_OIF] = { .type = NLA_U32 }, 4989 [RTA_IIF] = { .type = NLA_U32 }, 4990 [RTA_PRIORITY] = { .type = NLA_U32 }, 4991 [RTA_METRICS] = { .type = NLA_NESTED }, 4992 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, 4993 [RTA_PREF] = { .type = NLA_U8 }, 4994 [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, 4995 [RTA_ENCAP] = { .type = NLA_NESTED }, 4996 [RTA_EXPIRES] = { .type = NLA_U32 }, 4997 [RTA_UID] = { .type = NLA_U32 }, 4998 [RTA_MARK] = { .type = NLA_U32 }, 4999 [RTA_TABLE] = { .type = NLA_U32 }, 5000 [RTA_IP_PROTO] = { .type = NLA_U8 }, 5001 [RTA_SPORT] = { .type = NLA_U16 }, 5002 [RTA_DPORT] = { .type = NLA_U16 }, 5003 [RTA_NH_ID] = { .type = NLA_U32 }, 5004 }; 5005 5006 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, 5007 struct fib6_config *cfg, 5008 struct netlink_ext_ack *extack) 5009 { 5010 struct rtmsg *rtm; 5011 struct nlattr *tb[RTA_MAX+1]; 5012 unsigned int pref; 5013 int err; 5014 5015 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX, 5016 rtm_ipv6_policy, extack); 5017 if (err < 0) 5018 goto errout; 5019 5020 err = -EINVAL; 5021 rtm = nlmsg_data(nlh); 5022 5023 *cfg = (struct fib6_config){ 5024 .fc_table = rtm->rtm_table, 5025 .fc_dst_len = rtm->rtm_dst_len, 5026 .fc_src_len = rtm->rtm_src_len, 5027 .fc_flags = RTF_UP, 5028 .fc_protocol = rtm->rtm_protocol, 5029 .fc_type = rtm->rtm_type, 5030 5031 .fc_nlinfo.portid = NETLINK_CB(skb).portid, 5032 .fc_nlinfo.nlh = nlh, 5033 .fc_nlinfo.nl_net = sock_net(skb->sk), 5034 }; 5035 5036 if (rtm->rtm_type == RTN_UNREACHABLE || 5037 rtm->rtm_type == RTN_BLACKHOLE || 5038 rtm->rtm_type == RTN_PROHIBIT || 5039 rtm->rtm_type == RTN_THROW) 5040 cfg->fc_flags |= RTF_REJECT; 5041 5042 if (rtm->rtm_type == RTN_LOCAL) 5043 cfg->fc_flags |= RTF_LOCAL; 5044 5045 if (rtm->rtm_flags & RTM_F_CLONED) 5046 cfg->fc_flags |= RTF_CACHE; 5047 5048 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK); 5049 5050 if (tb[RTA_NH_ID]) { 5051 if (tb[RTA_GATEWAY] || tb[RTA_OIF] || 5052 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) { 5053 NL_SET_ERR_MSG(extack, 5054 "Nexthop specification and nexthop id are mutually exclusive"); 5055 goto errout; 5056 } 5057 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]); 5058 } 5059 5060 if (tb[RTA_GATEWAY]) { 5061 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]); 5062 cfg->fc_flags |= RTF_GATEWAY; 5063 } 5064 if (tb[RTA_VIA]) { 5065 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute"); 5066 goto errout; 5067 } 5068 5069 if (tb[RTA_DST]) { 5070 int plen = (rtm->rtm_dst_len + 7) >> 3; 5071 5072 if (nla_len(tb[RTA_DST]) < plen) 5073 goto errout; 5074 5075 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen); 5076 } 5077 5078 if 
(tb[RTA_SRC]) { 5079 int plen = (rtm->rtm_src_len + 7) >> 3; 5080 5081 if (nla_len(tb[RTA_SRC]) < plen) 5082 goto errout; 5083 5084 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen); 5085 } 5086 5087 if (tb[RTA_PREFSRC]) 5088 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]); 5089 5090 if (tb[RTA_OIF]) 5091 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]); 5092 5093 if (tb[RTA_PRIORITY]) 5094 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]); 5095 5096 if (tb[RTA_METRICS]) { 5097 cfg->fc_mx = nla_data(tb[RTA_METRICS]); 5098 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]); 5099 } 5100 5101 if (tb[RTA_TABLE]) 5102 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]); 5103 5104 if (tb[RTA_MULTIPATH]) { 5105 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); 5106 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); 5107 5108 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp, 5109 cfg->fc_mp_len, extack); 5110 if (err < 0) 5111 goto errout; 5112 } 5113 5114 if (tb[RTA_PREF]) { 5115 pref = nla_get_u8(tb[RTA_PREF]); 5116 if (pref != ICMPV6_ROUTER_PREF_LOW && 5117 pref != ICMPV6_ROUTER_PREF_HIGH) 5118 pref = ICMPV6_ROUTER_PREF_MEDIUM; 5119 cfg->fc_flags |= RTF_PREF(pref); 5120 } 5121 5122 if (tb[RTA_ENCAP]) 5123 cfg->fc_encap = tb[RTA_ENCAP]; 5124 5125 if (tb[RTA_ENCAP_TYPE]) { 5126 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); 5127 5128 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack); 5129 if (err < 0) 5130 goto errout; 5131 } 5132 5133 if (tb[RTA_EXPIRES]) { 5134 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ); 5135 5136 if (addrconf_finite_timeout(timeout)) { 5137 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ); 5138 cfg->fc_flags |= RTF_EXPIRES; 5139 } 5140 } 5141 5142 err = 0; 5143 errout: 5144 return err; 5145 } 5146 5147 struct rt6_nh { 5148 struct fib6_info *fib6_info; 5149 struct fib6_config r_cfg; 5150 struct list_head next; 5151 }; 5152 5153 static int ip6_route_info_append(struct net *net, 5154 struct list_head *rt6_nh_list, 5155 struct fib6_info *rt, 5156 struct fib6_config *r_cfg) 5157 { 5158 struct rt6_nh *nh; 5159 int err = -EEXIST; 5160 5161 list_for_each_entry(nh, rt6_nh_list, next) { 5162 /* check if fib6_info already exists */ 5163 if (rt6_duplicate_nexthop(nh->fib6_info, rt)) 5164 return err; 5165 } 5166 5167 nh = kzalloc(sizeof(*nh), GFP_KERNEL); 5168 if (!nh) 5169 return -ENOMEM; 5170 nh->fib6_info = rt; 5171 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); 5172 list_add_tail(&nh->next, rt6_nh_list); 5173 5174 return 0; 5175 } 5176 5177 static void ip6_route_mpath_notify(struct fib6_info *rt, 5178 struct fib6_info *rt_last, 5179 struct nl_info *info, 5180 __u16 nlflags) 5181 { 5182 /* if this is an APPEND route, then rt points to the first route 5183 * inserted and rt_last points to last route inserted. Userspace 5184 * wants a consistent dump of the route which starts at the first 5185 * nexthop. 
Since sibling routes are always added at the end of 5186 * the list, find the first sibling of the last route appended 5187 */ 5188 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) { 5189 rt = list_first_entry(&rt_last->fib6_siblings, 5190 struct fib6_info, 5191 fib6_siblings); 5192 } 5193 5194 if (rt) 5195 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); 5196 } 5197 5198 static bool ip6_route_mpath_should_notify(const struct fib6_info *rt) 5199 { 5200 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt); 5201 bool should_notify = false; 5202 struct fib6_info *leaf; 5203 struct fib6_node *fn; 5204 5205 rcu_read_lock(); 5206 fn = rcu_dereference(rt->fib6_node); 5207 if (!fn) 5208 goto out; 5209 5210 leaf = rcu_dereference(fn->leaf); 5211 if (!leaf) 5212 goto out; 5213 5214 if (rt == leaf || 5215 (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric && 5216 rt6_qualify_for_ecmp(leaf))) 5217 should_notify = true; 5218 out: 5219 rcu_read_unlock(); 5220 5221 return should_notify; 5222 } 5223 5224 static int ip6_route_multipath_add(struct fib6_config *cfg, 5225 struct netlink_ext_ack *extack) 5226 { 5227 struct fib6_info *rt_notif = NULL, *rt_last = NULL; 5228 struct nl_info *info = &cfg->fc_nlinfo; 5229 struct fib6_config r_cfg; 5230 struct rtnexthop *rtnh; 5231 struct fib6_info *rt; 5232 struct rt6_nh *err_nh; 5233 struct rt6_nh *nh, *nh_safe; 5234 __u16 nlflags; 5235 int remaining; 5236 int attrlen; 5237 int err = 1; 5238 int nhn = 0; 5239 int replace = (cfg->fc_nlinfo.nlh && 5240 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE)); 5241 LIST_HEAD(rt6_nh_list); 5242 5243 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE; 5244 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND) 5245 nlflags |= NLM_F_APPEND; 5246 5247 remaining = cfg->fc_mp_len; 5248 rtnh = (struct rtnexthop *)cfg->fc_mp; 5249 5250 /* Parse a Multipath Entry and build a list (rt6_nh_list) of 5251 * fib6_info structs per nexthop 5252 */ 5253 while (rtnh_ok(rtnh, remaining)) { 5254 memcpy(&r_cfg, cfg, sizeof(*cfg)); 5255 if (rtnh->rtnh_ifindex) 5256 r_cfg.fc_ifindex = rtnh->rtnh_ifindex; 5257 5258 attrlen = rtnh_attrlen(rtnh); 5259 if (attrlen > 0) { 5260 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 5261 5262 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 5263 if (nla) { 5264 r_cfg.fc_gateway = nla_get_in6_addr(nla); 5265 r_cfg.fc_flags |= RTF_GATEWAY; 5266 } 5267 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); 5268 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); 5269 if (nla) 5270 r_cfg.fc_encap_type = nla_get_u16(nla); 5271 } 5272 5273 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK); 5274 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack); 5275 if (IS_ERR(rt)) { 5276 err = PTR_ERR(rt); 5277 rt = NULL; 5278 goto cleanup; 5279 } 5280 if (!rt6_qualify_for_ecmp(rt)) { 5281 err = -EINVAL; 5282 NL_SET_ERR_MSG(extack, 5283 "Device only routes can not be added for IPv6 using the multipath API."); 5284 fib6_info_release(rt); 5285 goto cleanup; 5286 } 5287 5288 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1; 5289 5290 err = ip6_route_info_append(info->nl_net, &rt6_nh_list, 5291 rt, &r_cfg); 5292 if (err) { 5293 fib6_info_release(rt); 5294 goto cleanup; 5295 } 5296 5297 rtnh = rtnh_next(rtnh, &remaining); 5298 } 5299 5300 if (list_empty(&rt6_nh_list)) { 5301 NL_SET_ERR_MSG(extack, 5302 "Invalid nexthop configuration - no valid nexthops"); 5303 return -EINVAL; 5304 } 5305 5306 /* for add and replace send one notification with all nexthops. 
5307 * Skip the notification in fib6_add_rt2node and send one with 5308 * the full route when done 5309 */ 5310 info->skip_notify = 1; 5311 5312 /* For add and replace, send one notification with all nexthops. For 5313 * append, send one notification with all appended nexthops. 5314 */ 5315 info->skip_notify_kernel = 1; 5316 5317 err_nh = NULL; 5318 list_for_each_entry(nh, &rt6_nh_list, next) { 5319 err = __ip6_ins_rt(nh->fib6_info, info, extack); 5320 fib6_info_release(nh->fib6_info); 5321 5322 if (!err) { 5323 /* save reference to last route successfully inserted */ 5324 rt_last = nh->fib6_info; 5325 5326 /* save reference to first route for notification */ 5327 if (!rt_notif) 5328 rt_notif = nh->fib6_info; 5329 } 5330 5331 /* nh->fib6_info is used or freed at this point, reset to NULL*/ 5332 nh->fib6_info = NULL; 5333 if (err) { 5334 if (replace && nhn) 5335 NL_SET_ERR_MSG_MOD(extack, 5336 "multipath route replace failed (check consistency of installed routes)"); 5337 err_nh = nh; 5338 goto add_errout; 5339 } 5340 5341 /* Because each route is added like a single route we remove 5342 * these flags after the first nexthop: if there is a collision, 5343 * we have already failed to add the first nexthop: 5344 * fib6_add_rt2node() has rejected it; when replacing, old 5345 * nexthops have been replaced by first new, the rest should 5346 * be added to it. 5347 */ 5348 if (cfg->fc_nlinfo.nlh) { 5349 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | 5350 NLM_F_REPLACE); 5351 cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; 5352 } 5353 nhn++; 5354 } 5355 5356 /* An in-kernel notification should only be sent in case the new 5357 * multipath route is added as the first route in the node, or if 5358 * it was appended to it. We pass 'rt_notif' since it is the first 5359 * sibling and might allow us to skip some checks in the replace case. 5360 */ 5361 if (ip6_route_mpath_should_notify(rt_notif)) { 5362 enum fib_event_type fib_event; 5363 5364 if (rt_notif->fib6_nsiblings != nhn - 1) 5365 fib_event = FIB_EVENT_ENTRY_APPEND; 5366 else 5367 fib_event = FIB_EVENT_ENTRY_REPLACE; 5368 5369 err = call_fib6_multipath_entry_notifiers(info->nl_net, 5370 fib_event, rt_notif, 5371 nhn - 1, extack); 5372 if (err) { 5373 /* Delete all the siblings that were just added */ 5374 err_nh = NULL; 5375 goto add_errout; 5376 } 5377 } 5378 5379 /* success ... 
tell user about new route */ 5380 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags); 5381 goto cleanup; 5382 5383 add_errout: 5384 /* send notification for routes that were added so that 5385 * the delete notifications sent by ip6_route_del are 5386 * coherent 5387 */ 5388 if (rt_notif) 5389 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags); 5390 5391 /* Delete routes that were already added */ 5392 list_for_each_entry(nh, &rt6_nh_list, next) { 5393 if (err_nh == nh) 5394 break; 5395 ip6_route_del(&nh->r_cfg, extack); 5396 } 5397 5398 cleanup: 5399 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { 5400 if (nh->fib6_info) 5401 fib6_info_release(nh->fib6_info); 5402 list_del(&nh->next); 5403 kfree(nh); 5404 } 5405 5406 return err; 5407 } 5408 5409 static int ip6_route_multipath_del(struct fib6_config *cfg, 5410 struct netlink_ext_ack *extack) 5411 { 5412 struct fib6_config r_cfg; 5413 struct rtnexthop *rtnh; 5414 int last_err = 0; 5415 int remaining; 5416 int attrlen; 5417 int err; 5418 5419 remaining = cfg->fc_mp_len; 5420 rtnh = (struct rtnexthop *)cfg->fc_mp; 5421 5422 /* Parse a Multipath Entry */ 5423 while (rtnh_ok(rtnh, remaining)) { 5424 memcpy(&r_cfg, cfg, sizeof(*cfg)); 5425 if (rtnh->rtnh_ifindex) 5426 r_cfg.fc_ifindex = rtnh->rtnh_ifindex; 5427 5428 attrlen = rtnh_attrlen(rtnh); 5429 if (attrlen > 0) { 5430 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 5431 5432 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 5433 if (nla) { 5434 nla_memcpy(&r_cfg.fc_gateway, nla, 16); 5435 r_cfg.fc_flags |= RTF_GATEWAY; 5436 } 5437 } 5438 err = ip6_route_del(&r_cfg, extack); 5439 if (err) 5440 last_err = err; 5441 5442 rtnh = rtnh_next(rtnh, &remaining); 5443 } 5444 5445 return last_err; 5446 } 5447 5448 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, 5449 struct netlink_ext_ack *extack) 5450 { 5451 struct fib6_config cfg; 5452 int err; 5453 5454 err = rtm_to_fib6_config(skb, nlh, &cfg, extack); 5455 if (err < 0) 5456 return err; 5457 5458 if (cfg.fc_nh_id && 5459 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) { 5460 NL_SET_ERR_MSG(extack, "Nexthop id does not exist"); 5461 return -EINVAL; 5462 } 5463 5464 if (cfg.fc_mp) 5465 return ip6_route_multipath_del(&cfg, extack); 5466 else { 5467 cfg.fc_delete_all_nh = 1; 5468 return ip6_route_del(&cfg, extack); 5469 } 5470 } 5471 5472 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, 5473 struct netlink_ext_ack *extack) 5474 { 5475 struct fib6_config cfg; 5476 int err; 5477 5478 err = rtm_to_fib6_config(skb, nlh, &cfg, extack); 5479 if (err < 0) 5480 return err; 5481 5482 if (cfg.fc_metric == 0) 5483 cfg.fc_metric = IP6_RT_PRIO_USER; 5484 5485 if (cfg.fc_mp) 5486 return ip6_route_multipath_add(&cfg, extack); 5487 else 5488 return ip6_route_add(&cfg, GFP_KERNEL, extack); 5489 } 5490 5491 /* add the overhead of this fib6_nh to nexthop_len */ 5492 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg) 5493 { 5494 int *nexthop_len = arg; 5495 5496 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */ 5497 + NLA_ALIGN(sizeof(struct rtnexthop)) 5498 + nla_total_size(16); /* RTA_GATEWAY */ 5499 5500 if (nh->fib_nh_lws) { 5501 /* RTA_ENCAP_TYPE */ 5502 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws); 5503 /* RTA_ENCAP */ 5504 *nexthop_len += nla_total_size(2); 5505 } 5506 5507 return 0; 5508 } 5509 5510 static size_t rt6_nlmsg_size(struct fib6_info *f6i) 5511 { 5512 int nexthop_len; 5513 5514 if (f6i->nh) { 5515 nexthop_len = nla_total_size(4); /* RTA_NH_ID */ 5516 
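/* Each fib6_nh in the nexthop group then adds its own
 * RTA_MULTIPATH / RTA_GATEWAY (and optional encap) overhead
 * via rt6_nh_nlmsg_size() above.
 */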
nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size, 5517 &nexthop_len); 5518 } else { 5519 struct fib6_nh *nh = f6i->fib6_nh; 5520 5521 nexthop_len = 0; 5522 if (f6i->fib6_nsiblings) { 5523 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ 5524 + NLA_ALIGN(sizeof(struct rtnexthop)) 5525 + nla_total_size(16) /* RTA_GATEWAY */ 5526 + lwtunnel_get_encap_size(nh->fib_nh_lws); 5527 5528 nexthop_len *= f6i->fib6_nsiblings; 5529 } 5530 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws); 5531 } 5532 5533 return NLMSG_ALIGN(sizeof(struct rtmsg)) 5534 + nla_total_size(16) /* RTA_SRC */ 5535 + nla_total_size(16) /* RTA_DST */ 5536 + nla_total_size(16) /* RTA_GATEWAY */ 5537 + nla_total_size(16) /* RTA_PREFSRC */ 5538 + nla_total_size(4) /* RTA_TABLE */ 5539 + nla_total_size(4) /* RTA_IIF */ 5540 + nla_total_size(4) /* RTA_OIF */ 5541 + nla_total_size(4) /* RTA_PRIORITY */ 5542 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */ 5543 + nla_total_size(sizeof(struct rta_cacheinfo)) 5544 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ 5545 + nla_total_size(1) /* RTA_PREF */ 5546 + nexthop_len; 5547 } 5548 5549 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh, 5550 unsigned char *flags) 5551 { 5552 if (nexthop_is_multipath(nh)) { 5553 struct nlattr *mp; 5554 5555 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH); 5556 if (!mp) 5557 goto nla_put_failure; 5558 5559 if (nexthop_mpath_fill_node(skb, nh, AF_INET6)) 5560 goto nla_put_failure; 5561 5562 nla_nest_end(skb, mp); 5563 } else { 5564 struct fib6_nh *fib6_nh; 5565 5566 fib6_nh = nexthop_fib6_nh(nh); 5567 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6, 5568 flags, false) < 0) 5569 goto nla_put_failure; 5570 } 5571 5572 return 0; 5573 5574 nla_put_failure: 5575 return -EMSGSIZE; 5576 } 5577 5578 static int rt6_fill_node(struct net *net, struct sk_buff *skb, 5579 struct fib6_info *rt, struct dst_entry *dst, 5580 struct in6_addr *dest, struct in6_addr *src, 5581 int iif, int type, u32 portid, u32 seq, 5582 unsigned int flags) 5583 { 5584 struct rt6_info *rt6 = (struct rt6_info *)dst; 5585 struct rt6key *rt6_dst, *rt6_src; 5586 u32 *pmetrics, table, rt6_flags; 5587 unsigned char nh_flags = 0; 5588 struct nlmsghdr *nlh; 5589 struct rtmsg *rtm; 5590 long expires = 0; 5591 5592 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); 5593 if (!nlh) 5594 return -EMSGSIZE; 5595 5596 if (rt6) { 5597 rt6_dst = &rt6->rt6i_dst; 5598 rt6_src = &rt6->rt6i_src; 5599 rt6_flags = rt6->rt6i_flags; 5600 } else { 5601 rt6_dst = &rt->fib6_dst; 5602 rt6_src = &rt->fib6_src; 5603 rt6_flags = rt->fib6_flags; 5604 } 5605 5606 rtm = nlmsg_data(nlh); 5607 rtm->rtm_family = AF_INET6; 5608 rtm->rtm_dst_len = rt6_dst->plen; 5609 rtm->rtm_src_len = rt6_src->plen; 5610 rtm->rtm_tos = 0; 5611 if (rt->fib6_table) 5612 table = rt->fib6_table->tb6_id; 5613 else 5614 table = RT6_TABLE_UNSPEC; 5615 rtm->rtm_table = table < 256 ? 
table : RT_TABLE_COMPAT; 5616 if (nla_put_u32(skb, RTA_TABLE, table)) 5617 goto nla_put_failure; 5618 5619 rtm->rtm_type = rt->fib6_type; 5620 rtm->rtm_flags = 0; 5621 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 5622 rtm->rtm_protocol = rt->fib6_protocol; 5623 5624 if (rt6_flags & RTF_CACHE) 5625 rtm->rtm_flags |= RTM_F_CLONED; 5626 5627 if (dest) { 5628 if (nla_put_in6_addr(skb, RTA_DST, dest)) 5629 goto nla_put_failure; 5630 rtm->rtm_dst_len = 128; 5631 } else if (rtm->rtm_dst_len) 5632 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr)) 5633 goto nla_put_failure; 5634 #ifdef CONFIG_IPV6_SUBTREES 5635 if (src) { 5636 if (nla_put_in6_addr(skb, RTA_SRC, src)) 5637 goto nla_put_failure; 5638 rtm->rtm_src_len = 128; 5639 } else if (rtm->rtm_src_len && 5640 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr)) 5641 goto nla_put_failure; 5642 #endif 5643 if (iif) { 5644 #ifdef CONFIG_IPV6_MROUTE 5645 if (ipv6_addr_is_multicast(&rt6_dst->addr)) { 5646 int err = ip6mr_get_route(net, skb, rtm, portid); 5647 5648 if (err == 0) 5649 return 0; 5650 if (err < 0) 5651 goto nla_put_failure; 5652 } else 5653 #endif 5654 if (nla_put_u32(skb, RTA_IIF, iif)) 5655 goto nla_put_failure; 5656 } else if (dest) { 5657 struct in6_addr saddr_buf; 5658 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 && 5659 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 5660 goto nla_put_failure; 5661 } 5662 5663 if (rt->fib6_prefsrc.plen) { 5664 struct in6_addr saddr_buf; 5665 saddr_buf = rt->fib6_prefsrc.addr; 5666 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 5667 goto nla_put_failure; 5668 } 5669 5670 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics; 5671 if (rtnetlink_put_metrics(skb, pmetrics) < 0) 5672 goto nla_put_failure; 5673 5674 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric)) 5675 goto nla_put_failure; 5676 5677 /* For multipath routes, walk the siblings list and add 5678 * each as a nexthop within RTA_MULTIPATH. 5679 */ 5680 if (rt6) { 5681 if (rt6_flags & RTF_GATEWAY && 5682 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway)) 5683 goto nla_put_failure; 5684 5685 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex)) 5686 goto nla_put_failure; 5687 5688 if (dst->lwtstate && 5689 lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0) 5690 goto nla_put_failure; 5691 } else if (rt->fib6_nsiblings) { 5692 struct fib6_info *sibling, *next_sibling; 5693 struct nlattr *mp; 5694 5695 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH); 5696 if (!mp) 5697 goto nla_put_failure; 5698 5699 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common, 5700 rt->fib6_nh->fib_nh_weight, AF_INET6) < 0) 5701 goto nla_put_failure; 5702 5703 list_for_each_entry_safe(sibling, next_sibling, 5704 &rt->fib6_siblings, fib6_siblings) { 5705 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common, 5706 sibling->fib6_nh->fib_nh_weight, 5707 AF_INET6) < 0) 5708 goto nla_put_failure; 5709 } 5710 5711 nla_nest_end(skb, mp); 5712 } else if (rt->nh) { 5713 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id)) 5714 goto nla_put_failure; 5715 5716 if (nexthop_is_blackhole(rt->nh)) 5717 rtm->rtm_type = RTN_BLACKHOLE; 5718 5719 if (net->ipv4.sysctl_nexthop_compat_mode && 5720 rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0) 5721 goto nla_put_failure; 5722 5723 rtm->rtm_flags |= nh_flags; 5724 } else { 5725 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6, 5726 &nh_flags, false) < 0) 5727 goto nla_put_failure; 5728 5729 rtm->rtm_flags |= nh_flags; 5730 } 5731 5732 if (rt6_flags & RTF_EXPIRES) { 5733 expires = dst ? 
dst->expires : rt->expires; 5734 expires -= jiffies; 5735 } 5736 5737 if (!dst) { 5738 if (rt->offload) 5739 rtm->rtm_flags |= RTM_F_OFFLOAD; 5740 if (rt->trap) 5741 rtm->rtm_flags |= RTM_F_TRAP; 5742 if (rt->offload_failed) 5743 rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED; 5744 } 5745 5746 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0) 5747 goto nla_put_failure; 5748 5749 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags))) 5750 goto nla_put_failure; 5751 5752 5753 nlmsg_end(skb, nlh); 5754 return 0; 5755 5756 nla_put_failure: 5757 nlmsg_cancel(skb, nlh); 5758 return -EMSGSIZE; 5759 } 5760 5761 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg) 5762 { 5763 const struct net_device *dev = arg; 5764 5765 if (nh->fib_nh_dev == dev) 5766 return 1; 5767 5768 return 0; 5769 } 5770 5771 static bool fib6_info_uses_dev(const struct fib6_info *f6i, 5772 const struct net_device *dev) 5773 { 5774 if (f6i->nh) { 5775 struct net_device *_dev = (struct net_device *)dev; 5776 5777 return !!nexthop_for_each_fib6_nh(f6i->nh, 5778 fib6_info_nh_uses_dev, 5779 _dev); 5780 } 5781 5782 if (f6i->fib6_nh->fib_nh_dev == dev) 5783 return true; 5784 5785 if (f6i->fib6_nsiblings) { 5786 struct fib6_info *sibling, *next_sibling; 5787 5788 list_for_each_entry_safe(sibling, next_sibling, 5789 &f6i->fib6_siblings, fib6_siblings) { 5790 if (sibling->fib6_nh->fib_nh_dev == dev) 5791 return true; 5792 } 5793 } 5794 5795 return false; 5796 } 5797 5798 struct fib6_nh_exception_dump_walker { 5799 struct rt6_rtnl_dump_arg *dump; 5800 struct fib6_info *rt; 5801 unsigned int flags; 5802 unsigned int skip; 5803 unsigned int count; 5804 }; 5805 5806 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg) 5807 { 5808 struct fib6_nh_exception_dump_walker *w = arg; 5809 struct rt6_rtnl_dump_arg *dump = w->dump; 5810 struct rt6_exception_bucket *bucket; 5811 struct rt6_exception *rt6_ex; 5812 int i, err; 5813 5814 bucket = fib6_nh_get_excptn_bucket(nh, NULL); 5815 if (!bucket) 5816 return 0; 5817 5818 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 5819 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { 5820 if (w->skip) { 5821 w->skip--; 5822 continue; 5823 } 5824 5825 /* Expiration of entries doesn't bump sernum, insertion 5826 * does. Removal is triggered by insertion, so we can 5827 * rely on the fact that if entries change between two 5828 * partial dumps, this node is scanned again completely, 5829 * see rt6_insert_exception() and fib6_dump_table(). 5830 * 5831 * Count expired entries we go through as handled 5832 * entries that we'll skip next time, in case of partial 5833 * node dump. Otherwise, if entries expire meanwhile, 5834 * we'll skip the wrong amount. 
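 * For instance, if a partial dump stopped after three entries, one
 * of them already expired, the next pass must still skip three
 * bucket slots, not two: the expired entry stays in the chain
 * until an insertion removes it.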
5835 */ 5836 if (rt6_check_expired(rt6_ex->rt6i)) { 5837 w->count++; 5838 continue; 5839 } 5840 5841 err = rt6_fill_node(dump->net, dump->skb, w->rt, 5842 &rt6_ex->rt6i->dst, NULL, NULL, 0, 5843 RTM_NEWROUTE, 5844 NETLINK_CB(dump->cb->skb).portid, 5845 dump->cb->nlh->nlmsg_seq, w->flags); 5846 if (err) 5847 return err; 5848 5849 w->count++; 5850 } 5851 bucket++; 5852 } 5853 5854 return 0; 5855 } 5856 5857 /* Return -1 if done with node, number of handled routes on partial dump */ 5858 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip) 5859 { 5860 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg; 5861 struct fib_dump_filter *filter = &arg->filter; 5862 unsigned int flags = NLM_F_MULTI; 5863 struct net *net = arg->net; 5864 int count = 0; 5865 5866 if (rt == net->ipv6.fib6_null_entry) 5867 return -1; 5868 5869 if ((filter->flags & RTM_F_PREFIX) && 5870 !(rt->fib6_flags & RTF_PREFIX_RT)) { 5871 /* success since this is not a prefix route */ 5872 return -1; 5873 } 5874 if (filter->filter_set && 5875 ((filter->rt_type && rt->fib6_type != filter->rt_type) || 5876 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) || 5877 (filter->protocol && rt->fib6_protocol != filter->protocol))) { 5878 return -1; 5879 } 5880 5881 if (filter->filter_set || 5882 !filter->dump_routes || !filter->dump_exceptions) { 5883 flags |= NLM_F_DUMP_FILTERED; 5884 } 5885 5886 if (filter->dump_routes) { 5887 if (skip) { 5888 skip--; 5889 } else { 5890 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 5891 0, RTM_NEWROUTE, 5892 NETLINK_CB(arg->cb->skb).portid, 5893 arg->cb->nlh->nlmsg_seq, flags)) { 5894 return 0; 5895 } 5896 count++; 5897 } 5898 } 5899 5900 if (filter->dump_exceptions) { 5901 struct fib6_nh_exception_dump_walker w = { .dump = arg, 5902 .rt = rt, 5903 .flags = flags, 5904 .skip = skip, 5905 .count = 0 }; 5906 int err; 5907 5908 rcu_read_lock(); 5909 if (rt->nh) { 5910 err = nexthop_for_each_fib6_nh(rt->nh, 5911 rt6_nh_dump_exceptions, 5912 &w); 5913 } else { 5914 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w); 5915 } 5916 rcu_read_unlock(); 5917 5918 if (err) 5919 return count += w.count; 5920 } 5921 5922 return -1; 5923 } 5924 5925 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb, 5926 const struct nlmsghdr *nlh, 5927 struct nlattr **tb, 5928 struct netlink_ext_ack *extack) 5929 { 5930 struct rtmsg *rtm; 5931 int i, err; 5932 5933 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) { 5934 NL_SET_ERR_MSG_MOD(extack, 5935 "Invalid header for get route request"); 5936 return -EINVAL; 5937 } 5938 5939 if (!netlink_strict_get_check(skb)) 5940 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX, 5941 rtm_ipv6_policy, extack); 5942 5943 rtm = nlmsg_data(nlh); 5944 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) || 5945 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) || 5946 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope || 5947 rtm->rtm_type) { 5948 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request"); 5949 return -EINVAL; 5950 } 5951 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) { 5952 NL_SET_ERR_MSG_MOD(extack, 5953 "Invalid flags for get route request"); 5954 return -EINVAL; 5955 } 5956 5957 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX, 5958 rtm_ipv6_policy, extack); 5959 if (err) 5960 return err; 5961 5962 if ((tb[RTA_SRC] && !rtm->rtm_src_len) || 5963 (tb[RTA_DST] && !rtm->rtm_dst_len)) { 5964 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6"); 5965 return -EINVAL; 5966 } 
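/* With strict checking, only attributes that describe the lookup
 * key may appear in the request; anything else is rejected below.
 */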
5967 5968 for (i = 0; i <= RTA_MAX; i++) { 5969 if (!tb[i]) 5970 continue; 5971 5972 switch (i) { 5973 case RTA_SRC: 5974 case RTA_DST: 5975 case RTA_IIF: 5976 case RTA_OIF: 5977 case RTA_MARK: 5978 case RTA_UID: 5979 case RTA_SPORT: 5980 case RTA_DPORT: 5981 case RTA_IP_PROTO: 5982 break; 5983 default: 5984 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request"); 5985 return -EINVAL; 5986 } 5987 } 5988 5989 return 0; 5990 } 5991 5992 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, 5993 struct netlink_ext_ack *extack) 5994 { 5995 struct net *net = sock_net(in_skb->sk); 5996 struct nlattr *tb[RTA_MAX+1]; 5997 int err, iif = 0, oif = 0; 5998 struct fib6_info *from; 5999 struct dst_entry *dst; 6000 struct rt6_info *rt; 6001 struct sk_buff *skb; 6002 struct rtmsg *rtm; 6003 struct flowi6 fl6 = {}; 6004 bool fibmatch; 6005 6006 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack); 6007 if (err < 0) 6008 goto errout; 6009 6010 err = -EINVAL; 6011 rtm = nlmsg_data(nlh); 6012 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0); 6013 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH); 6014 6015 if (tb[RTA_SRC]) { 6016 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) 6017 goto errout; 6018 6019 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]); 6020 } 6021 6022 if (tb[RTA_DST]) { 6023 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr)) 6024 goto errout; 6025 6026 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]); 6027 } 6028 6029 if (tb[RTA_IIF]) 6030 iif = nla_get_u32(tb[RTA_IIF]); 6031 6032 if (tb[RTA_OIF]) 6033 oif = nla_get_u32(tb[RTA_OIF]); 6034 6035 if (tb[RTA_MARK]) 6036 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]); 6037 6038 if (tb[RTA_UID]) 6039 fl6.flowi6_uid = make_kuid(current_user_ns(), 6040 nla_get_u32(tb[RTA_UID])); 6041 else 6042 fl6.flowi6_uid = iif ? 
INVALID_UID : current_uid(); 6043 6044 if (tb[RTA_SPORT]) 6045 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]); 6046 6047 if (tb[RTA_DPORT]) 6048 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]); 6049 6050 if (tb[RTA_IP_PROTO]) { 6051 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO], 6052 &fl6.flowi6_proto, AF_INET6, 6053 extack); 6054 if (err) 6055 goto errout; 6056 } 6057 6058 if (iif) { 6059 struct net_device *dev; 6060 int flags = 0; 6061 6062 rcu_read_lock(); 6063 6064 dev = dev_get_by_index_rcu(net, iif); 6065 if (!dev) { 6066 rcu_read_unlock(); 6067 err = -ENODEV; 6068 goto errout; 6069 } 6070 6071 fl6.flowi6_iif = iif; 6072 6073 if (!ipv6_addr_any(&fl6.saddr)) 6074 flags |= RT6_LOOKUP_F_HAS_SADDR; 6075 6076 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags); 6077 6078 rcu_read_unlock(); 6079 } else { 6080 fl6.flowi6_oif = oif; 6081 6082 dst = ip6_route_output(net, NULL, &fl6); 6083 } 6084 6085 6086 rt = container_of(dst, struct rt6_info, dst); 6087 if (rt->dst.error) { 6088 err = rt->dst.error; 6089 ip6_rt_put(rt); 6090 goto errout; 6091 } 6092 6093 if (rt == net->ipv6.ip6_null_entry) { 6094 err = rt->dst.error; 6095 ip6_rt_put(rt); 6096 goto errout; 6097 } 6098 6099 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 6100 if (!skb) { 6101 ip6_rt_put(rt); 6102 err = -ENOBUFS; 6103 goto errout; 6104 } 6105 6106 skb_dst_set(skb, &rt->dst); 6107 6108 rcu_read_lock(); 6109 from = rcu_dereference(rt->from); 6110 if (from) { 6111 if (fibmatch) 6112 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, 6113 iif, RTM_NEWROUTE, 6114 NETLINK_CB(in_skb).portid, 6115 nlh->nlmsg_seq, 0); 6116 else 6117 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr, 6118 &fl6.saddr, iif, RTM_NEWROUTE, 6119 NETLINK_CB(in_skb).portid, 6120 nlh->nlmsg_seq, 0); 6121 } else { 6122 err = -ENETUNREACH; 6123 } 6124 rcu_read_unlock(); 6125 6126 if (err < 0) { 6127 kfree_skb(skb); 6128 goto errout; 6129 } 6130 6131 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 6132 errout: 6133 return err; 6134 } 6135 6136 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, 6137 unsigned int nlm_flags) 6138 { 6139 struct sk_buff *skb; 6140 struct net *net = info->nl_net; 6141 u32 seq; 6142 int err; 6143 6144 err = -ENOBUFS; 6145 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 6146 6147 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 6148 if (!skb) 6149 goto errout; 6150 6151 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0, 6152 event, info->portid, seq, nlm_flags); 6153 if (err < 0) { 6154 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 6155 WARN_ON(err == -EMSGSIZE); 6156 kfree_skb(skb); 6157 goto errout; 6158 } 6159 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 6160 info->nlh, gfp_any()); 6161 return; 6162 errout: 6163 if (err < 0) 6164 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); 6165 } 6166 6167 void fib6_rt_update(struct net *net, struct fib6_info *rt, 6168 struct nl_info *info) 6169 { 6170 u32 seq = info->nlh ? 
info->nlh->nlmsg_seq : 0; 6171 struct sk_buff *skb; 6172 int err = -ENOBUFS; 6173 6174 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 6175 if (!skb) 6176 goto errout; 6177 6178 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0, 6179 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE); 6180 if (err < 0) { 6181 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 6182 WARN_ON(err == -EMSGSIZE); 6183 kfree_skb(skb); 6184 goto errout; 6185 } 6186 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 6187 info->nlh, gfp_any()); 6188 return; 6189 errout: 6190 if (err < 0) 6191 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); 6192 } 6193 6194 void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i, 6195 bool offload, bool trap, bool offload_failed) 6196 { 6197 struct sk_buff *skb; 6198 int err; 6199 6200 if (f6i->offload == offload && f6i->trap == trap && 6201 f6i->offload_failed == offload_failed) 6202 return; 6203 6204 f6i->offload = offload; 6205 f6i->trap = trap; 6206 6207 /* 2 means send notifications only if offload_failed was changed. */ 6208 if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 && 6209 f6i->offload_failed == offload_failed) 6210 return; 6211 6212 f6i->offload_failed = offload_failed; 6213 6214 if (!rcu_access_pointer(f6i->fib6_node)) 6215 /* The route was removed from the tree, do not send 6216 * notification. 6217 */ 6218 return; 6219 6220 if (!net->ipv6.sysctl.fib_notify_on_flag_change) 6221 return; 6222 6223 skb = nlmsg_new(rt6_nlmsg_size(f6i), GFP_KERNEL); 6224 if (!skb) { 6225 err = -ENOBUFS; 6226 goto errout; 6227 } 6228 6229 err = rt6_fill_node(net, skb, f6i, NULL, NULL, NULL, 0, RTM_NEWROUTE, 0, 6230 0, 0); 6231 if (err < 0) { 6232 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 6233 WARN_ON(err == -EMSGSIZE); 6234 kfree_skb(skb); 6235 goto errout; 6236 } 6237 6238 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, NULL, GFP_KERNEL); 6239 return; 6240 6241 errout: 6242 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); 6243 } 6244 EXPORT_SYMBOL(fib6_info_hw_flags_set); 6245 6246 static int ip6_route_dev_notify(struct notifier_block *this, 6247 unsigned long event, void *ptr) 6248 { 6249 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6250 struct net *net = dev_net(dev); 6251 6252 if (!(dev->flags & IFF_LOOPBACK)) 6253 return NOTIFY_OK; 6254 6255 if (event == NETDEV_REGISTER) { 6256 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev; 6257 net->ipv6.ip6_null_entry->dst.dev = dev; 6258 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); 6259 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 6260 net->ipv6.ip6_prohibit_entry->dst.dev = dev; 6261 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); 6262 net->ipv6.ip6_blk_hole_entry->dst.dev = dev; 6263 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); 6264 #endif 6265 } else if (event == NETDEV_UNREGISTER && 6266 dev->reg_state != NETREG_UNREGISTERED) { 6267 /* NETDEV_UNREGISTER can be fired multiple times by 6268 * netdev_wait_allrefs(). Make sure we only call this once.
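 * The reg_state check above is what guarantees that: once the
 * device has reached NETREG_UNREGISTERED, repeated events are
 * ignored.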
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	} else if (event == NETDEV_UNREGISTER &&
		   dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER could be fired for multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
	}

	return NOTIFY_OK;
}

/*
 *	/proc
 */

#ifdef CONFIG_PROC_FS
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;

	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}
#endif	/* CONFIG_PROC_FS */

#ifdef CONFIG_SYSCTL

static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
				     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;
	int ret;

	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}
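/* Usage sketch for the write-only "flush" entry below: writing any
 * integer (as root) kicks off a garbage-collection pass over the
 * routing tree via fib6_run_gc(), e.g.
 *
 *	echo 1 > /proc/sys/net/ipv6/route/flush
 */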
static struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	=	"flush",
		.data		=	&init_net.ipv6.sysctl.flush_delay,
		.maxlen		=	sizeof(int),
		.mode		=	0200,
		.proc_handler	=	ipv6_sysctl_rtcache_flush
	},
	{
		.procname	=	"gc_thresh",
		.data		=	&ip6_dst_ops_template.gc_thresh,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"max_size",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_timeout",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_elasticity",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"mtu_expires",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"min_adv_mss",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval_ms",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_ms_jiffies,
	},
	{
		.procname	=	"skip_notify_on_dev_down",
		.data		=	&init_net.ipv6.sysctl.skip_notify_on_dev_down,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_minmax,
		.extra1		=	SYSCTL_ZERO,
		.extra2		=	SYSCTL_ONE,
	},
	{ }
};

struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
#endif
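/* Per-netns state, including the defaults for the sysctls above.  The
 * interval/timeout values are in jiffies; ip6_rt_min_advmss is the IPv6
 * minimum MTU (1280) less 40 bytes of IPv6 header and 20 bytes of TCP
 * header.
 */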
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;
	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
	       sizeof(*net->ipv6.fib6_null_entry));

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
#ifdef CONFIG_IPV6_SUBTREES
	net->ipv6.fib6_routes_require_src = 0;
#endif
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
	net->ipv6.sysctl.skip_notify_on_dev_down = 0;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}

static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.fib6_null_entry);
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
			sizeof(struct ipv6_route_iter));
	proc_create_net_single("rt6_stats", 0444, net->proc_net,
			       rt6_stats_seq_show, NULL);
#endif
	return 0;
}

static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}

static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
	.init	=	ipv6_inetpeer_init,
	.exit	=	ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};

void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
}
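/* Expose the FIB to BPF iterators: a program attached to the
 * "ipv6_route" target is invoked once per fib6_info.  Rough consumer
 * sketch (a complete example lives under tools/testing/selftests/bpf):
 *
 *	SEC("iter/ipv6_route")
 *	int dump_ipv6_route(struct bpf_iter__ipv6_route *ctx)
 *	{
 *		struct fib6_info *rt = ctx->rt;
 *
 *		if (!rt)
 *			return 0;
 *		... inspect rt and BPF_SEQ_PRINTF() the fields of interest ...
 *		return 0;
 *	}
 */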
#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)

BTF_ID_LIST(btf_fib6_info_id)
BTF_ID(struct, fib6_info)

static const struct bpf_iter_seq_info ipv6_route_seq_info = {
	.seq_ops		= &ipv6_route_seq_ops,
	.init_seq_private	= bpf_iter_init_seq_net,
	.fini_seq_private	= bpf_iter_fini_seq_net,
	.seq_priv_size		= sizeof(struct ipv6_route_iter),
};

static struct bpf_iter_reg ipv6_route_reg_info = {
	.target			= "ipv6_route",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__ipv6_route, rt),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &ipv6_route_seq_info,
};

static int __init bpf_iter_register(void)
{
	ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
	return bpf_iter_reg_target(&ipv6_route_reg_info);
}

static void bpf_iter_unregister(void)
{
	bpf_iter_unreg_target(&ipv6_route_reg_info);
}
#endif
#endif

int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	ret = bpf_iter_register();
	if (ret)
		goto out_register_late_subsys;
#endif
#endif

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
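/* Tear down everything ip6_route_init() set up, roughly in reverse
 * registration order.
 */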
void ip6_route_cleanup(void)
{
#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_unregister();
#endif
#endif
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}