// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		reachable. otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/rtnh.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/ip.h>
#include <linux/uaccess.h>
#include <linux/btf_ids.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS

enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};

INDIRECT_CALLABLE_SCOPE
struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ip6_mtu(const struct dst_entry *dst);
static void ip6_negative_advice(struct sock *sk,
				struct dst_entry *dst);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
			   struct net_device *dev);
static void ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict);
static size_t rt6_nlmsg_size(struct fib6_info *f6i);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev,
					    unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev);
#endif

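/* Uncached routes (RTF_CACHE clones created outside the fib tree, e.g. for
 * the FLOWI_FLAG_KNOWN_NH case) live on a per-CPU list so that they can be
 * found and redirected to the blackhole device when their device goes away.
 */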
struct uncached_list {
	spinlock_t lock;
	struct list_head head;
	struct list_head quarantine;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->dst.rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->dst.rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->dst.rt_uncached)) {
		struct uncached_list *ul = rt->dst.rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del_init(&rt->dst.rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

static void rt6_uncached_list_flush_dev(struct net_device *dev)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt, *safe;

		if (list_empty(&ul->head))
			continue;

		spin_lock_bh(&ul->lock);
		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;
			bool handled = false;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(blackhole_netdev);
				in6_dev_put(rt_idev);
				handled = true;
			}

			if (rt_dev == dev) {
				rt->dst.dev = blackhole_netdev;
				netdev_ref_replace(rt_dev, blackhole_netdev,
						   &rt->dst.dev_tracker,
						   GFP_ATOMIC);
				handled = true;
			}
			if (handled)
				list_move(&rt->dst.rt_uncached,
					  &ul->quarantine);
		}
		spin_unlock_bh(&ul->lock);
	}
}

static inline const void *choose_neigh_daddr(const struct in6_addr *p,
					     struct sk_buff *skb,
					     const void *daddr)
{
	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
{
	struct neighbour *n;

	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
	if (n)
		return n;

	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;
}

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
				dst->dev, skb, daddr);
}

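/* dst_ops ->confirm_neigh hook: confirm reachability of the route's nexthop
 * neighbour.  Multicast destinations and devices that do not perform
 * neighbour resolution are skipped.
 */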
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
	.family			= AF_INET6,
	.gc			= ip6_dst_gc,
	.gc_thresh		= 1024,
	.check			= ip6_dst_check,
	.default_advmss		= ip6_default_advmss,
	.mtu			= ip6_mtu,
	.cow_metrics		= dst_cow_metrics_generic,
	.destroy		= ip6_dst_destroy,
	.ifdown			= ip6_dst_ifdown,
	.negative_advice	= ip6_negative_advice,
	.link_failure		= ip6_link_failure,
	.update_pmtu		= ip6_rt_update_pmtu,
	.redirect		= rt6_do_redirect,
	.local_out		= __ip6_local_out,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.confirm_neigh		= ip6_confirm_neigh,
};

static struct dst_ops ip6_dst_blackhole_ops = {
	.family			= AF_INET6,
	.default_advmss		= ip6_default_advmss,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.check			= ip6_dst_check,
	.destroy		= ip6_dst_destroy,
	.cow_metrics		= dst_cow_metrics_generic,
	.update_pmtu		= dst_blackhole_update_pmtu,
	.redirect		= dst_blackhole_redirect,
	.mtu			= dst_blackhole_mtu,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol	= RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,
	.fib6_ref	= REFCOUNT_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__rcuref	= RCUREF_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__rcuref	= RCUREF_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__rcuref	= RCUREF_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif

static void rt6_info_init(struct rt6_info *rt)
{
	memset_after(rt, 0, dst);
}

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

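/* dst_ops ->destroy hook: drop everything the rt6_info holds on to: the
 * inherited metrics, its slot on the uncached list, the inet6_dev reference
 * and the reference on the originating fib6_info.
 */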
static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from;
	struct inet6_dev *idev;

	ip_dst_metrics_put(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	from = xchg((__force struct fib6_info **)&rt->from, NULL);
	fib6_info_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;

	if (idev && idev->dev != blackhole_netdev) {
		struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);

		if (blackhole_idev) {
			rt->rt6i_idev = blackhole_idev;
			in6_dev_put(idev);
		}
	}
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (from) {
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(from);
	}
	return false;
}

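/* Pick the nexthop/sibling for a multipath route: hash the flow (unless an
 * ICMPv6 error already supplied a non-zero hash) and select the first
 * sibling whose upper bound covers the hash, skipping nexthops that fail
 * the reachability score.
 */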
void fib6_select_path(const struct net *net, struct fib6_result *res,
		      struct flowi6 *fl6, int oif, bool have_oif_match,
		      const struct sk_buff *skb, int strict)
{
	struct fib6_info *sibling, *next_sibling;
	struct fib6_info *match = res->f6i;

	if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
		goto out;

	if (match->nh && have_oif_match && res->nh)
		return;

	if (skb)
		IP6CB(skb)->flags |= IP6SKB_MULTIPATH;

	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash &&
	    (!match->nh || nexthop_is_multipath(match->nh)))
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	if (unlikely(match->nh)) {
		nexthop_path_fib6_result(res, fl6->mp_hash);
		return;
	}

	if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
		goto out;

	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
		const struct fib6_nh *nh = sibling->fib6_nh;
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)
			continue;
		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

out:
	res->f6i = match;
	res->nh = match->fib6_nh;
}

/*
 *	Route lookup. rcu_read_lock() should be held.
 */

static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
			       const struct in6_addr *saddr, int oif, int flags)
{
	const struct net_device *dev;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		return false;

	dev = nh->fib_nh_dev;
	if (oif) {
		if (dev->ifindex == oif)
			return true;
	} else {
		if (ipv6_chk_addr(net, saddr, dev,
				  flags & RT6_LOOKUP_F_IFACE))
			return true;
	}

	return false;
}

struct fib6_nh_dm_arg {
	struct net *net;
	const struct in6_addr *saddr;
	int oif;
	int flags;
	struct fib6_nh *nh;
};

static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_dm_arg *arg = _arg;

	arg->nh = nh;
	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
				  arg->flags);
}

/* returns fib6_nh from nexthop or NULL */
static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
					struct fib6_result *res,
					const struct in6_addr *saddr,
					int oif, int flags)
{
	struct fib6_nh_dm_arg arg = {
		.net = net,
		.saddr = saddr,
		.oif = oif,
		.flags = flags,
	};

	if (nexthop_is_blackhole(nh))
		return NULL;

	if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
		return arg.nh;

	return NULL;
}

static void rt6_device_match(struct net *net, struct fib6_result *res,
			     const struct in6_addr *saddr, int oif, int flags)
{
	struct fib6_info *f6i = res->f6i;
	struct fib6_info *spf6i;
	struct fib6_nh *nh;

	if (!oif && ipv6_addr_any(saddr)) {
		if (unlikely(f6i->nh)) {
			nh = nexthop_fib6_nh(f6i->nh);
			if (nexthop_is_blackhole(f6i->nh))
				goto out_blackhole;
		} else {
			nh = f6i->fib6_nh;
		}
		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
			goto out;
	}

	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
		bool matched = false;

		if (unlikely(spf6i->nh)) {
			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
					      oif, flags);
			if (nh)
				matched = true;
		} else {
			nh = spf6i->fib6_nh;
			if (__rt6_device_match(net, nh, saddr, oif, flags))
				matched = true;
		}
		if (matched) {
			res->f6i = spf6i;
			goto out;
		}
	}

	if (oif && flags & RT6_LOOKUP_F_IFACE) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
		goto out;
	}

	if (unlikely(f6i->nh)) {
		nh = nexthop_fib6_nh(f6i->nh);
		if (nexthop_is_blackhole(f6i->nh))
			goto out_blackhole;
	} else {
		nh = f6i->fib6_nh;
	}

	if (nh->fib_nh_flags & RTNH_F_DEAD) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
	}
out:
	res->nh = nh;
	res->fib6_type = res->f6i->fib6_type;
	res->fib6_flags = res->f6i->fib6_flags;
	return;

out_blackhole:
	res->fib6_flags |= RTF_REJECT;
	res->fib6_type = RTN_BLACKHOLE;
	res->nh = nh;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	netdev_put(work->dev, &work->dev_tracker);
	kfree(work);
}

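/* Kick a deferred (workqueue) neighbour solicitation toward a gateway
 * nexthop whose reachability is in doubt.  Probes are rate-limited per
 * nexthop via last_probe/cmpxchg and the per-device rtr_probe_interval.
 */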
static void rt6_probe(struct fib6_nh *fib6_nh)
{
	struct __rt6_probe_work *work = NULL;
	const struct in6_addr *nh_gw;
	unsigned long last_probe;
	struct neighbour *neigh;
	struct net_device *dev;
	struct inet6_dev *idev;

	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!fib6_nh->fib_nh_gw_family)
		return;

	nh_gw = &fib6_nh->fib_nh_gw6;
	dev = fib6_nh->fib_nh_dev;
	rcu_read_lock();
	last_probe = READ_ONCE(fib6_nh->last_probe);
	idev = __in6_dev_get(dev);
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
	if (neigh) {
		if (READ_ONCE(neigh->nud_state) & NUD_VALID)
			goto out;

		write_lock_bh(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated + idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock_bh(&neigh->lock);
	} else if (time_after(jiffies, last_probe +
				       idev->cnf.rtr_probe_interval)) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (!work || cmpxchg(&fib6_nh->last_probe,
			     last_probe, jiffies) != last_probe) {
		kfree(work);
	} else {
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = *nh_gw;
		netdev_hold(dev, &work->dev_tracker, GFP_ATOMIC);
		work->dev = dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock();
}
#else
static inline void rt6_probe(struct fib6_nh *fib6_nh)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	rcu_read_lock();
	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
					  &fib6_nh->fib_nh_gw6);
	if (neigh) {
		u8 nud_state = READ_ONCE(neigh->nud_state);

		if (nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock();

	return ret;
}

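/* Score a nexthop for route selection: +2 when it uses the requested output
 * interface, plus the decoded RA router-preference bits.  When the caller
 * requires reachability, a negative NUD verdict from rt6_check_neigh() is
 * propagated as the result.
 */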
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict)
{
	int m = 0;

	if (!oif || nh->fib_nh_dev->ifindex == oif)
		m = 2;

	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
		int n = rt6_check_neigh(nh);
		if (n < 0)
			return n;
	}
	return m;
}

static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
		       int oif, int strict, int *mpri, bool *do_rr)
{
	bool match_do_rr = false;
	bool rc = false;
	int m;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		goto out;

	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	m = rt6_score_route(nh, fib6_flags, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(nh);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		rc = true;
	}
out:
	return rc;
}

struct fib6_nh_frl_arg {
	u32 flags;
	int oif;
	int strict;
	int *mpri;
	bool *do_rr;
	struct fib6_nh *nh;
};

static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_frl_arg *arg = _arg;

	arg->nh = nh;
	return find_match(nh, arg->flags, arg->oif, arg->strict,
			  arg->mpri, arg->do_rr);
}

static void __find_rr_leaf(struct fib6_info *f6i_start,
			   struct fib6_info *nomatch, u32 metric,
			   struct fib6_result *res, struct fib6_info **cont,
			   int oif, int strict, bool *do_rr, int *mpri)
{
	struct fib6_info *f6i;

	for (f6i = f6i_start;
	     f6i && f6i != nomatch;
	     f6i = rcu_dereference(f6i->fib6_next)) {
		bool matched = false;
		struct fib6_nh *nh;

		if (cont && f6i->fib6_metric != metric) {
			*cont = f6i;
			return;
		}

		if (fib6_check_expired(f6i))
			continue;

		if (unlikely(f6i->nh)) {
			struct fib6_nh_frl_arg arg = {
				.flags	= f6i->fib6_flags,
				.oif	= oif,
				.strict	= strict,
				.mpri	= mpri,
				.do_rr	= do_rr
			};

			if (nexthop_is_blackhole(f6i->nh)) {
				res->fib6_flags = RTF_REJECT;
				res->fib6_type = RTN_BLACKHOLE;
				res->f6i = f6i;
				res->nh = nexthop_fib6_nh(f6i->nh);
				return;
			}
			if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
						     &arg)) {
				matched = true;
				nh = arg.nh;
			}
		} else {
			nh = f6i->fib6_nh;
			if (find_match(nh, f6i->fib6_flags, oif, strict,
				       mpri, do_rr))
				matched = true;
		}
		if (matched) {
			res->f6i = f6i;
			res->nh = nh;
			res->fib6_flags = f6i->fib6_flags;
			res->fib6_type = f6i->fib6_type;
		}
	}
}

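/* Find the best route among the entries sharing rr_head's metric: scan from
 * rr_head to the end of the leaf list, then from the leaf head back up to
 * rr_head.  Entries with a different metric (collected in cont) are only
 * considered if nothing matched at the primary metric.
 */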
static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
			 struct fib6_info *rr_head, int oif, int strict,
			 bool *do_rr, struct fib6_result *res)
{
	u32 metric = rr_head->fib6_metric;
	struct fib6_info *cont = NULL;
	int mpri = -1;

	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	if (res->f6i || !cont)
		return;

	__find_rr_leaf(cont, NULL, metric, res, NULL,
		       oif, strict, do_rr, &mpri);
}

static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
		       struct fib6_result *res, int strict)
{
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *rt0;
	bool do_rr = false;
	int key_plen;

	/* make sure this function or its helpers sets f6i */
	res->f6i = NULL;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		goto out;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not point to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		goto out;

	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
	if (do_rr) {
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->fib6_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
		}
	}

out:
	if (!res->f6i) {
		res->f6i = net->ipv6.fib6_null_entry;
		res->nh = res->f6i->fib6_nh;
		res->fib6_flags = res->f6i->fib6_flags;
		res->fib6_type = res->f6i->fib6_type;
	}
}

static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
{
	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
	       res->nh->fib_nh_gw_family;
}

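/* Process a Route Information Option received in a Router Advertisement
 * (RFC 4191): validate the option, then add, refresh or delete the
 * corresponding RTF_ROUTEINFO route according to its lifetime.
 */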
#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(net, rt, false);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			fib6_clean_expires(rt);
		else
			fib6_set_expires(rt, jiffies + HZ * lifetime);

		fib6_info_release(rt);
	}
	return 0;
}
#endif

/*
 *	Misc support functions
 */

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;

	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}

static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}

static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;

	return flags;
}

static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
{
	rt->dst.error = ip6_rt_type_to_error(fib6_type);

	switch (fib6_type) {
	case RTN_BLACKHOLE:
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}

static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;

	if (res->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, res->fib6_type);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (res->nh->fib_nh_lws) {
		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}

/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rcu_assign_pointer(rt->from, from);
	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}

/* Caller must already hold reference to f6i in result */
static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	const struct net_device *dev = nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;

	ip6_rt_init_dst(rt, res);

	rt->rt6i_dst = f6i->fib6_dst;
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
	rt->rt6i_flags = res->fib6_flags;
	if (nh->fib_nh_gw_family) {
		rt->rt6i_gateway = nh->fib_nh_gw6;
		rt->rt6i_flags |= RTF_GATEWAY;
	}
	rt6_set_from(rt, f6i);
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = f6i->fib6_src;
#endif
}

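/* Walk back up the fib tree from a node that yielded no usable route,
 * descending into source-routed subtrees on the way, until a node carrying
 * route info (RTN_RTINFO) is found or the tree root is reached.
 */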
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
			fn = fib6_node_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (net) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}

/* called with rcu_lock held */
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;
	unsigned short flags;
	struct rt6_info *nrt;

	if (!fib6_info_hold_safe(f6i))
		goto fallback;

	flags = fib6_info_dst_flags(f6i);
	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
	if (!nrt) {
		fib6_info_release(f6i);
		goto fallback;
	}

	ip6_rt_copy_init(nrt, res);
	return nrt;

fallback:
	nrt = dev_net(dev)->ipv6.ip6_null_entry;
	dst_hold(&nrt->dst);
	return nrt;
}

INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct fib6_result res = {};
	struct fib6_node *fn;
	struct rt6_info *rt;

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	res.f6i = rcu_dereference(fn->leaf);
	if (!res.f6i)
		res.f6i = net->ipv6.fib6_null_entry;
	else
		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
				 flags);

	if (res.f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;

		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
		goto out;
	} else if (res.fib6_flags & RTF_REJECT) {
		goto do_create;
	}

	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
			 fl6->flowi6_oif != 0, skb, flags);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt))
			dst_use_noref(&rt->dst, jiffies);
	} else {
do_create:
		rt = ip6_create_rt_rcu(&res);
	}

out:
	trace_fib6_table_lookup(net, &res, table, fl6);

	rcu_read_unlock();

	return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason, the
 * route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
	struct nl_info info = { .nl_net = net, };

	return __ip6_ins_rt(rt, &info, NULL);
}

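/* Create an RTF_CACHE clone of the looked-up route, narrowed to a /128
 * destination (and /128 source under subtrees).  These clones back the
 * exception entries used for PMTU exceptions and redirects.
 */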
static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct fib6_info *f6i = res->f6i;
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	dev = ip6_rt_get_dev_rcu(res);
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
	if (!rt) {
		fib6_info_release(f6i);
		return NULL;
	}

	ip6_rt_copy_init(rt, res);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(res)) {
		if (f6i->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;
	unsigned short flags = fib6_info_dst_flags(f6i);
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(res);
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);
	rcu_read_unlock();
	if (!pcpu_rt) {
		fib6_info_release(f6i);
		return NULL;
	}
	ip6_rt_copy_init(pcpu_rt, res);
	pcpu_rt->rt6i_flags |= RTF_PCPU;

	if (f6i->nh)
		pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));

	return pcpu_rt;
}

static bool rt6_is_valid(const struct rt6_info *rt6)
{
	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt;

	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);

	if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
		struct rt6_info *prev, **p;

		p = this_cpu_ptr(res->nh->rt6i_pcpu);
		/* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
		prev = xchg(p, NULL);
		if (prev) {
			dst_dev_put(&prev->dst);
			dst_release(&prev->dst);
		}

		pcpu_rt = NULL;
	}

	return pcpu_rt;
}

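/* Allocate and publish a per-CPU cached dst for the result.  The slot is
 * known to be empty, so the cmpxchg must succeed; if the fib entry started
 * being destroyed in the meantime, drop the 'from' reference right away as
 * __fib6_drop_pcpu_from() may already have run.
 */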
static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(res);
	if (!pcpu_rt)
		return NULL;

	p = this_cpu_ptr(res->nh->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	if (res->f6i->fib6_destroying) {
		struct fib6_info *from;

		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
		fib6_info_release(from);
	}

	return pcpu_rt;
}

/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct fib6_info *from;
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* purge completely the exception to allow releasing the held resources:
	 * some [sk] cache may keep the dst around for unlimited time
	 */
	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
	fib6_info_release(from);
	dst_dev_put(&rt6_ex->rt6i->dst);

	hlist_del_rcu(&rt6_ex->hlist);
	dst_release(&rt6_ex->rt6i->dst);
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *rt6_ex, *oldest = NULL;

	if (!bucket)
		return;

	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	rt6_remove_exception(bucket, oldest);
}

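/* Hash (daddr, saddr) into an exception bucket index.  A boot-time random
 * siphash key keeps remote peers from predicting bucket collisions.
 */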
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static siphash_aligned_key_t rt6_exception_key;
	struct {
		struct in6_addr dst;
		struct in6_addr src;
	} __aligned(SIPHASH_ALIGNMENT) combined = {
		.dst = *dst,
	};
	u64 val;

	net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		combined.src = *src;
#endif
	val = siphash(&combined, sizeof(combined), &rt6_exception_key);

	return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

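/* Effective MTU for a lookup result: the route's PMTU metric if set,
 * otherwise the device MTU, clamped to IP6_MAX_MTU and reduced by any
 * lwtunnel encapsulation headroom on the nexthop.
 */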
static unsigned int fib6_mtu(const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	unsigned int mtu;

	if (res->f6i->fib6_pmtu) {
		mtu = res->f6i->fib6_pmtu;
	} else {
		struct net_device *dev = nh->fib_nh_dev;
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		mtu = idev->cnf.mtu6;
		rcu_read_unlock();
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}

#define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL

/* used when the flushed bit is not relevant, only access to the bucket
 * (i.e., all bucket users except rt6_insert_exception);
 *
 * called under rcu lock; sometimes called with rt6_exception_lock held
 */
static
struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
						       spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;

	if (lock)
		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
						   lockdep_is_held(lock));
	else
		bucket = rcu_dereference(nh->rt6i_exception_bucket);

	/* remove bucket flushed bit if set */
	if (bucket) {
		unsigned long p = (unsigned long)bucket;

		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
		bucket = (struct rt6_exception_bucket *)p;
	}

	return bucket;
}

static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
{
	unsigned long p = (unsigned long)bucket;

	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
}

/* called with rt6_exception_lock held */
static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
					      spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;
	unsigned long p;

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(lock));

	p = (unsigned long)bucket;
	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
	bucket = (struct rt6_exception_bucket *)p;
	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
}

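/* Insert an RTF_CACHE route into the nexthop's exception table, allocating
 * the bucket array on first use, replacing any existing entry for the same
 * (daddr, saddr) and trimming over-deep buckets.  On success the table
 * sernum is bumped so stale cached dsts fail their next dst_check().
 */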
static int rt6_insert_exception(struct rt6_info *nrt,
				const struct fib6_result *res)
{
	struct net *net = dev_net(nrt->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct fib6_info *f6i = res->f6i;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_nh *nh = res->nh;
	int max_depth;
	int err = 0;

	spin_lock_bh(&rt6_exception_lock);

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
		err = -EINVAL;
		goto out;
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only fib6_dst.
	 */
	if (f6i->fib6_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif
	/* rt6_mtu_change() might lower mtu on f6i.
	 * Only insert this exception route if its mtu
	 * is less than f6i's mtu value.
	 */
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
		err = -EINVAL;
		goto out;
	}

	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
	net->ipv6.rt6_stats->fib_rt_cache++;

	/* Randomize max depth to avoid some side-channel attacks. */
	max_depth = FIB6_MAX_DEPTH + get_random_u32_below(FIB6_MAX_DEPTH);
	while (bucket->depth > max_depth)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	if (!err) {
		spin_lock_bh(&f6i->fib6_table->tb6_lock);
		fib6_update_sernum(net, f6i);
		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
		fib6_force_start_gc(net);
	}

	return err;
}

static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	spin_lock_bh(&rt6_exception_lock);

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (!bucket)
		goto out;

	/* Prevent rt6_insert_exception() from recreating the bucket list */
	if (!from)
		fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
			if (!from ||
			    rcu_access_pointer(rt6_ex->rt6i->from) == from)
				rt6_remove_exception(bucket, rt6_ex);
		}
		WARN_ON_ONCE(!from && bucket->depth);
		bucket++;
	}
out:
	spin_unlock_bh(&rt6_exception_lock);
}

static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
{
	struct fib6_info *f6i = arg;

	fib6_nh_flush_exceptions(nh, f6i);

	return 0;
}

void rt6_flush_exceptions(struct fib6_info *f6i)
{
	if (f6i->nh)
		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
					 f6i);
	else
		fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
}

/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct rt6_info *ret = NULL;

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * However, the src addr used to create the hash
	 * might not be exactly the passed in saddr which
	 * is a /128 addr from the flow.
	 * So we need to use f6i->fib6_src to redo lookup
	 * if the passed in saddr does not find anything.
	 * (See the logic in ip6_rt_cache_alloc() on how
	 * rt->rt6i_src is updated.)
	 */
	if (res->f6i->fib6_src.plen)
		src_key = saddr;
find_ex:
#endif
	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		ret = rt6_ex->rt6i;

#ifdef CONFIG_IPV6_SUBTREES
	/* Use fib6_src as src_key and redo lookup */
	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
		src_key = &res->f6i->fib6_src.addr;
		goto find_ex;
	}
#endif

	return ret;
}

/* Remove the passed in cached rt from the hash table that contains it */
static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
				    const struct rt6_info *rt)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int err;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}

struct fib6_nh_excptn_arg {
	struct rt6_info *rt;
	int plen;
};

static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_excptn_arg *arg = _arg;
	int err;

	err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
	if (err == 0)
		return 1;

	return 0;
}

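/* Remove the exception entry backing a cached route, resolving the owning
 * fib6_nh first (walking the nexthop group when the route uses one).
 */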
static int rt6_remove_exception_rt(struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))
		return -EINVAL;

	if (from->nh) {
		struct fib6_nh_excptn_arg arg = {
			.rt = rt,
			.plen = from->fib6_src.plen
		};
		int rc;

		/* rc = 1 means an entry was found */
		rc = nexthop_for_each_fib6_nh(from->nh,
					      rt6_nh_remove_exception_rt,
					      &arg);
		return rc ? 0 : -ENOENT;
	}

	return fib6_nh_remove_exception(from->fib6_nh,
					from->fib6_src.plen, rt);
}

/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
				     const struct rt6_info *rt)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;

	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
	if (rt6_ex)
		rt6_ex->stamp = jiffies;
}

struct fib6_nh_match_arg {
	const struct net_device *dev;
	const struct in6_addr	*gw;
	struct fib6_nh		*match;
};

/* determine if fib6_nh has given device and gateway */
static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_match_arg *arg = _arg;

	if (arg->dev != nh->fib_nh_dev ||
	    (arg->gw && !nh->fib_nh_gw_family) ||
	    (!arg->gw && nh->fib_nh_gw_family) ||
	    (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
		return 0;

	arg->match = nh;

	/* found a match, break the loop */
	return 1;
}

static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct fib6_info *from;
	struct fib6_nh *fib6_nh;

	rcu_read_lock();

	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))
		goto unlock;

	if (from->nh) {
		struct fib6_nh_match_arg arg = {
			.dev = rt->dst.dev,
			.gw = &rt->rt6i_gateway,
		};

		nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);

		if (!arg.match)
			goto unlock;
		fib6_nh = arg.match;
	} else {
		fib6_nh = from->fib6_nh;
	}
	fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
unlock:
	rcu_read_unlock();
}

static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
					 struct rt6_info *rt, int mtu)
{
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * handle this.
	 */

	if (dst_mtu(&rt->dst) >= mtu)
		return true;

	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
		return true;

	return false;
}

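/* Propagate a device MTU change to the cached exception routes hanging off
 * a nexthop, subject to the rules in rt6_mtu_change_route_allowed().
 */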
static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
				       const struct fib6_nh *nh, int mtu)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->from have already
			 * been updated.
			 */
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
		}
		bucket++;
	}
}

#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
					    const struct in6_addr *gateway)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}

static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others still hold references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;

		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);

		if (!(neigh && (neigh->flags & NTF_ROUTER))) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}

	gc_args->more++;
}

static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
				   struct fib6_gc_args *gc_args,
				   unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	rcu_read_lock_bh();
	spin_lock(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();
}

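/* Garbage-collect the exception entries of a route; for routes using a
 * shared nexthop group every fib6_nh in the group is aged.
 */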
fib6_nh_age_exceptions(nh, arg->gc_args, arg->now); 2155 return 0; 2156 } 2157 2158 void rt6_age_exceptions(struct fib6_info *f6i, 2159 struct fib6_gc_args *gc_args, 2160 unsigned long now) 2161 { 2162 if (f6i->nh) { 2163 struct fib6_nh_age_excptn_arg arg = { 2164 .gc_args = gc_args, 2165 .now = now 2166 }; 2167 2168 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions, 2169 &arg); 2170 } else { 2171 fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now); 2172 } 2173 } 2174 2175 /* must be called with rcu lock held */ 2176 int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif, 2177 struct flowi6 *fl6, struct fib6_result *res, int strict) 2178 { 2179 struct fib6_node *fn, *saved_fn; 2180 2181 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 2182 saved_fn = fn; 2183 2184 redo_rt6_select: 2185 rt6_select(net, fn, oif, res, strict); 2186 if (res->f6i == net->ipv6.fib6_null_entry) { 2187 fn = fib6_backtrack(fn, &fl6->saddr); 2188 if (fn) 2189 goto redo_rt6_select; 2190 else if (strict & RT6_LOOKUP_F_REACHABLE) { 2191 /* also consider unreachable routes */ 2192 strict &= ~RT6_LOOKUP_F_REACHABLE; 2193 fn = saved_fn; 2194 goto redo_rt6_select; 2195 } 2196 } 2197 2198 trace_fib6_table_lookup(net, res, table, fl6); 2199 2200 return 0; 2201 } 2202 2203 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, 2204 int oif, struct flowi6 *fl6, 2205 const struct sk_buff *skb, int flags) 2206 { 2207 struct fib6_result res = {}; 2208 struct rt6_info *rt = NULL; 2209 int strict = 0; 2210 2211 WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) && 2212 !rcu_read_lock_held()); 2213 2214 strict |= flags & RT6_LOOKUP_F_IFACE; 2215 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE; 2216 if (net->ipv6.devconf_all->forwarding == 0) 2217 strict |= RT6_LOOKUP_F_REACHABLE; 2218 2219 rcu_read_lock(); 2220 2221 fib6_table_lookup(net, table, oif, fl6, &res, strict); 2222 if (res.f6i == net->ipv6.fib6_null_entry) 2223 goto out; 2224 2225 fib6_select_path(net, &res, fl6, oif, false, skb, strict); 2226 2227 /* Search through the exception table */ 2228 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr); 2229 if (rt) { 2230 goto out; 2231 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) && 2232 !res.nh->fib_nh_gw_family)) { 2233 /* Create an RTF_CACHE clone which will not be 2234 * owned by the fib6 tree. It is for the special case where 2235 * the daddr in the skb during the neighbor look-up is different 2236 * from the fl6->daddr used to look up the route here. 2237 */ 2238 rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL); 2239 2240 if (rt) { 2241 /* 1 refcnt is taken during ip6_rt_cache_alloc(). 2242 * As rt6_uncached_list_add() does not consume refcnt, 2243 * this refcnt is always returned to the caller even 2244 * if the caller sets the RT6_LOOKUP_F_DST_NOREF flag.
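 * (The clone is not tracked by the fib6 tree, so RCU alone cannot keep it alive; the caller must drop the reference with dst_release().)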
2245 */ 2246 rt6_uncached_list_add(rt); 2247 rcu_read_unlock(); 2248 2249 return rt; 2250 } 2251 } else { 2252 /* Get a percpu copy */ 2253 local_bh_disable(); 2254 rt = rt6_get_pcpu_route(&res); 2255 2256 if (!rt) 2257 rt = rt6_make_pcpu_route(net, &res); 2258 2259 local_bh_enable(); 2260 } 2261 out: 2262 if (!rt) 2263 rt = net->ipv6.ip6_null_entry; 2264 if (!(flags & RT6_LOOKUP_F_DST_NOREF)) 2265 ip6_hold_safe(net, &rt); 2266 rcu_read_unlock(); 2267 2268 return rt; 2269 } 2270 EXPORT_SYMBOL_GPL(ip6_pol_route); 2271 2272 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net, 2273 struct fib6_table *table, 2274 struct flowi6 *fl6, 2275 const struct sk_buff *skb, 2276 int flags) 2277 { 2278 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags); 2279 } 2280 2281 struct dst_entry *ip6_route_input_lookup(struct net *net, 2282 struct net_device *dev, 2283 struct flowi6 *fl6, 2284 const struct sk_buff *skb, 2285 int flags) 2286 { 2287 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG) 2288 flags |= RT6_LOOKUP_F_IFACE; 2289 2290 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input); 2291 } 2292 EXPORT_SYMBOL_GPL(ip6_route_input_lookup); 2293 2294 static void ip6_multipath_l3_keys(const struct sk_buff *skb, 2295 struct flow_keys *keys, 2296 struct flow_keys *flkeys) 2297 { 2298 const struct ipv6hdr *outer_iph = ipv6_hdr(skb); 2299 const struct ipv6hdr *key_iph = outer_iph; 2300 struct flow_keys *_flkeys = flkeys; 2301 const struct ipv6hdr *inner_iph; 2302 const struct icmp6hdr *icmph; 2303 struct ipv6hdr _inner_iph; 2304 struct icmp6hdr _icmph; 2305 2306 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6)) 2307 goto out; 2308 2309 icmph = skb_header_pointer(skb, skb_transport_offset(skb), 2310 sizeof(_icmph), &_icmph); 2311 if (!icmph) 2312 goto out; 2313 2314 if (!icmpv6_is_err(icmph->icmp6_type)) 2315 goto out; 2316 2317 inner_iph = skb_header_pointer(skb, 2318 skb_transport_offset(skb) + sizeof(*icmph), 2319 sizeof(_inner_iph), &_inner_iph); 2320 if (!inner_iph) 2321 goto out; 2322 2323 key_iph = inner_iph; 2324 _flkeys = NULL; 2325 out: 2326 if (_flkeys) { 2327 keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src; 2328 keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst; 2329 keys->tags.flow_label = _flkeys->tags.flow_label; 2330 keys->basic.ip_proto = _flkeys->basic.ip_proto; 2331 } else { 2332 keys->addrs.v6addrs.src = key_iph->saddr; 2333 keys->addrs.v6addrs.dst = key_iph->daddr; 2334 keys->tags.flow_label = ip6_flowlabel(key_iph); 2335 keys->basic.ip_proto = key_iph->nexthdr; 2336 } 2337 } 2338 2339 static u32 rt6_multipath_custom_hash_outer(const struct net *net, 2340 const struct sk_buff *skb, 2341 bool *p_has_inner) 2342 { 2343 u32 hash_fields = ip6_multipath_hash_fields(net); 2344 struct flow_keys keys, hash_keys; 2345 2346 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK)) 2347 return 0; 2348 2349 memset(&hash_keys, 0, sizeof(hash_keys)); 2350 skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP); 2351 2352 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2353 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) 2354 hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src; 2355 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) 2356 hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst; 2357 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO) 2358 hash_keys.basic.ip_proto = keys.basic.ip_proto; 2359 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL) 2360 hash_keys.tags.flow_label = 
keys.tags.flow_label; 2361 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) 2362 hash_keys.ports.src = keys.ports.src; 2363 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT) 2364 hash_keys.ports.dst = keys.ports.dst; 2365 2366 *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION); 2367 return flow_hash_from_keys(&hash_keys); 2368 } 2369 2370 static u32 rt6_multipath_custom_hash_inner(const struct net *net, 2371 const struct sk_buff *skb, 2372 bool has_inner) 2373 { 2374 u32 hash_fields = ip6_multipath_hash_fields(net); 2375 struct flow_keys keys, hash_keys; 2376 2377 /* We assume the packet carries an encapsulation, but if none was 2378 * encountered during dissection of the outer flow, then there is no 2379 * point in calling the flow dissector again. 2380 */ 2381 if (!has_inner) 2382 return 0; 2383 2384 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)) 2385 return 0; 2386 2387 memset(&hash_keys, 0, sizeof(hash_keys)); 2388 skb_flow_dissect_flow_keys(skb, &keys, 0); 2389 2390 if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION)) 2391 return 0; 2392 2393 if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2394 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2395 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) 2396 hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; 2397 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) 2398 hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; 2399 } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2400 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2401 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) 2402 hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src; 2403 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) 2404 hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst; 2405 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL) 2406 hash_keys.tags.flow_label = keys.tags.flow_label; 2407 } 2408 2409 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO) 2410 hash_keys.basic.ip_proto = keys.basic.ip_proto; 2411 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT) 2412 hash_keys.ports.src = keys.ports.src; 2413 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT) 2414 hash_keys.ports.dst = keys.ports.dst; 2415 2416 return flow_hash_from_keys(&hash_keys); 2417 } 2418 2419 static u32 rt6_multipath_custom_hash_skb(const struct net *net, 2420 const struct sk_buff *skb) 2421 { 2422 u32 mhash, mhash_inner; 2423 bool has_inner = true; 2424 2425 mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner); 2426 mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner); 2427 2428 return jhash_2words(mhash, mhash_inner, 0); 2429 } 2430 2431 static u32 rt6_multipath_custom_hash_fl6(const struct net *net, 2432 const struct flowi6 *fl6) 2433 { 2434 u32 hash_fields = ip6_multipath_hash_fields(net); 2435 struct flow_keys hash_keys; 2436 2437 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK)) 2438 return 0; 2439 2440 memset(&hash_keys, 0, sizeof(hash_keys)); 2441 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2442 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) 2443 hash_keys.addrs.v6addrs.src = fl6->saddr; 2444 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) 2445 hash_keys.addrs.v6addrs.dst = fl6->daddr; 2446 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO) 2447 hash_keys.basic.ip_proto = fl6->flowi6_proto; 2448 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL) 2449 hash_keys.tags.flow_label = 
(__force u32)flowi6_get_flowlabel(fl6); 2450 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) 2451 hash_keys.ports.src = fl6->fl6_sport; 2452 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT) 2453 hash_keys.ports.dst = fl6->fl6_dport; 2454 2455 return flow_hash_from_keys(&hash_keys); 2456 } 2457 2458 /* if skb is set it will be used and fl6 can be NULL */ 2459 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6, 2460 const struct sk_buff *skb, struct flow_keys *flkeys) 2461 { 2462 struct flow_keys hash_keys; 2463 u32 mhash = 0; 2464 2465 switch (ip6_multipath_hash_policy(net)) { 2466 case 0: 2467 memset(&hash_keys, 0, sizeof(hash_keys)); 2468 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2469 if (skb) { 2470 ip6_multipath_l3_keys(skb, &hash_keys, flkeys); 2471 } else { 2472 hash_keys.addrs.v6addrs.src = fl6->saddr; 2473 hash_keys.addrs.v6addrs.dst = fl6->daddr; 2474 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6); 2475 hash_keys.basic.ip_proto = fl6->flowi6_proto; 2476 } 2477 mhash = flow_hash_from_keys(&hash_keys); 2478 break; 2479 case 1: 2480 if (skb) { 2481 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP; 2482 struct flow_keys keys; 2483 2484 /* short-circuit if we already have L4 hash present */ 2485 if (skb->l4_hash) 2486 return skb_get_hash_raw(skb) >> 1; 2487 2488 memset(&hash_keys, 0, sizeof(hash_keys)); 2489 2490 if (!flkeys) { 2491 skb_flow_dissect_flow_keys(skb, &keys, flag); 2492 flkeys = &keys; 2493 } 2494 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2495 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src; 2496 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst; 2497 hash_keys.ports.src = flkeys->ports.src; 2498 hash_keys.ports.dst = flkeys->ports.dst; 2499 hash_keys.basic.ip_proto = flkeys->basic.ip_proto; 2500 } else { 2501 memset(&hash_keys, 0, sizeof(hash_keys)); 2502 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2503 hash_keys.addrs.v6addrs.src = fl6->saddr; 2504 hash_keys.addrs.v6addrs.dst = fl6->daddr; 2505 hash_keys.ports.src = fl6->fl6_sport; 2506 hash_keys.ports.dst = fl6->fl6_dport; 2507 hash_keys.basic.ip_proto = fl6->flowi6_proto; 2508 } 2509 mhash = flow_hash_from_keys(&hash_keys); 2510 break; 2511 case 2: 2512 memset(&hash_keys, 0, sizeof(hash_keys)); 2513 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2514 if (skb) { 2515 struct flow_keys keys; 2516 2517 if (!flkeys) { 2518 skb_flow_dissect_flow_keys(skb, &keys, 0); 2519 flkeys = &keys; 2520 } 2521 2522 /* Inner can be v4 or v6 */ 2523 if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2524 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2525 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src; 2526 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst; 2527 } else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2528 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2529 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src; 2530 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst; 2531 hash_keys.tags.flow_label = flkeys->tags.flow_label; 2532 hash_keys.basic.ip_proto = flkeys->basic.ip_proto; 2533 } else { 2534 /* Same as case 0 */ 2535 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2536 ip6_multipath_l3_keys(skb, &hash_keys, flkeys); 2537 } 2538 } else { 2539 /* Same as case 0 */ 2540 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2541 hash_keys.addrs.v6addrs.src = fl6->saddr; 2542 
hash_keys.addrs.v6addrs.dst = fl6->daddr; 2543 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6); 2544 hash_keys.basic.ip_proto = fl6->flowi6_proto; 2545 } 2546 mhash = flow_hash_from_keys(&hash_keys); 2547 break; 2548 case 3: 2549 if (skb) 2550 mhash = rt6_multipath_custom_hash_skb(net, skb); 2551 else 2552 mhash = rt6_multipath_custom_hash_fl6(net, fl6); 2553 break; 2554 } 2555 2556 return mhash >> 1; 2557 } 2558 2559 /* Called with rcu held */ 2560 void ip6_route_input(struct sk_buff *skb) 2561 { 2562 const struct ipv6hdr *iph = ipv6_hdr(skb); 2563 struct net *net = dev_net(skb->dev); 2564 int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF; 2565 struct ip_tunnel_info *tun_info; 2566 struct flowi6 fl6 = { 2567 .flowi6_iif = skb->dev->ifindex, 2568 .daddr = iph->daddr, 2569 .saddr = iph->saddr, 2570 .flowlabel = ip6_flowinfo(iph), 2571 .flowi6_mark = skb->mark, 2572 .flowi6_proto = iph->nexthdr, 2573 }; 2574 struct flow_keys *flkeys = NULL, _flkeys; 2575 2576 tun_info = skb_tunnel_info(skb); 2577 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX)) 2578 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id; 2579 2580 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys)) 2581 flkeys = &_flkeys; 2582 2583 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6)) 2584 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys); 2585 skb_dst_drop(skb); 2586 skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev, 2587 &fl6, skb, flags)); 2588 } 2589 2590 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net, 2591 struct fib6_table *table, 2592 struct flowi6 *fl6, 2593 const struct sk_buff *skb, 2594 int flags) 2595 { 2596 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags); 2597 } 2598 2599 static struct dst_entry *ip6_route_output_flags_noref(struct net *net, 2600 const struct sock *sk, 2601 struct flowi6 *fl6, 2602 int flags) 2603 { 2604 bool any_src; 2605 2606 if (ipv6_addr_type(&fl6->daddr) & 2607 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) { 2608 struct dst_entry *dst; 2609 2610 /* This function does not take refcnt on the dst */ 2611 dst = l3mdev_link_scope_lookup(net, fl6); 2612 if (dst) 2613 return dst; 2614 } 2615 2616 fl6->flowi6_iif = LOOPBACK_IFINDEX; 2617 2618 flags |= RT6_LOOKUP_F_DST_NOREF; 2619 any_src = ipv6_addr_any(&fl6->saddr); 2620 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || 2621 (fl6->flowi6_oif && any_src)) 2622 flags |= RT6_LOOKUP_F_IFACE; 2623 2624 if (!any_src) 2625 flags |= RT6_LOOKUP_F_HAS_SADDR; 2626 else if (sk) 2627 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs); 2628 2629 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output); 2630 } 2631 2632 struct dst_entry *ip6_route_output_flags(struct net *net, 2633 const struct sock *sk, 2634 struct flowi6 *fl6, 2635 int flags) 2636 { 2637 struct dst_entry *dst; 2638 struct rt6_info *rt6; 2639 2640 rcu_read_lock(); 2641 dst = ip6_route_output_flags_noref(net, sk, fl6, flags); 2642 rt6 = (struct rt6_info *)dst; 2643 /* For dst cached in uncached_list, refcnt is already taken. 
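 * Otherwise the dst is still owned by the fib6 tree and we must take one here, falling back to ip6_null_entry if the entry is already being destroyed.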
*/ 2644 if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) { 2645 dst = &net->ipv6.ip6_null_entry->dst; 2646 dst_hold(dst); 2647 } 2648 rcu_read_unlock(); 2649 2650 return dst; 2651 } 2652 EXPORT_SYMBOL_GPL(ip6_route_output_flags); 2653 2654 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig) 2655 { 2656 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig; 2657 struct net_device *loopback_dev = net->loopback_dev; 2658 struct dst_entry *new = NULL; 2659 2660 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1, 2661 DST_OBSOLETE_DEAD, 0); 2662 if (rt) { 2663 rt6_info_init(rt); 2664 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc); 2665 2666 new = &rt->dst; 2667 new->__use = 1; 2668 new->input = dst_discard; 2669 new->output = dst_discard_out; 2670 2671 dst_copy_metrics(new, &ort->dst); 2672 2673 rt->rt6i_idev = in6_dev_get(loopback_dev); 2674 rt->rt6i_gateway = ort->rt6i_gateway; 2675 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU; 2676 2677 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); 2678 #ifdef CONFIG_IPV6_SUBTREES 2679 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key)); 2680 #endif 2681 } 2682 2683 dst_release(dst_orig); 2684 return new ? new : ERR_PTR(-ENOMEM); 2685 } 2686 2687 /* 2688 * Destination cache support functions 2689 */ 2690 2691 static bool fib6_check(struct fib6_info *f6i, u32 cookie) 2692 { 2693 u32 rt_cookie = 0; 2694 2695 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie) 2696 return false; 2697 2698 if (fib6_check_expired(f6i)) 2699 return false; 2700 2701 return true; 2702 } 2703 2704 static struct dst_entry *rt6_check(struct rt6_info *rt, 2705 struct fib6_info *from, 2706 u32 cookie) 2707 { 2708 u32 rt_cookie = 0; 2709 2710 if (!from || !fib6_get_cookie_safe(from, &rt_cookie) || 2711 rt_cookie != cookie) 2712 return NULL; 2713 2714 if (rt6_check_expired(rt)) 2715 return NULL; 2716 2717 return &rt->dst; 2718 } 2719 2720 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, 2721 struct fib6_info *from, 2722 u32 cookie) 2723 { 2724 if (!__rt6_check_expired(rt) && 2725 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && 2726 fib6_check(from, cookie)) 2727 return &rt->dst; 2728 else 2729 return NULL; 2730 } 2731 2732 INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst, 2733 u32 cookie) 2734 { 2735 struct dst_entry *dst_ret; 2736 struct fib6_info *from; 2737 struct rt6_info *rt; 2738 2739 rt = container_of(dst, struct rt6_info, dst); 2740 2741 if (rt->sernum) 2742 return rt6_is_valid(rt) ? dst : NULL; 2743 2744 rcu_read_lock(); 2745 2746 /* All IPV6 dsts are created with ->obsolete set to the value 2747 * DST_OBSOLETE_FORCE_CHK which forces validation calls down 2748 * into this function always. 
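 * The cookie is the fib6 node's fn_sernum sampled at lookup time; any change to the tree bumps the sernum and so invalidates cached dsts.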
2749 */ 2750 2751 from = rcu_dereference(rt->from); 2752 2753 if (from && (rt->rt6i_flags & RTF_PCPU || 2754 unlikely(!list_empty(&rt->dst.rt_uncached)))) 2755 dst_ret = rt6_dst_from_check(rt, from, cookie); 2756 else 2757 dst_ret = rt6_check(rt, from, cookie); 2758 2759 rcu_read_unlock(); 2760 2761 return dst_ret; 2762 } 2763 EXPORT_INDIRECT_CALLABLE(ip6_dst_check); 2764 2765 static void ip6_negative_advice(struct sock *sk, 2766 struct dst_entry *dst) 2767 { 2768 struct rt6_info *rt = (struct rt6_info *) dst; 2769 2770 if (rt->rt6i_flags & RTF_CACHE) { 2771 rcu_read_lock(); 2772 if (rt6_check_expired(rt)) { 2773 /* counteract the dst_release() in sk_dst_reset() */ 2774 dst_hold(dst); 2775 sk_dst_reset(sk); 2776 2777 rt6_remove_exception_rt(rt); 2778 } 2779 rcu_read_unlock(); 2780 return; 2781 } 2782 sk_dst_reset(sk); 2783 } 2784 2785 static void ip6_link_failure(struct sk_buff *skb) 2786 { 2787 struct rt6_info *rt; 2788 2789 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); 2790 2791 rt = (struct rt6_info *) skb_dst(skb); 2792 if (rt) { 2793 rcu_read_lock(); 2794 if (rt->rt6i_flags & RTF_CACHE) { 2795 rt6_remove_exception_rt(rt); 2796 } else { 2797 struct fib6_info *from; 2798 struct fib6_node *fn; 2799 2800 from = rcu_dereference(rt->from); 2801 if (from) { 2802 fn = rcu_dereference(from->fib6_node); 2803 if (fn && (rt->rt6i_flags & RTF_DEFAULT)) 2804 WRITE_ONCE(fn->fn_sernum, -1); 2805 } 2806 } 2807 rcu_read_unlock(); 2808 } 2809 } 2810 2811 static void rt6_update_expires(struct rt6_info *rt0, int timeout) 2812 { 2813 if (!(rt0->rt6i_flags & RTF_EXPIRES)) { 2814 struct fib6_info *from; 2815 2816 rcu_read_lock(); 2817 from = rcu_dereference(rt0->from); 2818 if (from) 2819 rt0->dst.expires = from->expires; 2820 rcu_read_unlock(); 2821 } 2822 2823 dst_set_expires(&rt0->dst, timeout); 2824 rt0->rt6i_flags |= RTF_EXPIRES; 2825 } 2826 2827 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu) 2828 { 2829 struct net *net = dev_net(rt->dst.dev); 2830 2831 dst_metric_set(&rt->dst, RTAX_MTU, mtu); 2832 rt->rt6i_flags |= RTF_MODIFIED; 2833 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); 2834 } 2835 2836 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) 2837 { 2838 return !(rt->rt6i_flags & RTF_CACHE) && 2839 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from)); 2840 } 2841 2842 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, 2843 const struct ipv6hdr *iph, u32 mtu, 2844 bool confirm_neigh) 2845 { 2846 const struct in6_addr *daddr, *saddr; 2847 struct rt6_info *rt6 = (struct rt6_info *)dst; 2848 2849 /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU) 2850 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it. 
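 * A learned PMTU is applied either to this dst directly or to a cached clone inserted into the nexthop exception table below; either way it times out after net->ipv6.sysctl.ip6_rt_mtu_expires (see rt6_do_update_pmtu()).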
2851 * [see also comment in rt6_mtu_change_route()] 2852 */ 2853 2854 if (iph) { 2855 daddr = &iph->daddr; 2856 saddr = &iph->saddr; 2857 } else if (sk) { 2858 daddr = &sk->sk_v6_daddr; 2859 saddr = &inet6_sk(sk)->saddr; 2860 } else { 2861 daddr = NULL; 2862 saddr = NULL; 2863 } 2864 2865 if (confirm_neigh) 2866 dst_confirm_neigh(dst, daddr); 2867 2868 if (mtu < IPV6_MIN_MTU) 2869 return; 2870 if (mtu >= dst_mtu(dst)) 2871 return; 2872 2873 if (!rt6_cache_allowed_for_pmtu(rt6)) { 2874 rt6_do_update_pmtu(rt6, mtu); 2875 /* update rt6_ex->stamp for cache */ 2876 if (rt6->rt6i_flags & RTF_CACHE) 2877 rt6_update_exception_stamp_rt(rt6); 2878 } else if (daddr) { 2879 struct fib6_result res = {}; 2880 struct rt6_info *nrt6; 2881 2882 rcu_read_lock(); 2883 res.f6i = rcu_dereference(rt6->from); 2884 if (!res.f6i) 2885 goto out_unlock; 2886 2887 res.fib6_flags = res.f6i->fib6_flags; 2888 res.fib6_type = res.f6i->fib6_type; 2889 2890 if (res.f6i->nh) { 2891 struct fib6_nh_match_arg arg = { 2892 .dev = dst->dev, 2893 .gw = &rt6->rt6i_gateway, 2894 }; 2895 2896 nexthop_for_each_fib6_nh(res.f6i->nh, 2897 fib6_nh_find_match, &arg); 2898 2899 /* fib6_info uses a nexthop that does not have fib6_nh 2900 * using the dst->dev + gw. Should be impossible. 2901 */ 2902 if (!arg.match) 2903 goto out_unlock; 2904 2905 res.nh = arg.match; 2906 } else { 2907 res.nh = res.f6i->fib6_nh; 2908 } 2909 2910 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr); 2911 if (nrt6) { 2912 rt6_do_update_pmtu(nrt6, mtu); 2913 if (rt6_insert_exception(nrt6, &res)) 2914 dst_release_immediate(&nrt6->dst); 2915 } 2916 out_unlock: 2917 rcu_read_unlock(); 2918 } 2919 } 2920 2921 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 2922 struct sk_buff *skb, u32 mtu, 2923 bool confirm_neigh) 2924 { 2925 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu, 2926 confirm_neigh); 2927 } 2928 2929 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, 2930 int oif, u32 mark, kuid_t uid) 2931 { 2932 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data; 2933 struct dst_entry *dst; 2934 struct flowi6 fl6 = { 2935 .flowi6_oif = oif, 2936 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark), 2937 .daddr = iph->daddr, 2938 .saddr = iph->saddr, 2939 .flowlabel = ip6_flowinfo(iph), 2940 .flowi6_uid = uid, 2941 }; 2942 2943 dst = ip6_route_output(net, NULL, &fl6); 2944 if (!dst->error) 2945 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true); 2946 dst_release(dst); 2947 } 2948 EXPORT_SYMBOL_GPL(ip6_update_pmtu); 2949 2950 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) 2951 { 2952 int oif = sk->sk_bound_dev_if; 2953 struct dst_entry *dst; 2954 2955 if (!oif && skb->dev) 2956 oif = l3mdev_master_ifindex(skb->dev); 2957 2958 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark), 2959 sk->sk_uid); 2960 2961 dst = __sk_dst_get(sk); 2962 if (!dst || !dst->obsolete || 2963 dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) 2964 return; 2965 2966 bh_lock_sock(sk); 2967 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) 2968 ip6_datagram_dst_update(sk, false); 2969 bh_unlock_sock(sk); 2970 } 2971 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu); 2972 2973 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst, 2974 const struct flowi6 *fl6) 2975 { 2976 #ifdef CONFIG_IPV6_SUBTREES 2977 struct ipv6_pinfo *np = inet6_sk(sk); 2978 #endif 2979 2980 ip6_dst_store(sk, dst, 2981 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ? 
2982 &sk->sk_v6_daddr : NULL, 2983 #ifdef CONFIG_IPV6_SUBTREES 2984 ipv6_addr_equal(&fl6->saddr, &np->saddr) ? 2985 &np->saddr : 2986 #endif 2987 NULL); 2988 } 2989 2990 static bool ip6_redirect_nh_match(const struct fib6_result *res, 2991 struct flowi6 *fl6, 2992 const struct in6_addr *gw, 2993 struct rt6_info **ret) 2994 { 2995 const struct fib6_nh *nh = res->nh; 2996 2997 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family || 2998 fl6->flowi6_oif != nh->fib_nh_dev->ifindex) 2999 return false; 3000 3001 /* rt_cache's gateway might be different from its 'parent' 3002 * in the case of an ip redirect. 3003 * So we keep searching in the exception table if the gateway 3004 * is different. 3005 */ 3006 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) { 3007 struct rt6_info *rt_cache; 3008 3009 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr); 3010 if (rt_cache && 3011 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) { 3012 *ret = rt_cache; 3013 return true; 3014 } 3015 return false; 3016 } 3017 return true; 3018 } 3019 3020 struct fib6_nh_rd_arg { 3021 struct fib6_result *res; 3022 struct flowi6 *fl6; 3023 const struct in6_addr *gw; 3024 struct rt6_info **ret; 3025 }; 3026 3027 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg) 3028 { 3029 struct fib6_nh_rd_arg *arg = _arg; 3030 3031 arg->res->nh = nh; 3032 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret); 3033 } 3034 3035 /* Handle redirects */ 3036 struct ip6rd_flowi { 3037 struct flowi6 fl6; 3038 struct in6_addr gateway; 3039 }; 3040 3041 INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net, 3042 struct fib6_table *table, 3043 struct flowi6 *fl6, 3044 const struct sk_buff *skb, 3045 int flags) 3046 { 3047 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6; 3048 struct rt6_info *ret = NULL; 3049 struct fib6_result res = {}; 3050 struct fib6_nh_rd_arg arg = { 3051 .res = &res, 3052 .fl6 = fl6, 3053 .gw = &rdfl->gateway, 3054 .ret = &ret 3055 }; 3056 struct fib6_info *rt; 3057 struct fib6_node *fn; 3058 3059 /* Get the "current" route for this destination and 3060 * check if the redirect has come from appropriate router. 3061 * 3062 * RFC 4861 specifies that redirects should only be 3063 * accepted if they come from the nexthop to the target. 3064 * Due to the way the routes are chosen, this notion 3065 * is a bit fuzzy and one might need to check all possible 3066 * routes. 
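 * We therefore walk every route in the matching node (backtracking on no match) and also search each candidate's exception table.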
3067 */ 3068 3069 rcu_read_lock(); 3070 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 3071 restart: 3072 for_each_fib6_node_rt_rcu(fn) { 3073 res.f6i = rt; 3074 if (fib6_check_expired(rt)) 3075 continue; 3076 if (rt->fib6_flags & RTF_REJECT) 3077 break; 3078 if (unlikely(rt->nh)) { 3079 if (nexthop_is_blackhole(rt->nh)) 3080 continue; 3081 /* on match, res->nh is filled in and potentially ret */ 3082 if (nexthop_for_each_fib6_nh(rt->nh, 3083 fib6_nh_redirect_match, 3084 &arg)) 3085 goto out; 3086 } else { 3087 res.nh = rt->fib6_nh; 3088 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway, 3089 &ret)) 3090 goto out; 3091 } 3092 } 3093 3094 if (!rt) 3095 rt = net->ipv6.fib6_null_entry; 3096 else if (rt->fib6_flags & RTF_REJECT) { 3097 ret = net->ipv6.ip6_null_entry; 3098 goto out; 3099 } 3100 3101 if (rt == net->ipv6.fib6_null_entry) { 3102 fn = fib6_backtrack(fn, &fl6->saddr); 3103 if (fn) 3104 goto restart; 3105 } 3106 3107 res.f6i = rt; 3108 res.nh = rt->fib6_nh; 3109 out: 3110 if (ret) { 3111 ip6_hold_safe(net, &ret); 3112 } else { 3113 res.fib6_flags = res.f6i->fib6_flags; 3114 res.fib6_type = res.f6i->fib6_type; 3115 ret = ip6_create_rt_rcu(&res); 3116 } 3117 3118 rcu_read_unlock(); 3119 3120 trace_fib6_table_lookup(net, &res, table, fl6); 3121 return ret; 3122 }; 3123 3124 static struct dst_entry *ip6_route_redirect(struct net *net, 3125 const struct flowi6 *fl6, 3126 const struct sk_buff *skb, 3127 const struct in6_addr *gateway) 3128 { 3129 int flags = RT6_LOOKUP_F_HAS_SADDR; 3130 struct ip6rd_flowi rdfl; 3131 3132 rdfl.fl6 = *fl6; 3133 rdfl.gateway = *gateway; 3134 3135 return fib6_rule_lookup(net, &rdfl.fl6, skb, 3136 flags, __ip6_route_redirect); 3137 } 3138 3139 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, 3140 kuid_t uid) 3141 { 3142 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data; 3143 struct dst_entry *dst; 3144 struct flowi6 fl6 = { 3145 .flowi6_iif = LOOPBACK_IFINDEX, 3146 .flowi6_oif = oif, 3147 .flowi6_mark = mark, 3148 .daddr = iph->daddr, 3149 .saddr = iph->saddr, 3150 .flowlabel = ip6_flowinfo(iph), 3151 .flowi6_uid = uid, 3152 }; 3153 3154 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr); 3155 rt6_do_redirect(dst, NULL, skb); 3156 dst_release(dst); 3157 } 3158 EXPORT_SYMBOL_GPL(ip6_redirect); 3159 3160 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif) 3161 { 3162 const struct ipv6hdr *iph = ipv6_hdr(skb); 3163 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb); 3164 struct dst_entry *dst; 3165 struct flowi6 fl6 = { 3166 .flowi6_iif = LOOPBACK_IFINDEX, 3167 .flowi6_oif = oif, 3168 .daddr = msg->dest, 3169 .saddr = iph->daddr, 3170 .flowi6_uid = sock_net_uid(net, NULL), 3171 }; 3172 3173 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr); 3174 rt6_do_redirect(dst, NULL, skb); 3175 dst_release(dst); 3176 } 3177 3178 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) 3179 { 3180 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, 3181 READ_ONCE(sk->sk_mark), sk->sk_uid); 3182 } 3183 EXPORT_SYMBOL_GPL(ip6_sk_redirect); 3184 3185 static unsigned int ip6_default_advmss(const struct dst_entry *dst) 3186 { 3187 struct net_device *dev = dst->dev; 3188 unsigned int mtu = dst_mtu(dst); 3189 struct net *net = dev_net(dev); 3190 3191 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr); 3192 3193 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss) 3194 mtu = net->ipv6.sysctl.ip6_rt_min_advmss; 3195 3196 /* 3197 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and 3198 
* corresponding MSS is IPV6_MAXPLEN - tcp_header_size. 3199 * IPV6_MAXPLEN is also valid and means: "any MSS, 3200 * rely only on pmtu discovery" 3201 */ 3202 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr)) 3203 mtu = IPV6_MAXPLEN; 3204 return mtu; 3205 } 3206 3207 INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst) 3208 { 3209 return ip6_dst_mtu_maybe_forward(dst, false); 3210 } 3211 EXPORT_INDIRECT_CALLABLE(ip6_mtu); 3212 3213 /* MTU selection: 3214 * 1. mtu on route is locked - use it 3215 * 2. mtu from nexthop exception 3216 * 3. mtu from egress device 3217 * 3218 * based on ip6_dst_mtu_forward and exception logic of 3219 * rt6_find_cached_rt; called with rcu_read_lock 3220 */ 3221 u32 ip6_mtu_from_fib6(const struct fib6_result *res, 3222 const struct in6_addr *daddr, 3223 const struct in6_addr *saddr) 3224 { 3225 const struct fib6_nh *nh = res->nh; 3226 struct fib6_info *f6i = res->f6i; 3227 struct inet6_dev *idev; 3228 struct rt6_info *rt; 3229 u32 mtu = 0; 3230 3231 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) { 3232 mtu = f6i->fib6_pmtu; 3233 if (mtu) 3234 goto out; 3235 } 3236 3237 rt = rt6_find_cached_rt(res, daddr, saddr); 3238 if (unlikely(rt)) { 3239 mtu = dst_metric_raw(&rt->dst, RTAX_MTU); 3240 } else { 3241 struct net_device *dev = nh->fib_nh_dev; 3242 3243 mtu = IPV6_MIN_MTU; 3244 idev = __in6_dev_get(dev); 3245 if (idev && idev->cnf.mtu6 > mtu) 3246 mtu = idev->cnf.mtu6; 3247 } 3248 3249 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); 3250 out: 3251 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu); 3252 } 3253 3254 struct dst_entry *icmp6_dst_alloc(struct net_device *dev, 3255 struct flowi6 *fl6) 3256 { 3257 struct dst_entry *dst; 3258 struct rt6_info *rt; 3259 struct inet6_dev *idev = in6_dev_get(dev); 3260 struct net *net = dev_net(dev); 3261 3262 if (unlikely(!idev)) 3263 return ERR_PTR(-ENODEV); 3264 3265 rt = ip6_dst_alloc(net, dev, 0); 3266 if (unlikely(!rt)) { 3267 in6_dev_put(idev); 3268 dst = ERR_PTR(-ENOMEM); 3269 goto out; 3270 } 3271 3272 rt->dst.input = ip6_input; 3273 rt->dst.output = ip6_output; 3274 rt->rt6i_gateway = fl6->daddr; 3275 rt->rt6i_dst.addr = fl6->daddr; 3276 rt->rt6i_dst.plen = 128; 3277 rt->rt6i_idev = idev; 3278 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0); 3279 3280 /* Add this dst into uncached_list so that rt6_disable_ip() can 3281 * do proper release of the net_device 3282 */ 3283 rt6_uncached_list_add(rt); 3284 3285 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0); 3286 3287 out: 3288 return dst; 3289 } 3290 3291 static void ip6_dst_gc(struct dst_ops *ops) 3292 { 3293 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops); 3294 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval; 3295 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity; 3296 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout; 3297 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc; 3298 unsigned int val; 3299 int entries; 3300 3301 if (time_after(rt_last_gc + rt_min_interval, jiffies)) 3302 goto out; 3303 3304 fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true); 3305 entries = dst_entries_get_slow(ops); 3306 if (entries < ops->gc_thresh) 3307 atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1); 3308 out: 3309 val = atomic_read(&net->ipv6.ip6_rt_gc_expire); 3310 atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity)); 3311 } 3312 3313 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg, 3314 const struct in6_addr *gw_addr, u32 tbid, 3315 int 
flags, struct fib6_result *res) 3316 { 3317 struct flowi6 fl6 = { 3318 .flowi6_oif = cfg->fc_ifindex, 3319 .daddr = *gw_addr, 3320 .saddr = cfg->fc_prefsrc, 3321 }; 3322 struct fib6_table *table; 3323 int err; 3324 3325 table = fib6_get_table(net, tbid); 3326 if (!table) 3327 return -EINVAL; 3328 3329 if (!ipv6_addr_any(&cfg->fc_prefsrc)) 3330 flags |= RT6_LOOKUP_F_HAS_SADDR; 3331 3332 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE; 3333 3334 err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags); 3335 if (!err && res->f6i != net->ipv6.fib6_null_entry) 3336 fib6_select_path(net, res, &fl6, cfg->fc_ifindex, 3337 cfg->fc_ifindex != 0, NULL, flags); 3338 3339 return err; 3340 } 3341 3342 static int ip6_route_check_nh_onlink(struct net *net, 3343 struct fib6_config *cfg, 3344 const struct net_device *dev, 3345 struct netlink_ext_ack *extack) 3346 { 3347 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; 3348 const struct in6_addr *gw_addr = &cfg->fc_gateway; 3349 struct fib6_result res = {}; 3350 int err; 3351 3352 err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res); 3353 if (!err && !(res.fib6_flags & RTF_REJECT) && 3354 /* ignore match if it is the default route */ 3355 !ipv6_addr_any(&res.f6i->fib6_dst.addr) && 3356 (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) { 3357 NL_SET_ERR_MSG(extack, 3358 "Nexthop has invalid gateway or device mismatch"); 3359 err = -EINVAL; 3360 } 3361 3362 return err; 3363 } 3364 3365 static int ip6_route_check_nh(struct net *net, 3366 struct fib6_config *cfg, 3367 struct net_device **_dev, 3368 netdevice_tracker *dev_tracker, 3369 struct inet6_dev **idev) 3370 { 3371 const struct in6_addr *gw_addr = &cfg->fc_gateway; 3372 struct net_device *dev = _dev ? *_dev : NULL; 3373 int flags = RT6_LOOKUP_F_IFACE; 3374 struct fib6_result res = {}; 3375 int err = -EHOSTUNREACH; 3376 3377 if (cfg->fc_table) { 3378 err = ip6_nh_lookup_table(net, cfg, gw_addr, 3379 cfg->fc_table, flags, &res); 3380 /* gw_addr can not require a gateway or resolve to a reject 3381 * route. If a device is given, it must match the result. 3382 */ 3383 if (err || res.fib6_flags & RTF_REJECT || 3384 res.nh->fib_nh_gw_family || 3385 (dev && dev != res.nh->fib_nh_dev)) 3386 err = -EHOSTUNREACH; 3387 } 3388 3389 if (err < 0) { 3390 struct flowi6 fl6 = { 3391 .flowi6_oif = cfg->fc_ifindex, 3392 .daddr = *gw_addr, 3393 }; 3394 3395 err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags); 3396 if (err || res.fib6_flags & RTF_REJECT || 3397 res.nh->fib_nh_gw_family) 3398 err = -EHOSTUNREACH; 3399 3400 if (err) 3401 return err; 3402 3403 fib6_select_path(net, &res, &fl6, cfg->fc_ifindex, 3404 cfg->fc_ifindex != 0, NULL, flags); 3405 } 3406 3407 err = 0; 3408 if (dev) { 3409 if (dev != res.nh->fib_nh_dev) 3410 err = -EHOSTUNREACH; 3411 } else { 3412 *_dev = dev = res.nh->fib_nh_dev; 3413 netdev_hold(dev, dev_tracker, GFP_ATOMIC); 3414 *idev = in6_dev_get(dev); 3415 } 3416 3417 return err; 3418 } 3419 3420 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg, 3421 struct net_device **_dev, 3422 netdevice_tracker *dev_tracker, 3423 struct inet6_dev **idev, 3424 struct netlink_ext_ack *extack) 3425 { 3426 const struct in6_addr *gw_addr = &cfg->fc_gateway; 3427 int gwa_type = ipv6_addr_type(gw_addr); 3428 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? 
false : true; 3429 const struct net_device *dev = *_dev; 3430 bool need_addr_check = !dev; 3431 int err = -EINVAL; 3432 3433 /* if gw_addr is local we will fail to detect this in case the 3434 * address is still TENTATIVE (DAD in progress). rt6_lookup() 3435 * will return the already-added prefix route via the interface that 3436 * the prefix route was assigned to, which might be non-loopback. 3437 */ 3438 if (dev && 3439 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) { 3440 NL_SET_ERR_MSG(extack, "Gateway can not be a local address"); 3441 goto out; 3442 } 3443 3444 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) { 3445 /* IPv6 strictly inhibits using non-link-local 3446 * addresses as a nexthop address. 3447 * Otherwise, the router would not be able to send redirects. 3448 * It is very good, but in some (rare!) circumstances 3449 * (SIT, PtP, NBMA NOARP links) it is handy to allow 3450 * some exceptions. --ANK 3451 * We allow IPv4-mapped nexthops to support RFC 4798-type 3452 * addressing. 3453 */ 3454 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) { 3455 NL_SET_ERR_MSG(extack, "Invalid gateway address"); 3456 goto out; 3457 } 3458 3459 rcu_read_lock(); 3460 3461 if (cfg->fc_flags & RTNH_F_ONLINK) 3462 err = ip6_route_check_nh_onlink(net, cfg, dev, extack); 3463 else 3464 err = ip6_route_check_nh(net, cfg, _dev, dev_tracker, 3465 idev); 3466 3467 rcu_read_unlock(); 3468 3469 if (err) 3470 goto out; 3471 } 3472 3473 /* reload in case the device was changed */ 3474 dev = *_dev; 3475 3476 err = -EINVAL; 3477 if (!dev) { 3478 NL_SET_ERR_MSG(extack, "Egress device not specified"); 3479 goto out; 3480 } else if (dev->flags & IFF_LOOPBACK) { 3481 NL_SET_ERR_MSG(extack, 3482 "Egress device can not be loopback device for this route"); 3483 goto out; 3484 } 3485 3486 /* if we did not check gw_addr above, do so now that the 3487 * egress device has been resolved.
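 * (need_addr_check is only set when the caller did not supply a device.)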
3488 */ 3489 if (need_addr_check && 3490 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) { 3491 NL_SET_ERR_MSG(extack, "Gateway can not be a local address"); 3492 goto out; 3493 } 3494 3495 err = 0; 3496 out: 3497 return err; 3498 } 3499 3500 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type) 3501 { 3502 if ((flags & RTF_REJECT) || 3503 (dev && (dev->flags & IFF_LOOPBACK) && 3504 !(addr_type & IPV6_ADDR_LOOPBACK) && 3505 !(flags & (RTF_ANYCAST | RTF_LOCAL)))) 3506 return true; 3507 3508 return false; 3509 } 3510 3511 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, 3512 struct fib6_config *cfg, gfp_t gfp_flags, 3513 struct netlink_ext_ack *extack) 3514 { 3515 netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker; 3516 struct net_device *dev = NULL; 3517 struct inet6_dev *idev = NULL; 3518 int addr_type; 3519 int err; 3520 3521 fib6_nh->fib_nh_family = AF_INET6; 3522 #ifdef CONFIG_IPV6_ROUTER_PREF 3523 fib6_nh->last_probe = jiffies; 3524 #endif 3525 if (cfg->fc_is_fdb) { 3526 fib6_nh->fib_nh_gw6 = cfg->fc_gateway; 3527 fib6_nh->fib_nh_gw_family = AF_INET6; 3528 return 0; 3529 } 3530 3531 err = -ENODEV; 3532 if (cfg->fc_ifindex) { 3533 dev = netdev_get_by_index(net, cfg->fc_ifindex, 3534 dev_tracker, gfp_flags); 3535 if (!dev) 3536 goto out; 3537 idev = in6_dev_get(dev); 3538 if (!idev) 3539 goto out; 3540 } 3541 3542 if (cfg->fc_flags & RTNH_F_ONLINK) { 3543 if (!dev) { 3544 NL_SET_ERR_MSG(extack, 3545 "Nexthop device required for onlink"); 3546 goto out; 3547 } 3548 3549 if (!(dev->flags & IFF_UP)) { 3550 NL_SET_ERR_MSG(extack, "Nexthop device is not up"); 3551 err = -ENETDOWN; 3552 goto out; 3553 } 3554 3555 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK; 3556 } 3557 3558 fib6_nh->fib_nh_weight = 1; 3559 3560 /* We cannot add true routes via loopback here, 3561 * they would result in kernel looping; promote them to reject routes 3562 */ 3563 addr_type = ipv6_addr_type(&cfg->fc_dst); 3564 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) { 3565 /* hold loopback dev/idev if we haven't done so. 
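 * Reject routes are bound to the loopback device, so release the reference on any user-supplied device and pin loopback instead.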
*/ 3566 if (dev != net->loopback_dev) { 3567 if (dev) { 3568 netdev_put(dev, dev_tracker); 3569 in6_dev_put(idev); 3570 } 3571 dev = net->loopback_dev; 3572 netdev_hold(dev, dev_tracker, gfp_flags); 3573 idev = in6_dev_get(dev); 3574 if (!idev) { 3575 err = -ENODEV; 3576 goto out; 3577 } 3578 } 3579 goto pcpu_alloc; 3580 } 3581 3582 if (cfg->fc_flags & RTF_GATEWAY) { 3583 err = ip6_validate_gw(net, cfg, &dev, dev_tracker, 3584 &idev, extack); 3585 if (err) 3586 goto out; 3587 3588 fib6_nh->fib_nh_gw6 = cfg->fc_gateway; 3589 fib6_nh->fib_nh_gw_family = AF_INET6; 3590 } 3591 3592 err = -ENODEV; 3593 if (!dev) 3594 goto out; 3595 3596 if (idev->cnf.disable_ipv6) { 3597 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device"); 3598 err = -EACCES; 3599 goto out; 3600 } 3601 3602 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) { 3603 NL_SET_ERR_MSG(extack, "Nexthop device is not up"); 3604 err = -ENETDOWN; 3605 goto out; 3606 } 3607 3608 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) && 3609 !netif_carrier_ok(dev)) 3610 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN; 3611 3612 err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap, 3613 cfg->fc_encap_type, cfg, gfp_flags, extack); 3614 if (err) 3615 goto out; 3616 3617 pcpu_alloc: 3618 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags); 3619 if (!fib6_nh->rt6i_pcpu) { 3620 err = -ENOMEM; 3621 goto out; 3622 } 3623 3624 fib6_nh->fib_nh_dev = dev; 3625 fib6_nh->fib_nh_oif = dev->ifindex; 3626 err = 0; 3627 out: 3628 if (idev) 3629 in6_dev_put(idev); 3630 3631 if (err) { 3632 lwtstate_put(fib6_nh->fib_nh_lws); 3633 fib6_nh->fib_nh_lws = NULL; 3634 netdev_put(dev, dev_tracker); 3635 } 3636 3637 return err; 3638 } 3639 3640 void fib6_nh_release(struct fib6_nh *fib6_nh) 3641 { 3642 struct rt6_exception_bucket *bucket; 3643 3644 rcu_read_lock(); 3645 3646 fib6_nh_flush_exceptions(fib6_nh, NULL); 3647 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL); 3648 if (bucket) { 3649 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL); 3650 kfree(bucket); 3651 } 3652 3653 rcu_read_unlock(); 3654 3655 fib6_nh_release_dsts(fib6_nh); 3656 free_percpu(fib6_nh->rt6i_pcpu); 3657 3658 fib_nh_common_release(&fib6_nh->nh_common); 3659 } 3660 3661 void fib6_nh_release_dsts(struct fib6_nh *fib6_nh) 3662 { 3663 int cpu; 3664 3665 if (!fib6_nh->rt6i_pcpu) 3666 return; 3667 3668 for_each_possible_cpu(cpu) { 3669 struct rt6_info *pcpu_rt, **ppcpu_rt; 3670 3671 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu); 3672 pcpu_rt = xchg(ppcpu_rt, NULL); 3673 if (pcpu_rt) { 3674 dst_dev_put(&pcpu_rt->dst); 3675 dst_release(&pcpu_rt->dst); 3676 } 3677 } 3678 } 3679 3680 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, 3681 gfp_t gfp_flags, 3682 struct netlink_ext_ack *extack) 3683 { 3684 struct net *net = cfg->fc_nlinfo.nl_net; 3685 struct fib6_info *rt = NULL; 3686 struct nexthop *nh = NULL; 3687 struct fib6_table *table; 3688 struct fib6_nh *fib6_nh; 3689 int err = -EINVAL; 3690 int addr_type; 3691 3692 /* RTF_PCPU is an internal flag; can not be set by userspace */ 3693 if (cfg->fc_flags & RTF_PCPU) { 3694 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU"); 3695 goto out; 3696 } 3697 3698 /* RTF_CACHE is an internal flag; can not be set by userspace */ 3699 if (cfg->fc_flags & RTF_CACHE) { 3700 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE"); 3701 goto out; 3702 } 3703 3704 if (cfg->fc_type > RTN_MAX) { 3705 NL_SET_ERR_MSG(extack, "Invalid route type"); 3706 goto out; 3707 } 3708 3709 if (cfg->fc_dst_len > 
128) { 3710 NL_SET_ERR_MSG(extack, "Invalid prefix length"); 3711 goto out; 3712 } 3713 if (cfg->fc_src_len > 128) { 3714 NL_SET_ERR_MSG(extack, "Invalid source address length"); 3715 goto out; 3716 } 3717 #ifndef CONFIG_IPV6_SUBTREES 3718 if (cfg->fc_src_len) { 3719 NL_SET_ERR_MSG(extack, 3720 "Specifying source address requires IPV6_SUBTREES to be enabled"); 3721 goto out; 3722 } 3723 #endif 3724 if (cfg->fc_nh_id) { 3725 nh = nexthop_find_by_id(net, cfg->fc_nh_id); 3726 if (!nh) { 3727 NL_SET_ERR_MSG(extack, "Nexthop id does not exist"); 3728 goto out; 3729 } 3730 err = fib6_check_nexthop(nh, cfg, extack); 3731 if (err) 3732 goto out; 3733 } 3734 3735 err = -ENOBUFS; 3736 if (cfg->fc_nlinfo.nlh && 3737 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) { 3738 table = fib6_get_table(net, cfg->fc_table); 3739 if (!table) { 3740 pr_warn("NLM_F_CREATE should be specified when creating new route\n"); 3741 table = fib6_new_table(net, cfg->fc_table); 3742 } 3743 } else { 3744 table = fib6_new_table(net, cfg->fc_table); 3745 } 3746 3747 if (!table) 3748 goto out; 3749 3750 err = -ENOMEM; 3751 rt = fib6_info_alloc(gfp_flags, !nh); 3752 if (!rt) 3753 goto out; 3754 3755 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len, 3756 extack); 3757 if (IS_ERR(rt->fib6_metrics)) { 3758 err = PTR_ERR(rt->fib6_metrics); 3759 /* Do not leave garbage there. */ 3760 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics; 3761 goto out_free; 3762 } 3763 3764 if (cfg->fc_flags & RTF_ADDRCONF) 3765 rt->dst_nocount = true; 3766 3767 if (cfg->fc_flags & RTF_EXPIRES) 3768 fib6_set_expires(rt, jiffies + 3769 clock_t_to_jiffies(cfg->fc_expires)); 3770 else 3771 fib6_clean_expires(rt); 3772 3773 if (cfg->fc_protocol == RTPROT_UNSPEC) 3774 cfg->fc_protocol = RTPROT_BOOT; 3775 rt->fib6_protocol = cfg->fc_protocol; 3776 3777 rt->fib6_table = table; 3778 rt->fib6_metric = cfg->fc_metric; 3779 rt->fib6_type = cfg->fc_type ? 
: RTN_UNICAST; 3780 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY; 3781 3782 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); 3783 rt->fib6_dst.plen = cfg->fc_dst_len; 3784 3785 #ifdef CONFIG_IPV6_SUBTREES 3786 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len); 3787 rt->fib6_src.plen = cfg->fc_src_len; 3788 #endif 3789 if (nh) { 3790 if (rt->fib6_src.plen) { 3791 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); 3792 goto out_free; 3793 } 3794 if (!nexthop_get(nh)) { 3795 NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); 3796 goto out_free; 3797 } 3798 rt->nh = nh; 3799 fib6_nh = nexthop_fib6_nh(rt->nh); 3800 } else { 3801 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack); 3802 if (err) 3803 goto out; 3804 3805 fib6_nh = rt->fib6_nh; 3806 3807 /* We cannot add true routes via loopback here, they would 3808 * result in kernel looping; promote them to reject routes 3809 */ 3810 addr_type = ipv6_addr_type(&cfg->fc_dst); 3811 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev, 3812 addr_type)) 3813 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP; 3814 } 3815 3816 if (!ipv6_addr_any(&cfg->fc_prefsrc)) { 3817 struct net_device *dev = fib6_nh->fib_nh_dev; 3818 3819 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) { 3820 NL_SET_ERR_MSG(extack, "Invalid source address"); 3821 err = -EINVAL; 3822 goto out; 3823 } 3824 rt->fib6_prefsrc.addr = cfg->fc_prefsrc; 3825 rt->fib6_prefsrc.plen = 128; 3826 } else 3827 rt->fib6_prefsrc.plen = 0; 3828 3829 return rt; 3830 out: 3831 fib6_info_release(rt); 3832 return ERR_PTR(err); 3833 out_free: 3834 ip_fib_metrics_put(rt->fib6_metrics); 3835 kfree(rt); 3836 return ERR_PTR(err); 3837 } 3838 3839 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, 3840 struct netlink_ext_ack *extack) 3841 { 3842 struct fib6_info *rt; 3843 int err; 3844 3845 rt = ip6_route_info_create(cfg, gfp_flags, extack); 3846 if (IS_ERR(rt)) 3847 return PTR_ERR(rt); 3848 3849 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack); 3850 fib6_info_release(rt); 3851 3852 return err; 3853 } 3854 3855 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info) 3856 { 3857 struct net *net = info->nl_net; 3858 struct fib6_table *table; 3859 int err; 3860 3861 if (rt == net->ipv6.fib6_null_entry) { 3862 err = -ENOENT; 3863 goto out; 3864 } 3865 3866 table = rt->fib6_table; 3867 spin_lock_bh(&table->tb6_lock); 3868 err = fib6_del(rt, info); 3869 spin_unlock_bh(&table->tb6_lock); 3870 3871 out: 3872 fib6_info_release(rt); 3873 return err; 3874 } 3875 3876 int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify) 3877 { 3878 struct nl_info info = { 3879 .nl_net = net, 3880 .skip_notify = skip_notify 3881 }; 3882 3883 return __ip6_del_rt(rt, &info); 3884 } 3885 3886 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg) 3887 { 3888 struct nl_info *info = &cfg->fc_nlinfo; 3889 struct net *net = info->nl_net; 3890 struct sk_buff *skb = NULL; 3891 struct fib6_table *table; 3892 int err = -ENOENT; 3893 3894 if (rt == net->ipv6.fib6_null_entry) 3895 goto out_put; 3896 table = rt->fib6_table; 3897 spin_lock_bh(&table->tb6_lock); 3898 3899 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) { 3900 struct fib6_info *sibling, *next_sibling; 3901 struct fib6_node *fn; 3902 3903 /* prefer to send a single notification with all hops */ 3904 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 3905 if (skb) { 3906 u32 seq = info->nlh ? 
info->nlh->nlmsg_seq : 0; 3907 3908 if (rt6_fill_node(net, skb, rt, NULL, 3909 NULL, NULL, 0, RTM_DELROUTE, 3910 info->portid, seq, 0) < 0) { 3911 kfree_skb(skb); 3912 skb = NULL; 3913 } else 3914 info->skip_notify = 1; 3915 } 3916 3917 /* 'rt' points to the first sibling route. If it is not the 3918 * leaf, then we do not need to send a notification. Otherwise, 3919 * we need to check if the last sibling has a next route or not 3920 * and emit a replace or delete notification, respectively. 3921 */ 3922 info->skip_notify_kernel = 1; 3923 fn = rcu_dereference_protected(rt->fib6_node, 3924 lockdep_is_held(&table->tb6_lock)); 3925 if (rcu_access_pointer(fn->leaf) == rt) { 3926 struct fib6_info *last_sibling, *replace_rt; 3927 3928 last_sibling = list_last_entry(&rt->fib6_siblings, 3929 struct fib6_info, 3930 fib6_siblings); 3931 replace_rt = rcu_dereference_protected( 3932 last_sibling->fib6_next, 3933 lockdep_is_held(&table->tb6_lock)); 3934 if (replace_rt) 3935 call_fib6_entry_notifiers_replace(net, 3936 replace_rt); 3937 else 3938 call_fib6_multipath_entry_notifiers(net, 3939 FIB_EVENT_ENTRY_DEL, 3940 rt, rt->fib6_nsiblings, 3941 NULL); 3942 } 3943 list_for_each_entry_safe(sibling, next_sibling, 3944 &rt->fib6_siblings, 3945 fib6_siblings) { 3946 err = fib6_del(sibling, info); 3947 if (err) 3948 goto out_unlock; 3949 } 3950 } 3951 3952 err = fib6_del(rt, info); 3953 out_unlock: 3954 spin_unlock_bh(&table->tb6_lock); 3955 out_put: 3956 fib6_info_release(rt); 3957 3958 if (skb) { 3959 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 3960 info->nlh, gfp_any()); 3961 } 3962 return err; 3963 } 3964 3965 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg) 3966 { 3967 int rc = -ESRCH; 3968 3969 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex) 3970 goto out; 3971 3972 if (cfg->fc_flags & RTF_GATEWAY && 3973 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) 3974 goto out; 3975 3976 rc = rt6_remove_exception_rt(rt); 3977 out: 3978 return rc; 3979 } 3980 3981 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt, 3982 struct fib6_nh *nh) 3983 { 3984 struct fib6_result res = { 3985 .f6i = rt, 3986 .nh = nh, 3987 }; 3988 struct rt6_info *rt_cache; 3989 3990 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src); 3991 if (rt_cache) 3992 return __ip6_del_cached_rt(rt_cache, cfg); 3993 3994 return 0; 3995 } 3996 3997 struct fib6_nh_del_cached_rt_arg { 3998 struct fib6_config *cfg; 3999 struct fib6_info *f6i; 4000 }; 4001 4002 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg) 4003 { 4004 struct fib6_nh_del_cached_rt_arg *arg = _arg; 4005 int rc; 4006 4007 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh); 4008 return rc != -ESRCH ? 
rc : 0; 4009 } 4010 4011 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i) 4012 { 4013 struct fib6_nh_del_cached_rt_arg arg = { 4014 .cfg = cfg, 4015 .f6i = f6i 4016 }; 4017 4018 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg); 4019 } 4020 4021 static int ip6_route_del(struct fib6_config *cfg, 4022 struct netlink_ext_ack *extack) 4023 { 4024 struct fib6_table *table; 4025 struct fib6_info *rt; 4026 struct fib6_node *fn; 4027 int err = -ESRCH; 4028 4029 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table); 4030 if (!table) { 4031 NL_SET_ERR_MSG(extack, "FIB table does not exist"); 4032 return err; 4033 } 4034 4035 rcu_read_lock(); 4036 4037 fn = fib6_locate(&table->tb6_root, 4038 &cfg->fc_dst, cfg->fc_dst_len, 4039 &cfg->fc_src, cfg->fc_src_len, 4040 !(cfg->fc_flags & RTF_CACHE)); 4041 4042 if (fn) { 4043 for_each_fib6_node_rt_rcu(fn) { 4044 struct fib6_nh *nh; 4045 4046 if (rt->nh && cfg->fc_nh_id && 4047 rt->nh->id != cfg->fc_nh_id) 4048 continue; 4049 4050 if (cfg->fc_flags & RTF_CACHE) { 4051 int rc = 0; 4052 4053 if (rt->nh) { 4054 rc = ip6_del_cached_rt_nh(cfg, rt); 4055 } else if (cfg->fc_nh_id) { 4056 continue; 4057 } else { 4058 nh = rt->fib6_nh; 4059 rc = ip6_del_cached_rt(cfg, rt, nh); 4060 } 4061 if (rc != -ESRCH) { 4062 rcu_read_unlock(); 4063 return rc; 4064 } 4065 continue; 4066 } 4067 4068 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric) 4069 continue; 4070 if (cfg->fc_protocol && 4071 cfg->fc_protocol != rt->fib6_protocol) 4072 continue; 4073 4074 if (rt->nh) { 4075 if (!fib6_info_hold_safe(rt)) 4076 continue; 4077 rcu_read_unlock(); 4078 4079 return __ip6_del_rt(rt, &cfg->fc_nlinfo); 4080 } 4081 if (cfg->fc_nh_id) 4082 continue; 4083 4084 nh = rt->fib6_nh; 4085 if (cfg->fc_ifindex && 4086 (!nh->fib_nh_dev || 4087 nh->fib_nh_dev->ifindex != cfg->fc_ifindex)) 4088 continue; 4089 if (cfg->fc_flags & RTF_GATEWAY && 4090 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6)) 4091 continue; 4092 if (!fib6_info_hold_safe(rt)) 4093 continue; 4094 rcu_read_unlock(); 4095 4096 /* if gateway was specified only delete the one hop */ 4097 if (cfg->fc_flags & RTF_GATEWAY) 4098 return __ip6_del_rt(rt, &cfg->fc_nlinfo); 4099 4100 return __ip6_del_rt_siblings(rt, cfg); 4101 } 4102 } 4103 rcu_read_unlock(); 4104 4105 return err; 4106 } 4107 4108 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) 4109 { 4110 struct netevent_redirect netevent; 4111 struct rt6_info *rt, *nrt = NULL; 4112 struct fib6_result res = {}; 4113 struct ndisc_options ndopts; 4114 struct inet6_dev *in6_dev; 4115 struct neighbour *neigh; 4116 struct rd_msg *msg; 4117 int optlen, on_link; 4118 u8 *lladdr; 4119 4120 optlen = skb_tail_pointer(skb) - skb_transport_header(skb); 4121 optlen -= sizeof(*msg); 4122 4123 if (optlen < 0) { 4124 net_dbg_ratelimited("rt6_do_redirect: packet too short\n"); 4125 return; 4126 } 4127 4128 msg = (struct rd_msg *)icmp6_hdr(skb); 4129 4130 if (ipv6_addr_is_multicast(&msg->dest)) { 4131 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n"); 4132 return; 4133 } 4134 4135 on_link = 0; 4136 if (ipv6_addr_equal(&msg->dest, &msg->target)) { 4137 on_link = 1; 4138 } else if (ipv6_addr_type(&msg->target) != 4139 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { 4140 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n"); 4141 return; 4142 } 4143 4144 in6_dev = __in6_dev_get(skb->dev); 4145 if (!in6_dev) 4146 return; 4147 if (in6_dev->cnf.forwarding || 
!in6_dev->cnf.accept_redirects) 4148 return; 4149 4150 /* RFC2461 8.1: 4151 * The IP source address of the Redirect MUST be the same as the current 4152 * first-hop router for the specified ICMP Destination Address. 4153 */ 4154 4155 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) { 4156 net_dbg_ratelimited("rt6_redirect: invalid ND options\n"); 4157 return; 4158 } 4159 4160 lladdr = NULL; 4161 if (ndopts.nd_opts_tgt_lladdr) { 4162 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, 4163 skb->dev); 4164 if (!lladdr) { 4165 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n"); 4166 return; 4167 } 4168 } 4169 4170 rt = (struct rt6_info *) dst; 4171 if (rt->rt6i_flags & RTF_REJECT) { 4172 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n"); 4173 return; 4174 } 4175 4176 /* Redirect received -> path was valid. 4177 * Look, redirects are sent only in response to data packets, 4178 * so that this nexthop apparently is reachable. --ANK 4179 */ 4180 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr); 4181 4182 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1); 4183 if (!neigh) 4184 return; 4185 4186 /* 4187 * We have finally decided to accept it. 4188 */ 4189 4190 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE, 4191 NEIGH_UPDATE_F_WEAK_OVERRIDE| 4192 NEIGH_UPDATE_F_OVERRIDE| 4193 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER| 4194 NEIGH_UPDATE_F_ISROUTER)), 4195 NDISC_REDIRECT, &ndopts); 4196 4197 rcu_read_lock(); 4198 res.f6i = rcu_dereference(rt->from); 4199 if (!res.f6i) 4200 goto out; 4201 4202 if (res.f6i->nh) { 4203 struct fib6_nh_match_arg arg = { 4204 .dev = dst->dev, 4205 .gw = &rt->rt6i_gateway, 4206 }; 4207 4208 nexthop_for_each_fib6_nh(res.f6i->nh, 4209 fib6_nh_find_match, &arg); 4210 4211 /* fib6_info uses a nexthop that does not have fib6_nh 4212 * using the dst->dev. Should be impossible 4213 */ 4214 if (!arg.match) 4215 goto out; 4216 res.nh = arg.match; 4217 } else { 4218 res.nh = res.f6i->fib6_nh; 4219 } 4220 4221 res.fib6_flags = res.f6i->fib6_flags; 4222 res.fib6_type = res.f6i->fib6_type; 4223 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL); 4224 if (!nrt) 4225 goto out; 4226 4227 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE; 4228 if (on_link) 4229 nrt->rt6i_flags &= ~RTF_GATEWAY; 4230 4231 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 4232 4233 /* rt6_insert_exception() will take care of duplicated exceptions */ 4234 if (rt6_insert_exception(nrt, &res)) { 4235 dst_release_immediate(&nrt->dst); 4236 goto out; 4237 } 4238 4239 netevent.old = &rt->dst; 4240 netevent.new = &nrt->dst; 4241 netevent.daddr = &msg->dest; 4242 netevent.neigh = neigh; 4243 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 4244 4245 out: 4246 rcu_read_unlock(); 4247 neigh_release(neigh); 4248 } 4249 4250 #ifdef CONFIG_IPV6_ROUTE_INFO 4251 static struct fib6_info *rt6_get_route_info(struct net *net, 4252 const struct in6_addr *prefix, int prefixlen, 4253 const struct in6_addr *gwaddr, 4254 struct net_device *dev) 4255 { 4256 u32 tb_id = l3mdev_fib_table(dev) ? 
: RT6_TABLE_INFO; 4257 int ifindex = dev->ifindex; 4258 struct fib6_node *fn; 4259 struct fib6_info *rt = NULL; 4260 struct fib6_table *table; 4261 4262 table = fib6_get_table(net, tb_id); 4263 if (!table) 4264 return NULL; 4265 4266 rcu_read_lock(); 4267 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true); 4268 if (!fn) 4269 goto out; 4270 4271 for_each_fib6_node_rt_rcu(fn) { 4272 /* these routes do not use nexthops */ 4273 if (rt->nh) 4274 continue; 4275 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex) 4276 continue; 4277 if (!(rt->fib6_flags & RTF_ROUTEINFO) || 4278 !rt->fib6_nh->fib_nh_gw_family) 4279 continue; 4280 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr)) 4281 continue; 4282 if (!fib6_info_hold_safe(rt)) 4283 continue; 4284 break; 4285 } 4286 out: 4287 rcu_read_unlock(); 4288 return rt; 4289 } 4290 4291 static struct fib6_info *rt6_add_route_info(struct net *net, 4292 const struct in6_addr *prefix, int prefixlen, 4293 const struct in6_addr *gwaddr, 4294 struct net_device *dev, 4295 unsigned int pref) 4296 { 4297 struct fib6_config cfg = { 4298 .fc_metric = IP6_RT_PRIO_USER, 4299 .fc_ifindex = dev->ifindex, 4300 .fc_dst_len = prefixlen, 4301 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 4302 RTF_UP | RTF_PREF(pref), 4303 .fc_protocol = RTPROT_RA, 4304 .fc_type = RTN_UNICAST, 4305 .fc_nlinfo.portid = 0, 4306 .fc_nlinfo.nlh = NULL, 4307 .fc_nlinfo.nl_net = net, 4308 }; 4309 4310 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO; 4311 cfg.fc_dst = *prefix; 4312 cfg.fc_gateway = *gwaddr; 4313 4314 /* We should treat it as a default route if prefix length is 0. */ 4315 if (!prefixlen) 4316 cfg.fc_flags |= RTF_DEFAULT; 4317 4318 ip6_route_add(&cfg, GFP_ATOMIC, NULL); 4319 4320 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); 4321 } 4322 #endif 4323 4324 struct fib6_info *rt6_get_dflt_router(struct net *net, 4325 const struct in6_addr *addr, 4326 struct net_device *dev) 4327 { 4328 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT; 4329 struct fib6_info *rt; 4330 struct fib6_table *table; 4331 4332 table = fib6_get_table(net, tb_id); 4333 if (!table) 4334 return NULL; 4335 4336 rcu_read_lock(); 4337 for_each_fib6_node_rt_rcu(&table->tb6_root) { 4338 struct fib6_nh *nh; 4339 4340 /* RA routes do not use nexthops */ 4341 if (rt->nh) 4342 continue; 4343 4344 nh = rt->fib6_nh; 4345 if (dev == nh->fib_nh_dev && 4346 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && 4347 ipv6_addr_equal(&nh->fib_nh_gw6, addr)) 4348 break; 4349 } 4350 if (rt && !fib6_info_hold_safe(rt)) 4351 rt = NULL; 4352 rcu_read_unlock(); 4353 return rt; 4354 } 4355 4356 struct fib6_info *rt6_add_dflt_router(struct net *net, 4357 const struct in6_addr *gwaddr, 4358 struct net_device *dev, 4359 unsigned int pref, 4360 u32 defrtr_usr_metric) 4361 { 4362 struct fib6_config cfg = { 4363 .fc_table = l3mdev_fib_table(dev) ? 
: RT6_TABLE_DFLT, 4364 .fc_metric = defrtr_usr_metric, 4365 .fc_ifindex = dev->ifindex, 4366 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 4367 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 4368 .fc_protocol = RTPROT_RA, 4369 .fc_type = RTN_UNICAST, 4370 .fc_nlinfo.portid = 0, 4371 .fc_nlinfo.nlh = NULL, 4372 .fc_nlinfo.nl_net = net, 4373 }; 4374 4375 cfg.fc_gateway = *gwaddr; 4376 4377 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) { 4378 struct fib6_table *table; 4379 4380 table = fib6_get_table(dev_net(dev), cfg.fc_table); 4381 if (table) 4382 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER; 4383 } 4384 4385 return rt6_get_dflt_router(net, gwaddr, dev); 4386 } 4387 4388 static void __rt6_purge_dflt_routers(struct net *net, 4389 struct fib6_table *table) 4390 { 4391 struct fib6_info *rt; 4392 4393 restart: 4394 rcu_read_lock(); 4395 for_each_fib6_node_rt_rcu(&table->tb6_root) { 4396 struct net_device *dev = fib6_info_nh_dev(rt); 4397 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL; 4398 4399 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) && 4400 (!idev || idev->cnf.accept_ra != 2) && 4401 fib6_info_hold_safe(rt)) { 4402 rcu_read_unlock(); 4403 ip6_del_rt(net, rt, false); 4404 goto restart; 4405 } 4406 } 4407 rcu_read_unlock(); 4408 4409 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER; 4410 } 4411 4412 void rt6_purge_dflt_routers(struct net *net) 4413 { 4414 struct fib6_table *table; 4415 struct hlist_head *head; 4416 unsigned int h; 4417 4418 rcu_read_lock(); 4419 4420 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { 4421 head = &net->ipv6.fib_table_hash[h]; 4422 hlist_for_each_entry_rcu(table, head, tb6_hlist) { 4423 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER) 4424 __rt6_purge_dflt_routers(net, table); 4425 } 4426 } 4427 4428 rcu_read_unlock(); 4429 } 4430 4431 static void rtmsg_to_fib6_config(struct net *net, 4432 struct in6_rtmsg *rtmsg, 4433 struct fib6_config *cfg) 4434 { 4435 *cfg = (struct fib6_config){ 4436 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ? 
4437 : RT6_TABLE_MAIN, 4438 .fc_ifindex = rtmsg->rtmsg_ifindex, 4439 .fc_metric = rtmsg->rtmsg_metric, 4440 .fc_expires = rtmsg->rtmsg_info, 4441 .fc_dst_len = rtmsg->rtmsg_dst_len, 4442 .fc_src_len = rtmsg->rtmsg_src_len, 4443 .fc_flags = rtmsg->rtmsg_flags, 4444 .fc_type = rtmsg->rtmsg_type, 4445 4446 .fc_nlinfo.nl_net = net, 4447 4448 .fc_dst = rtmsg->rtmsg_dst, 4449 .fc_src = rtmsg->rtmsg_src, 4450 .fc_gateway = rtmsg->rtmsg_gateway, 4451 }; 4452 } 4453 4454 int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg) 4455 { 4456 struct fib6_config cfg; 4457 int err; 4458 4459 if (cmd != SIOCADDRT && cmd != SIOCDELRT) 4460 return -EINVAL; 4461 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 4462 return -EPERM; 4463 4464 rtmsg_to_fib6_config(net, rtmsg, &cfg); 4465 4466 rtnl_lock(); 4467 switch (cmd) { 4468 case SIOCADDRT: 4469 /* Only do the default setting of fc_metric in route adding */ 4470 if (cfg.fc_metric == 0) 4471 cfg.fc_metric = IP6_RT_PRIO_USER; 4472 err = ip6_route_add(&cfg, GFP_KERNEL, NULL); 4473 break; 4474 case SIOCDELRT: 4475 err = ip6_route_del(&cfg, NULL); 4476 break; 4477 } 4478 rtnl_unlock(); 4479 return err; 4480 } 4481 4482 /* 4483 * Drop the packet on the floor 4484 */ 4485 4486 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes) 4487 { 4488 struct dst_entry *dst = skb_dst(skb); 4489 struct net *net = dev_net(dst->dev); 4490 struct inet6_dev *idev; 4491 SKB_DR(reason); 4492 int type; 4493 4494 if (netif_is_l3_master(skb->dev) || 4495 dst->dev == net->loopback_dev) 4496 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif)); 4497 else 4498 idev = ip6_dst_idev(dst); 4499 4500 switch (ipstats_mib_noroutes) { 4501 case IPSTATS_MIB_INNOROUTES: 4502 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); 4503 if (type == IPV6_ADDR_ANY) { 4504 SKB_DR_SET(reason, IP_INADDRERRORS); 4505 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); 4506 break; 4507 } 4508 SKB_DR_SET(reason, IP_INNOROUTES); 4509 fallthrough; 4510 case IPSTATS_MIB_OUTNOROUTES: 4511 SKB_DR_OR(reason, IP_OUTNOROUTES); 4512 IP6_INC_STATS(net, idev, ipstats_mib_noroutes); 4513 break; 4514 } 4515 4516 /* Start over by dropping the dst for l3mdev case */ 4517 if (netif_is_l3_master(skb->dev)) 4518 skb_dst_drop(skb); 4519 4520 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0); 4521 kfree_skb_reason(skb, reason); 4522 return 0; 4523 } 4524 4525 static int ip6_pkt_discard(struct sk_buff *skb) 4526 { 4527 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES); 4528 } 4529 4530 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) 4531 { 4532 skb->dev = skb_dst(skb)->dev; 4533 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); 4534 } 4535 4536 static int ip6_pkt_prohibit(struct sk_buff *skb) 4537 { 4538 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); 4539 } 4540 4541 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb) 4542 { 4543 skb->dev = skb_dst(skb)->dev; 4544 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); 4545 } 4546 4547 /* 4548 * Allocate a dst for local (unicast / anycast) address. 4549 */ 4550 4551 struct fib6_info *addrconf_f6i_alloc(struct net *net, 4552 struct inet6_dev *idev, 4553 const struct in6_addr *addr, 4554 bool anycast, gfp_t gfp_flags, 4555 struct netlink_ext_ack *extack) 4556 { 4557 struct fib6_config cfg = { 4558 .fc_table = l3mdev_fib_table(idev->dev) ? 
: RT6_TABLE_LOCAL, 4559 .fc_ifindex = idev->dev->ifindex, 4560 .fc_flags = RTF_UP | RTF_NONEXTHOP, 4561 .fc_dst = *addr, 4562 .fc_dst_len = 128, 4563 .fc_protocol = RTPROT_KERNEL, 4564 .fc_nlinfo.nl_net = net, 4565 .fc_ignore_dev_down = true, 4566 }; 4567 struct fib6_info *f6i; 4568 4569 if (anycast) { 4570 cfg.fc_type = RTN_ANYCAST; 4571 cfg.fc_flags |= RTF_ANYCAST; 4572 } else { 4573 cfg.fc_type = RTN_LOCAL; 4574 cfg.fc_flags |= RTF_LOCAL; 4575 } 4576 4577 f6i = ip6_route_info_create(&cfg, gfp_flags, extack); 4578 if (!IS_ERR(f6i)) { 4579 f6i->dst_nocount = true; 4580 4581 if (!anycast && 4582 (net->ipv6.devconf_all->disable_policy || 4583 idev->cnf.disable_policy)) 4584 f6i->dst_nopolicy = true; 4585 } 4586 4587 return f6i; 4588 } 4589 4590 /* remove a deleted IP from prefsrc entries */ 4591 struct arg_dev_net_ip { 4592 struct net *net; 4593 struct in6_addr *addr; 4594 }; 4595 4596 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg) 4597 { 4598 struct net *net = ((struct arg_dev_net_ip *)arg)->net; 4599 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr; 4600 4601 if (!rt->nh && 4602 rt != net->ipv6.fib6_null_entry && 4603 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr) && 4604 !ipv6_chk_addr(net, addr, rt->fib6_nh->fib_nh_dev, 0)) { 4605 spin_lock_bh(&rt6_exception_lock); 4606 /* remove prefsrc entry */ 4607 rt->fib6_prefsrc.plen = 0; 4608 spin_unlock_bh(&rt6_exception_lock); 4609 } 4610 return 0; 4611 } 4612 4613 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp) 4614 { 4615 struct net *net = dev_net(ifp->idev->dev); 4616 struct arg_dev_net_ip adni = { 4617 .net = net, 4618 .addr = &ifp->addr, 4619 }; 4620 fib6_clean_all(net, fib6_remove_prefsrc, &adni); 4621 } 4622 4623 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT) 4624 4625 /* Remove routers and update dst entries when a gateway turns into a host. */ 4626 static int fib6_clean_tohost(struct fib6_info *rt, void *arg) 4627 { 4628 struct in6_addr *gateway = (struct in6_addr *)arg; 4629 struct fib6_nh *nh; 4630 4631 /* RA routes do not use nexthops */ 4632 if (rt->nh) 4633 return 0; 4634 4635 nh = rt->fib6_nh; 4636 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) && 4637 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6)) 4638 return -1; 4639 4640 /* Further clean up cached routes in the exception table. 4641 * This is needed because a cached route may have a different 4642 * gateway than its 'parent' in the case of an IP redirect.
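* (Illustrative scenario: rt6_do_redirect() above installs a cached exception whose gateway is the redirect target, so an exception can name a gateway the parent route never had; cleaning only by the parent's gateway would leave that stale exception behind.)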
4643 */ 4644 fib6_nh_exceptions_clean_tohost(nh, gateway); 4645 4646 return 0; 4647 } 4648 4649 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway) 4650 { 4651 fib6_clean_all(net, fib6_clean_tohost, gateway); 4652 } 4653 4654 struct arg_netdev_event { 4655 const struct net_device *dev; 4656 union { 4657 unsigned char nh_flags; 4658 unsigned long event; 4659 }; 4660 }; 4661 4662 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt) 4663 { 4664 struct fib6_info *iter; 4665 struct fib6_node *fn; 4666 4667 fn = rcu_dereference_protected(rt->fib6_node, 4668 lockdep_is_held(&rt->fib6_table->tb6_lock)); 4669 iter = rcu_dereference_protected(fn->leaf, 4670 lockdep_is_held(&rt->fib6_table->tb6_lock)); 4671 while (iter) { 4672 if (iter->fib6_metric == rt->fib6_metric && 4673 rt6_qualify_for_ecmp(iter)) 4674 return iter; 4675 iter = rcu_dereference_protected(iter->fib6_next, 4676 lockdep_is_held(&rt->fib6_table->tb6_lock)); 4677 } 4678 4679 return NULL; 4680 } 4681 4682 /* only called for fib entries with builtin fib6_nh */ 4683 static bool rt6_is_dead(const struct fib6_info *rt) 4684 { 4685 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD || 4686 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN && 4687 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev))) 4688 return true; 4689 4690 return false; 4691 } 4692 4693 static int rt6_multipath_total_weight(const struct fib6_info *rt) 4694 { 4695 struct fib6_info *iter; 4696 int total = 0; 4697 4698 if (!rt6_is_dead(rt)) 4699 total += rt->fib6_nh->fib_nh_weight; 4700 4701 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) { 4702 if (!rt6_is_dead(iter)) 4703 total += iter->fib6_nh->fib_nh_weight; 4704 } 4705 4706 return total; 4707 } 4708 4709 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total) 4710 { 4711 int upper_bound = -1; 4712 4713 if (!rt6_is_dead(rt)) { 4714 *weight += rt->fib6_nh->fib_nh_weight; 4715 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31, 4716 total) - 1; 4717 } 4718 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound); 4719 } 4720 4721 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total) 4722 { 4723 struct fib6_info *iter; 4724 int weight = 0; 4725 4726 rt6_upper_bound_set(rt, &weight, total); 4727 4728 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 4729 rt6_upper_bound_set(iter, &weight, total); 4730 } 4731 4732 void rt6_multipath_rebalance(struct fib6_info *rt) 4733 { 4734 struct fib6_info *first; 4735 int total; 4736 4737 /* In case the entire multipath route was marked for flushing, 4738 * then there is no need to rebalance upon the removal of every 4739 * sibling route. 4740 */ 4741 if (!rt->fib6_nsiblings || rt->should_flush) 4742 return; 4743 4744 /* During lookup routes are evaluated in order, so we need to 4745 * make sure upper bounds are assigned from the first sibling 4746 * onwards. 
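* (Worked example from the math in rt6_upper_bound_set(): two live nexthops with weights 1 and 3 give total = 4, so the first is assigned upper bound (1 << 31) / 4 - 1 = 0x1fffffff and the second (4 << 31) / 4 - 1 = 0x7fffffff; a multipath hash at or below 0x1fffffff then selects the first nexthop.)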
4747 */ 4748 first = rt6_multipath_first_sibling(rt); 4749 if (WARN_ON_ONCE(!first)) 4750 return; 4751 4752 total = rt6_multipath_total_weight(first); 4753 rt6_multipath_upper_bound_set(first, total); 4754 } 4755 4756 static int fib6_ifup(struct fib6_info *rt, void *p_arg) 4757 { 4758 const struct arg_netdev_event *arg = p_arg; 4759 struct net *net = dev_net(arg->dev); 4760 4761 if (rt != net->ipv6.fib6_null_entry && !rt->nh && 4762 rt->fib6_nh->fib_nh_dev == arg->dev) { 4763 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags; 4764 fib6_update_sernum_upto_root(net, rt); 4765 rt6_multipath_rebalance(rt); 4766 } 4767 4768 return 0; 4769 } 4770 4771 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags) 4772 { 4773 struct arg_netdev_event arg = { 4774 .dev = dev, 4775 { 4776 .nh_flags = nh_flags, 4777 }, 4778 }; 4779 4780 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev)) 4781 arg.nh_flags |= RTNH_F_LINKDOWN; 4782 4783 fib6_clean_all(dev_net(dev), fib6_ifup, &arg); 4784 } 4785 4786 /* only called for fib entries with inline fib6_nh */ 4787 static bool rt6_multipath_uses_dev(const struct fib6_info *rt, 4788 const struct net_device *dev) 4789 { 4790 struct fib6_info *iter; 4791 4792 if (rt->fib6_nh->fib_nh_dev == dev) 4793 return true; 4794 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 4795 if (iter->fib6_nh->fib_nh_dev == dev) 4796 return true; 4797 4798 return false; 4799 } 4800 4801 static void rt6_multipath_flush(struct fib6_info *rt) 4802 { 4803 struct fib6_info *iter; 4804 4805 rt->should_flush = 1; 4806 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 4807 iter->should_flush = 1; 4808 } 4809 4810 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt, 4811 const struct net_device *down_dev) 4812 { 4813 struct fib6_info *iter; 4814 unsigned int dead = 0; 4815 4816 if (rt->fib6_nh->fib_nh_dev == down_dev || 4817 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD) 4818 dead++; 4819 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 4820 if (iter->fib6_nh->fib_nh_dev == down_dev || 4821 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD) 4822 dead++; 4823 4824 return dead; 4825 } 4826 4827 static void rt6_multipath_nh_flags_set(struct fib6_info *rt, 4828 const struct net_device *dev, 4829 unsigned char nh_flags) 4830 { 4831 struct fib6_info *iter; 4832 4833 if (rt->fib6_nh->fib_nh_dev == dev) 4834 rt->fib6_nh->fib_nh_flags |= nh_flags; 4835 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 4836 if (iter->fib6_nh->fib_nh_dev == dev) 4837 iter->fib6_nh->fib_nh_flags |= nh_flags; 4838 } 4839 4840 /* called with write lock held for table with rt */ 4841 static int fib6_ifdown(struct fib6_info *rt, void *p_arg) 4842 { 4843 const struct arg_netdev_event *arg = p_arg; 4844 const struct net_device *dev = arg->dev; 4845 struct net *net = dev_net(dev); 4846 4847 if (rt == net->ipv6.fib6_null_entry || rt->nh) 4848 return 0; 4849 4850 switch (arg->event) { 4851 case NETDEV_UNREGISTER: 4852 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0; 4853 case NETDEV_DOWN: 4854 if (rt->should_flush) 4855 return -1; 4856 if (!rt->fib6_nsiblings) 4857 return rt->fib6_nh->fib_nh_dev == dev ? 
-1 : 0; 4858 if (rt6_multipath_uses_dev(rt, dev)) { 4859 unsigned int count; 4860 4861 count = rt6_multipath_dead_count(rt, dev); 4862 if (rt->fib6_nsiblings + 1 == count) { 4863 rt6_multipath_flush(rt); 4864 return -1; 4865 } 4866 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD | 4867 RTNH_F_LINKDOWN); 4868 fib6_update_sernum(net, rt); 4869 rt6_multipath_rebalance(rt); 4870 } 4871 return -2; 4872 case NETDEV_CHANGE: 4873 if (rt->fib6_nh->fib_nh_dev != dev || 4874 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) 4875 break; 4876 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN; 4877 rt6_multipath_rebalance(rt); 4878 break; 4879 } 4880 4881 return 0; 4882 } 4883 4884 void rt6_sync_down_dev(struct net_device *dev, unsigned long event) 4885 { 4886 struct arg_netdev_event arg = { 4887 .dev = dev, 4888 { 4889 .event = event, 4890 }, 4891 }; 4892 struct net *net = dev_net(dev); 4893 4894 if (net->ipv6.sysctl.skip_notify_on_dev_down) 4895 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg); 4896 else 4897 fib6_clean_all(net, fib6_ifdown, &arg); 4898 } 4899 4900 void rt6_disable_ip(struct net_device *dev, unsigned long event) 4901 { 4902 rt6_sync_down_dev(dev, event); 4903 rt6_uncached_list_flush_dev(dev); 4904 neigh_ifdown(&nd_tbl, dev); 4905 } 4906 4907 struct rt6_mtu_change_arg { 4908 struct net_device *dev; 4909 unsigned int mtu; 4910 struct fib6_info *f6i; 4911 }; 4912 4913 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg) 4914 { 4915 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg; 4916 struct fib6_info *f6i = arg->f6i; 4917 4918 /* For an administrative MTU increase there is no way to discover 4919 * an IPv6 PMTU increase, so the PMTU should be updated here. 4920 * Since RFC 1981 doesn't cover administrative MTU increases, 4921 * updating PMTU on such an increase is a MUST (e.g. for jumbo frames). 4922 */ 4923 if (nh->fib_nh_dev == arg->dev) { 4924 struct inet6_dev *idev = __in6_dev_get(arg->dev); 4925 u32 mtu = f6i->fib6_pmtu; 4926 4927 if (mtu >= arg->mtu || 4928 (mtu < arg->mtu && mtu == idev->cnf.mtu6)) 4929 fib6_metric_set(f6i, RTAX_MTU, arg->mtu); 4930 4931 spin_lock_bh(&rt6_exception_lock); 4932 rt6_exceptions_update_pmtu(idev, nh, arg->mtu); 4933 spin_unlock_bh(&rt6_exception_lock); 4934 } 4935 4936 return 0; 4937 } 4938 4939 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg) 4940 { 4941 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; 4942 struct inet6_dev *idev; 4943 4944 /* In IPv6 PMTU discovery is not optional, 4945 so the RTAX_MTU lock cannot disable it. 4946 We still use this lock to block changes 4947 caused by addrconf/ndisc.
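(Hence the fib6_metric_locked() early return below: a route with a locked RTAX_MTU keeps its configured MTU when the device MTU changes.)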
4948 */ 4949 4950 idev = __in6_dev_get(arg->dev); 4951 if (!idev) 4952 return 0; 4953 4954 if (fib6_metric_locked(f6i, RTAX_MTU)) 4955 return 0; 4956 4957 arg->f6i = f6i; 4958 if (f6i->nh) { 4959 /* fib6_nh_mtu_change only returns 0, so this is safe */ 4960 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change, 4961 arg); 4962 } 4963 4964 return fib6_nh_mtu_change(f6i->fib6_nh, arg); 4965 } 4966 4967 void rt6_mtu_change(struct net_device *dev, unsigned int mtu) 4968 { 4969 struct rt6_mtu_change_arg arg = { 4970 .dev = dev, 4971 .mtu = mtu, 4972 }; 4973 4974 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg); 4975 } 4976 4977 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { 4978 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 }, 4979 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, 4980 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) }, 4981 [RTA_OIF] = { .type = NLA_U32 }, 4982 [RTA_IIF] = { .type = NLA_U32 }, 4983 [RTA_PRIORITY] = { .type = NLA_U32 }, 4984 [RTA_METRICS] = { .type = NLA_NESTED }, 4985 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, 4986 [RTA_PREF] = { .type = NLA_U8 }, 4987 [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, 4988 [RTA_ENCAP] = { .type = NLA_NESTED }, 4989 [RTA_EXPIRES] = { .type = NLA_U32 }, 4990 [RTA_UID] = { .type = NLA_U32 }, 4991 [RTA_MARK] = { .type = NLA_U32 }, 4992 [RTA_TABLE] = { .type = NLA_U32 }, 4993 [RTA_IP_PROTO] = { .type = NLA_U8 }, 4994 [RTA_SPORT] = { .type = NLA_U16 }, 4995 [RTA_DPORT] = { .type = NLA_U16 }, 4996 [RTA_NH_ID] = { .type = NLA_U32 }, 4997 }; 4998 4999 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, 5000 struct fib6_config *cfg, 5001 struct netlink_ext_ack *extack) 5002 { 5003 struct rtmsg *rtm; 5004 struct nlattr *tb[RTA_MAX+1]; 5005 unsigned int pref; 5006 int err; 5007 5008 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX, 5009 rtm_ipv6_policy, extack); 5010 if (err < 0) 5011 goto errout; 5012 5013 err = -EINVAL; 5014 rtm = nlmsg_data(nlh); 5015 5016 if (rtm->rtm_tos) { 5017 NL_SET_ERR_MSG(extack, 5018 "Invalid dsfield (tos): option not available for IPv6"); 5019 goto errout; 5020 } 5021 5022 *cfg = (struct fib6_config){ 5023 .fc_table = rtm->rtm_table, 5024 .fc_dst_len = rtm->rtm_dst_len, 5025 .fc_src_len = rtm->rtm_src_len, 5026 .fc_flags = RTF_UP, 5027 .fc_protocol = rtm->rtm_protocol, 5028 .fc_type = rtm->rtm_type, 5029 5030 .fc_nlinfo.portid = NETLINK_CB(skb).portid, 5031 .fc_nlinfo.nlh = nlh, 5032 .fc_nlinfo.nl_net = sock_net(skb->sk), 5033 }; 5034 5035 if (rtm->rtm_type == RTN_UNREACHABLE || 5036 rtm->rtm_type == RTN_BLACKHOLE || 5037 rtm->rtm_type == RTN_PROHIBIT || 5038 rtm->rtm_type == RTN_THROW) 5039 cfg->fc_flags |= RTF_REJECT; 5040 5041 if (rtm->rtm_type == RTN_LOCAL) 5042 cfg->fc_flags |= RTF_LOCAL; 5043 5044 if (rtm->rtm_flags & RTM_F_CLONED) 5045 cfg->fc_flags |= RTF_CACHE; 5046 5047 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK); 5048 5049 if (tb[RTA_NH_ID]) { 5050 if (tb[RTA_GATEWAY] || tb[RTA_OIF] || 5051 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) { 5052 NL_SET_ERR_MSG(extack, 5053 "Nexthop specification and nexthop id are mutually exclusive"); 5054 goto errout; 5055 } 5056 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]); 5057 } 5058 5059 if (tb[RTA_GATEWAY]) { 5060 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]); 5061 cfg->fc_flags |= RTF_GATEWAY; 5062 } 5063 if (tb[RTA_VIA]) { 5064 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute"); 5065 goto errout; 5066 } 5067 5068 if (tb[RTA_DST]) { 5069 int plen = (rtm->rtm_dst_len + 7) >> 3; 
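/* plen is the number of whole bytes needed to carry an rtm_dst_len-bit prefix, rounded up; shorter RTA_DST payloads are rejected just below. */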
5070 5071 if (nla_len(tb[RTA_DST]) < plen) 5072 goto errout; 5073 5074 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen); 5075 } 5076 5077 if (tb[RTA_SRC]) { 5078 int plen = (rtm->rtm_src_len + 7) >> 3; 5079 5080 if (nla_len(tb[RTA_SRC]) < plen) 5081 goto errout; 5082 5083 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen); 5084 } 5085 5086 if (tb[RTA_PREFSRC]) 5087 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]); 5088 5089 if (tb[RTA_OIF]) 5090 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]); 5091 5092 if (tb[RTA_PRIORITY]) 5093 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]); 5094 5095 if (tb[RTA_METRICS]) { 5096 cfg->fc_mx = nla_data(tb[RTA_METRICS]); 5097 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]); 5098 } 5099 5100 if (tb[RTA_TABLE]) 5101 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]); 5102 5103 if (tb[RTA_MULTIPATH]) { 5104 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); 5105 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); 5106 5107 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp, 5108 cfg->fc_mp_len, extack); 5109 if (err < 0) 5110 goto errout; 5111 } 5112 5113 if (tb[RTA_PREF]) { 5114 pref = nla_get_u8(tb[RTA_PREF]); 5115 if (pref != ICMPV6_ROUTER_PREF_LOW && 5116 pref != ICMPV6_ROUTER_PREF_HIGH) 5117 pref = ICMPV6_ROUTER_PREF_MEDIUM; 5118 cfg->fc_flags |= RTF_PREF(pref); 5119 } 5120 5121 if (tb[RTA_ENCAP]) 5122 cfg->fc_encap = tb[RTA_ENCAP]; 5123 5124 if (tb[RTA_ENCAP_TYPE]) { 5125 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); 5126 5127 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack); 5128 if (err < 0) 5129 goto errout; 5130 } 5131 5132 if (tb[RTA_EXPIRES]) { 5133 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ); 5134 5135 if (addrconf_finite_timeout(timeout)) { 5136 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ); 5137 cfg->fc_flags |= RTF_EXPIRES; 5138 } 5139 } 5140 5141 err = 0; 5142 errout: 5143 return err; 5144 } 5145 5146 struct rt6_nh { 5147 struct fib6_info *fib6_info; 5148 struct fib6_config r_cfg; 5149 struct list_head next; 5150 }; 5151 5152 static int ip6_route_info_append(struct net *net, 5153 struct list_head *rt6_nh_list, 5154 struct fib6_info *rt, 5155 struct fib6_config *r_cfg) 5156 { 5157 struct rt6_nh *nh; 5158 int err = -EEXIST; 5159 5160 list_for_each_entry(nh, rt6_nh_list, next) { 5161 /* check if fib6_info already exists */ 5162 if (rt6_duplicate_nexthop(nh->fib6_info, rt)) 5163 return err; 5164 } 5165 5166 nh = kzalloc(sizeof(*nh), GFP_KERNEL); 5167 if (!nh) 5168 return -ENOMEM; 5169 nh->fib6_info = rt; 5170 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); 5171 list_add_tail(&nh->next, rt6_nh_list); 5172 5173 return 0; 5174 } 5175 5176 static void ip6_route_mpath_notify(struct fib6_info *rt, 5177 struct fib6_info *rt_last, 5178 struct nl_info *info, 5179 __u16 nlflags) 5180 { 5181 /* if this is an APPEND route, then rt points to the first route 5182 * inserted and rt_last points to last route inserted. Userspace 5183 * wants a consistent dump of the route which starts at the first 5184 * nexthop. 
Since sibling routes are always added at the end of 5185 * the list, find the first sibling of the last route appended 5186 */ 5187 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) { 5188 rt = list_first_entry(&rt_last->fib6_siblings, 5189 struct fib6_info, 5190 fib6_siblings); 5191 } 5192 5193 if (rt) 5194 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); 5195 } 5196 5197 static bool ip6_route_mpath_should_notify(const struct fib6_info *rt) 5198 { 5199 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt); 5200 bool should_notify = false; 5201 struct fib6_info *leaf; 5202 struct fib6_node *fn; 5203 5204 rcu_read_lock(); 5205 fn = rcu_dereference(rt->fib6_node); 5206 if (!fn) 5207 goto out; 5208 5209 leaf = rcu_dereference(fn->leaf); 5210 if (!leaf) 5211 goto out; 5212 5213 if (rt == leaf || 5214 (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric && 5215 rt6_qualify_for_ecmp(leaf))) 5216 should_notify = true; 5217 out: 5218 rcu_read_unlock(); 5219 5220 return should_notify; 5221 } 5222 5223 static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla, 5224 struct netlink_ext_ack *extack) 5225 { 5226 if (nla_len(nla) < sizeof(*gw)) { 5227 NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY"); 5228 return -EINVAL; 5229 } 5230 5231 *gw = nla_get_in6_addr(nla); 5232 5233 return 0; 5234 } 5235 5236 static int ip6_route_multipath_add(struct fib6_config *cfg, 5237 struct netlink_ext_ack *extack) 5238 { 5239 struct fib6_info *rt_notif = NULL, *rt_last = NULL; 5240 struct nl_info *info = &cfg->fc_nlinfo; 5241 struct fib6_config r_cfg; 5242 struct rtnexthop *rtnh; 5243 struct fib6_info *rt; 5244 struct rt6_nh *err_nh; 5245 struct rt6_nh *nh, *nh_safe; 5246 __u16 nlflags; 5247 int remaining; 5248 int attrlen; 5249 int err = 1; 5250 int nhn = 0; 5251 int replace = (cfg->fc_nlinfo.nlh && 5252 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE)); 5253 LIST_HEAD(rt6_nh_list); 5254 5255 nlflags = replace ? 
NLM_F_REPLACE : NLM_F_CREATE; 5256 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND) 5257 nlflags |= NLM_F_APPEND; 5258 5259 remaining = cfg->fc_mp_len; 5260 rtnh = (struct rtnexthop *)cfg->fc_mp; 5261 5262 /* Parse a Multipath Entry and build a list (rt6_nh_list) of 5263 * fib6_info structs per nexthop 5264 */ 5265 while (rtnh_ok(rtnh, remaining)) { 5266 memcpy(&r_cfg, cfg, sizeof(*cfg)); 5267 if (rtnh->rtnh_ifindex) 5268 r_cfg.fc_ifindex = rtnh->rtnh_ifindex; 5269 5270 attrlen = rtnh_attrlen(rtnh); 5271 if (attrlen > 0) { 5272 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 5273 5274 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 5275 if (nla) { 5276 err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla, 5277 extack); 5278 if (err) 5279 goto cleanup; 5280 5281 r_cfg.fc_flags |= RTF_GATEWAY; 5282 } 5283 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); 5284 5285 /* RTA_ENCAP_TYPE length checked in 5286 * lwtunnel_valid_encap_type_attr 5287 */ 5288 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); 5289 if (nla) 5290 r_cfg.fc_encap_type = nla_get_u16(nla); 5291 } 5292 5293 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK); 5294 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack); 5295 if (IS_ERR(rt)) { 5296 err = PTR_ERR(rt); 5297 rt = NULL; 5298 goto cleanup; 5299 } 5300 if (!rt6_qualify_for_ecmp(rt)) { 5301 err = -EINVAL; 5302 NL_SET_ERR_MSG(extack, 5303 "Device only routes can not be added for IPv6 using the multipath API."); 5304 fib6_info_release(rt); 5305 goto cleanup; 5306 } 5307 5308 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1; 5309 5310 err = ip6_route_info_append(info->nl_net, &rt6_nh_list, 5311 rt, &r_cfg); 5312 if (err) { 5313 fib6_info_release(rt); 5314 goto cleanup; 5315 } 5316 5317 rtnh = rtnh_next(rtnh, &remaining); 5318 } 5319 5320 if (list_empty(&rt6_nh_list)) { 5321 NL_SET_ERR_MSG(extack, 5322 "Invalid nexthop configuration - no valid nexthops"); 5323 return -EINVAL; 5324 } 5325 5326 /* for add and replace send one notification with all nexthops. 5327 * Skip the notification in fib6_add_rt2node and send one with 5328 * the full route when done 5329 */ 5330 info->skip_notify = 1; 5331 5332 /* For add and replace, send one notification with all nexthops. For 5333 * append, send one notification with all appended nexthops. 5334 */ 5335 info->skip_notify_kernel = 1; 5336 5337 err_nh = NULL; 5338 list_for_each_entry(nh, &rt6_nh_list, next) { 5339 err = __ip6_ins_rt(nh->fib6_info, info, extack); 5340 5341 if (err) { 5342 if (replace && nhn) 5343 NL_SET_ERR_MSG_MOD(extack, 5344 "multipath route replace failed (check consistency of installed routes)"); 5345 err_nh = nh; 5346 goto add_errout; 5347 } 5348 /* save reference to last route successfully inserted */ 5349 rt_last = nh->fib6_info; 5350 5351 /* save reference to first route for notification */ 5352 if (!rt_notif) 5353 rt_notif = nh->fib6_info; 5354 5355 /* Because each route is added like a single route we remove 5356 * these flags after the first nexthop: if there is a collision, 5357 * we have already failed to add the first nexthop: 5358 * fib6_add_rt2node() has rejected it; when replacing, old 5359 * nexthops have been replaced by first new, the rest should 5360 * be added to it. 
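* (After the first successful insert the request therefore behaves as a plain NLM_F_CREATE, without NLM_F_EXCL or NLM_F_REPLACE, so the remaining nexthops are appended to the route just installed.)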
5361 */ 5362 if (cfg->fc_nlinfo.nlh) { 5363 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | 5364 NLM_F_REPLACE); 5365 cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; 5366 } 5367 nhn++; 5368 } 5369 5370 /* An in-kernel notification should only be sent in case the new 5371 * multipath route is added as the first route in the node, or if 5372 * it was appended to it. We pass 'rt_notif' since it is the first 5373 * sibling and might allow us to skip some checks in the replace case. 5374 */ 5375 if (ip6_route_mpath_should_notify(rt_notif)) { 5376 enum fib_event_type fib_event; 5377 5378 if (rt_notif->fib6_nsiblings != nhn - 1) 5379 fib_event = FIB_EVENT_ENTRY_APPEND; 5380 else 5381 fib_event = FIB_EVENT_ENTRY_REPLACE; 5382 5383 err = call_fib6_multipath_entry_notifiers(info->nl_net, 5384 fib_event, rt_notif, 5385 nhn - 1, extack); 5386 if (err) { 5387 /* Delete all the siblings that were just added */ 5388 err_nh = NULL; 5389 goto add_errout; 5390 } 5391 } 5392 5393 /* success ... tell user about new route */ 5394 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags); 5395 goto cleanup; 5396 5397 add_errout: 5398 /* send notification for routes that were added so that 5399 * the delete notifications sent by ip6_route_del are 5400 * coherent 5401 */ 5402 if (rt_notif) 5403 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags); 5404 5405 /* Delete routes that were already added */ 5406 list_for_each_entry(nh, &rt6_nh_list, next) { 5407 if (err_nh == nh) 5408 break; 5409 ip6_route_del(&nh->r_cfg, extack); 5410 } 5411 5412 cleanup: 5413 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { 5414 fib6_info_release(nh->fib6_info); 5415 list_del(&nh->next); 5416 kfree(nh); 5417 } 5418 5419 return err; 5420 } 5421 5422 static int ip6_route_multipath_del(struct fib6_config *cfg, 5423 struct netlink_ext_ack *extack) 5424 { 5425 struct fib6_config r_cfg; 5426 struct rtnexthop *rtnh; 5427 int last_err = 0; 5428 int remaining; 5429 int attrlen; 5430 int err; 5431 5432 remaining = cfg->fc_mp_len; 5433 rtnh = (struct rtnexthop *)cfg->fc_mp; 5434 5435 /* Parse a Multipath Entry */ 5436 while (rtnh_ok(rtnh, remaining)) { 5437 memcpy(&r_cfg, cfg, sizeof(*cfg)); 5438 if (rtnh->rtnh_ifindex) 5439 r_cfg.fc_ifindex = rtnh->rtnh_ifindex; 5440 5441 attrlen = rtnh_attrlen(rtnh); 5442 if (attrlen > 0) { 5443 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 5444 5445 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 5446 if (nla) { 5447 err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla, 5448 extack); 5449 if (err) { 5450 last_err = err; 5451 goto next_rtnh; 5452 } 5453 5454 r_cfg.fc_flags |= RTF_GATEWAY; 5455 } 5456 } 5457 err = ip6_route_del(&r_cfg, extack); 5458 if (err) 5459 last_err = err; 5460 5461 next_rtnh: 5462 rtnh = rtnh_next(rtnh, &remaining); 5463 } 5464 5465 return last_err; 5466 } 5467 5468 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, 5469 struct netlink_ext_ack *extack) 5470 { 5471 struct fib6_config cfg; 5472 int err; 5473 5474 err = rtm_to_fib6_config(skb, nlh, &cfg, extack); 5475 if (err < 0) 5476 return err; 5477 5478 if (cfg.fc_nh_id && 5479 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) { 5480 NL_SET_ERR_MSG(extack, "Nexthop id does not exist"); 5481 return -EINVAL; 5482 } 5483 5484 if (cfg.fc_mp) 5485 return ip6_route_multipath_del(&cfg, extack); 5486 else { 5487 cfg.fc_delete_all_nh = 1; 5488 return ip6_route_del(&cfg, extack); 5489 } 5490 } 5491 5492 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, 5493 struct netlink_ext_ack *extack) 
5494 { 5495 struct fib6_config cfg; 5496 int err; 5497 5498 err = rtm_to_fib6_config(skb, nlh, &cfg, extack); 5499 if (err < 0) 5500 return err; 5501 5502 if (cfg.fc_metric == 0) 5503 cfg.fc_metric = IP6_RT_PRIO_USER; 5504 5505 if (cfg.fc_mp) 5506 return ip6_route_multipath_add(&cfg, extack); 5507 else 5508 return ip6_route_add(&cfg, GFP_KERNEL, extack); 5509 } 5510 5511 /* add the overhead of this fib6_nh to nexthop_len */ 5512 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg) 5513 { 5514 int *nexthop_len = arg; 5515 5516 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */ 5517 + NLA_ALIGN(sizeof(struct rtnexthop)) 5518 + nla_total_size(16); /* RTA_GATEWAY */ 5519 5520 if (nh->fib_nh_lws) { 5521 /* RTA_ENCAP_TYPE */ 5522 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws); 5523 /* RTA_ENCAP */ 5524 *nexthop_len += nla_total_size(2); 5525 } 5526 5527 return 0; 5528 } 5529 5530 static size_t rt6_nlmsg_size(struct fib6_info *f6i) 5531 { 5532 int nexthop_len; 5533 5534 if (f6i->nh) { 5535 nexthop_len = nla_total_size(4); /* RTA_NH_ID */ 5536 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size, 5537 &nexthop_len); 5538 } else { 5539 struct fib6_info *sibling, *next_sibling; 5540 struct fib6_nh *nh = f6i->fib6_nh; 5541 5542 nexthop_len = 0; 5543 if (f6i->fib6_nsiblings) { 5544 rt6_nh_nlmsg_size(nh, &nexthop_len); 5545 5546 list_for_each_entry_safe(sibling, next_sibling, 5547 &f6i->fib6_siblings, fib6_siblings) { 5548 rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len); 5549 } 5550 } 5551 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws); 5552 } 5553 5554 return NLMSG_ALIGN(sizeof(struct rtmsg)) 5555 + nla_total_size(16) /* RTA_SRC */ 5556 + nla_total_size(16) /* RTA_DST */ 5557 + nla_total_size(16) /* RTA_GATEWAY */ 5558 + nla_total_size(16) /* RTA_PREFSRC */ 5559 + nla_total_size(4) /* RTA_TABLE */ 5560 + nla_total_size(4) /* RTA_IIF */ 5561 + nla_total_size(4) /* RTA_OIF */ 5562 + nla_total_size(4) /* RTA_PRIORITY */ 5563 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */ 5564 + nla_total_size(sizeof(struct rta_cacheinfo)) 5565 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ 5566 + nla_total_size(1) /* RTA_PREF */ 5567 + nexthop_len; 5568 } 5569 5570 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh, 5571 unsigned char *flags) 5572 { 5573 if (nexthop_is_multipath(nh)) { 5574 struct nlattr *mp; 5575 5576 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH); 5577 if (!mp) 5578 goto nla_put_failure; 5579 5580 if (nexthop_mpath_fill_node(skb, nh, AF_INET6)) 5581 goto nla_put_failure; 5582 5583 nla_nest_end(skb, mp); 5584 } else { 5585 struct fib6_nh *fib6_nh; 5586 5587 fib6_nh = nexthop_fib6_nh(nh); 5588 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6, 5589 flags, false) < 0) 5590 goto nla_put_failure; 5591 } 5592 5593 return 0; 5594 5595 nla_put_failure: 5596 return -EMSGSIZE; 5597 } 5598 5599 static int rt6_fill_node(struct net *net, struct sk_buff *skb, 5600 struct fib6_info *rt, struct dst_entry *dst, 5601 struct in6_addr *dest, struct in6_addr *src, 5602 int iif, int type, u32 portid, u32 seq, 5603 unsigned int flags) 5604 { 5605 struct rt6_info *rt6 = (struct rt6_info *)dst; 5606 struct rt6key *rt6_dst, *rt6_src; 5607 u32 *pmetrics, table, rt6_flags; 5608 unsigned char nh_flags = 0; 5609 struct nlmsghdr *nlh; 5610 struct rtmsg *rtm; 5611 long expires = 0; 5612 5613 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); 5614 if (!nlh) 5615 return -EMSGSIZE; 5616 5617 if (rt6) { 5618 rt6_dst = &rt6->rt6i_dst; 5619 rt6_src = &rt6->rt6i_src; 
5620 rt6_flags = rt6->rt6i_flags; 5621 } else { 5622 rt6_dst = &rt->fib6_dst; 5623 rt6_src = &rt->fib6_src; 5624 rt6_flags = rt->fib6_flags; 5625 } 5626 5627 rtm = nlmsg_data(nlh); 5628 rtm->rtm_family = AF_INET6; 5629 rtm->rtm_dst_len = rt6_dst->plen; 5630 rtm->rtm_src_len = rt6_src->plen; 5631 rtm->rtm_tos = 0; 5632 if (rt->fib6_table) 5633 table = rt->fib6_table->tb6_id; 5634 else 5635 table = RT6_TABLE_UNSPEC; 5636 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT; 5637 if (nla_put_u32(skb, RTA_TABLE, table)) 5638 goto nla_put_failure; 5639 5640 rtm->rtm_type = rt->fib6_type; 5641 rtm->rtm_flags = 0; 5642 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 5643 rtm->rtm_protocol = rt->fib6_protocol; 5644 5645 if (rt6_flags & RTF_CACHE) 5646 rtm->rtm_flags |= RTM_F_CLONED; 5647 5648 if (dest) { 5649 if (nla_put_in6_addr(skb, RTA_DST, dest)) 5650 goto nla_put_failure; 5651 rtm->rtm_dst_len = 128; 5652 } else if (rtm->rtm_dst_len) 5653 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr)) 5654 goto nla_put_failure; 5655 #ifdef CONFIG_IPV6_SUBTREES 5656 if (src) { 5657 if (nla_put_in6_addr(skb, RTA_SRC, src)) 5658 goto nla_put_failure; 5659 rtm->rtm_src_len = 128; 5660 } else if (rtm->rtm_src_len && 5661 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr)) 5662 goto nla_put_failure; 5663 #endif 5664 if (iif) { 5665 #ifdef CONFIG_IPV6_MROUTE 5666 if (ipv6_addr_is_multicast(&rt6_dst->addr)) { 5667 int err = ip6mr_get_route(net, skb, rtm, portid); 5668 5669 if (err == 0) 5670 return 0; 5671 if (err < 0) 5672 goto nla_put_failure; 5673 } else 5674 #endif 5675 if (nla_put_u32(skb, RTA_IIF, iif)) 5676 goto nla_put_failure; 5677 } else if (dest) { 5678 struct in6_addr saddr_buf; 5679 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 && 5680 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 5681 goto nla_put_failure; 5682 } 5683 5684 if (rt->fib6_prefsrc.plen) { 5685 struct in6_addr saddr_buf; 5686 saddr_buf = rt->fib6_prefsrc.addr; 5687 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 5688 goto nla_put_failure; 5689 } 5690 5691 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics; 5692 if (rtnetlink_put_metrics(skb, pmetrics) < 0) 5693 goto nla_put_failure; 5694 5695 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric)) 5696 goto nla_put_failure; 5697 5698 /* For multipath routes, walk the siblings list and add 5699 * each as a nexthop within RTA_MULTIPATH. 
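* (The first nexthop lives in rt->fib6_nh itself; only the siblings sit on the fib6_siblings list, which is why it is emitted once before the loop below.)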
5700 */ 5701 if (rt6) { 5702 if (rt6_flags & RTF_GATEWAY && 5703 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway)) 5704 goto nla_put_failure; 5705 5706 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex)) 5707 goto nla_put_failure; 5708 5709 if (dst->lwtstate && 5710 lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0) 5711 goto nla_put_failure; 5712 } else if (rt->fib6_nsiblings) { 5713 struct fib6_info *sibling, *next_sibling; 5714 struct nlattr *mp; 5715 5716 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH); 5717 if (!mp) 5718 goto nla_put_failure; 5719 5720 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common, 5721 rt->fib6_nh->fib_nh_weight, AF_INET6, 5722 0) < 0) 5723 goto nla_put_failure; 5724 5725 list_for_each_entry_safe(sibling, next_sibling, 5726 &rt->fib6_siblings, fib6_siblings) { 5727 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common, 5728 sibling->fib6_nh->fib_nh_weight, 5729 AF_INET6, 0) < 0) 5730 goto nla_put_failure; 5731 } 5732 5733 nla_nest_end(skb, mp); 5734 } else if (rt->nh) { 5735 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id)) 5736 goto nla_put_failure; 5737 5738 if (nexthop_is_blackhole(rt->nh)) 5739 rtm->rtm_type = RTN_BLACKHOLE; 5740 5741 if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) && 5742 rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0) 5743 goto nla_put_failure; 5744 5745 rtm->rtm_flags |= nh_flags; 5746 } else { 5747 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6, 5748 &nh_flags, false) < 0) 5749 goto nla_put_failure; 5750 5751 rtm->rtm_flags |= nh_flags; 5752 } 5753 5754 if (rt6_flags & RTF_EXPIRES) { 5755 expires = dst ? dst->expires : rt->expires; 5756 expires -= jiffies; 5757 } 5758 5759 if (!dst) { 5760 if (READ_ONCE(rt->offload)) 5761 rtm->rtm_flags |= RTM_F_OFFLOAD; 5762 if (READ_ONCE(rt->trap)) 5763 rtm->rtm_flags |= RTM_F_TRAP; 5764 if (READ_ONCE(rt->offload_failed)) 5765 rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED; 5766 } 5767 5768 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? 
dst->error : 0) < 0) 5769 goto nla_put_failure; 5770 5771 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags))) 5772 goto nla_put_failure; 5773 5774 5775 nlmsg_end(skb, nlh); 5776 return 0; 5777 5778 nla_put_failure: 5779 nlmsg_cancel(skb, nlh); 5780 return -EMSGSIZE; 5781 } 5782 5783 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg) 5784 { 5785 const struct net_device *dev = arg; 5786 5787 if (nh->fib_nh_dev == dev) 5788 return 1; 5789 5790 return 0; 5791 } 5792 5793 static bool fib6_info_uses_dev(const struct fib6_info *f6i, 5794 const struct net_device *dev) 5795 { 5796 if (f6i->nh) { 5797 struct net_device *_dev = (struct net_device *)dev; 5798 5799 return !!nexthop_for_each_fib6_nh(f6i->nh, 5800 fib6_info_nh_uses_dev, 5801 _dev); 5802 } 5803 5804 if (f6i->fib6_nh->fib_nh_dev == dev) 5805 return true; 5806 5807 if (f6i->fib6_nsiblings) { 5808 struct fib6_info *sibling, *next_sibling; 5809 5810 list_for_each_entry_safe(sibling, next_sibling, 5811 &f6i->fib6_siblings, fib6_siblings) { 5812 if (sibling->fib6_nh->fib_nh_dev == dev) 5813 return true; 5814 } 5815 } 5816 5817 return false; 5818 } 5819 5820 struct fib6_nh_exception_dump_walker { 5821 struct rt6_rtnl_dump_arg *dump; 5822 struct fib6_info *rt; 5823 unsigned int flags; 5824 unsigned int skip; 5825 unsigned int count; 5826 }; 5827 5828 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg) 5829 { 5830 struct fib6_nh_exception_dump_walker *w = arg; 5831 struct rt6_rtnl_dump_arg *dump = w->dump; 5832 struct rt6_exception_bucket *bucket; 5833 struct rt6_exception *rt6_ex; 5834 int i, err; 5835 5836 bucket = fib6_nh_get_excptn_bucket(nh, NULL); 5837 if (!bucket) 5838 return 0; 5839 5840 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 5841 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { 5842 if (w->skip) { 5843 w->skip--; 5844 continue; 5845 } 5846 5847 /* Expiration of entries doesn't bump sernum, insertion 5848 * does. Removal is triggered by insertion, so we can 5849 * rely on the fact that if entries change between two 5850 * partial dumps, this node is scanned again completely, 5851 * see rt6_insert_exception() and fib6_dump_table(). 5852 * 5853 * Count expired entries we go through as handled 5854 * entries that we'll skip next time, in case of partial 5855 * node dump. Otherwise, if entries expire meanwhile, 5856 * we'll skip the wrong amount. 
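* (For instance, on a resumed dump with skip == 2 the first two entries are stepped over whether or not they have expired, and every entry handled after that bumps w->count, so the next partial dump restarts at the right offset.)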
5857 */ 5858 if (rt6_check_expired(rt6_ex->rt6i)) { 5859 w->count++; 5860 continue; 5861 } 5862 5863 err = rt6_fill_node(dump->net, dump->skb, w->rt, 5864 &rt6_ex->rt6i->dst, NULL, NULL, 0, 5865 RTM_NEWROUTE, 5866 NETLINK_CB(dump->cb->skb).portid, 5867 dump->cb->nlh->nlmsg_seq, w->flags); 5868 if (err) 5869 return err; 5870 5871 w->count++; 5872 } 5873 bucket++; 5874 } 5875 5876 return 0; 5877 } 5878 5879 /* Return -1 if done with node, number of handled routes on partial dump */ 5880 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip) 5881 { 5882 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg; 5883 struct fib_dump_filter *filter = &arg->filter; 5884 unsigned int flags = NLM_F_MULTI; 5885 struct net *net = arg->net; 5886 int count = 0; 5887 5888 if (rt == net->ipv6.fib6_null_entry) 5889 return -1; 5890 5891 if ((filter->flags & RTM_F_PREFIX) && 5892 !(rt->fib6_flags & RTF_PREFIX_RT)) { 5893 /* success since this is not a prefix route */ 5894 return -1; 5895 } 5896 if (filter->filter_set && 5897 ((filter->rt_type && rt->fib6_type != filter->rt_type) || 5898 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) || 5899 (filter->protocol && rt->fib6_protocol != filter->protocol))) { 5900 return -1; 5901 } 5902 5903 if (filter->filter_set || 5904 !filter->dump_routes || !filter->dump_exceptions) { 5905 flags |= NLM_F_DUMP_FILTERED; 5906 } 5907 5908 if (filter->dump_routes) { 5909 if (skip) { 5910 skip--; 5911 } else { 5912 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 5913 0, RTM_NEWROUTE, 5914 NETLINK_CB(arg->cb->skb).portid, 5915 arg->cb->nlh->nlmsg_seq, flags)) { 5916 return 0; 5917 } 5918 count++; 5919 } 5920 } 5921 5922 if (filter->dump_exceptions) { 5923 struct fib6_nh_exception_dump_walker w = { .dump = arg, 5924 .rt = rt, 5925 .flags = flags, 5926 .skip = skip, 5927 .count = 0 }; 5928 int err; 5929 5930 rcu_read_lock(); 5931 if (rt->nh) { 5932 err = nexthop_for_each_fib6_nh(rt->nh, 5933 rt6_nh_dump_exceptions, 5934 &w); 5935 } else { 5936 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w); 5937 } 5938 rcu_read_unlock(); 5939 5940 if (err) 5941 return count + w.count; 5942 } 5943 5944 return -1; 5945 } 5946 5947 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb, 5948 const struct nlmsghdr *nlh, 5949 struct nlattr **tb, 5950 struct netlink_ext_ack *extack) 5951 { 5952 struct rtmsg *rtm; 5953 int i, err; 5954 5955 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) { 5956 NL_SET_ERR_MSG_MOD(extack, 5957 "Invalid header for get route request"); 5958 return -EINVAL; 5959 } 5960 5961 if (!netlink_strict_get_check(skb)) 5962 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX, 5963 rtm_ipv6_policy, extack); 5964 5965 rtm = nlmsg_data(nlh); 5966 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) || 5967 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) || 5968 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope || 5969 rtm->rtm_type) { 5970 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request"); 5971 return -EINVAL; 5972 } 5973 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) { 5974 NL_SET_ERR_MSG_MOD(extack, 5975 "Invalid flags for get route request"); 5976 return -EINVAL; 5977 } 5978 5979 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX, 5980 rtm_ipv6_policy, extack); 5981 if (err) 5982 return err; 5983 5984 if ((tb[RTA_SRC] && !rtm->rtm_src_len) || 5985 (tb[RTA_DST] && !rtm->rtm_dst_len)) { 5986 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6"); 5987 return -EINVAL; 5988 } 
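/* With strict checking enabled, only the attributes that inet6_rtm_getroute() actually consumes are accepted; any other attribute fails the request with -EINVAL. */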
5989 5990 for (i = 0; i <= RTA_MAX; i++) { 5991 if (!tb[i]) 5992 continue; 5993 5994 switch (i) { 5995 case RTA_SRC: 5996 case RTA_DST: 5997 case RTA_IIF: 5998 case RTA_OIF: 5999 case RTA_MARK: 6000 case RTA_UID: 6001 case RTA_SPORT: 6002 case RTA_DPORT: 6003 case RTA_IP_PROTO: 6004 break; 6005 default: 6006 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request"); 6007 return -EINVAL; 6008 } 6009 } 6010 6011 return 0; 6012 } 6013 6014 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, 6015 struct netlink_ext_ack *extack) 6016 { 6017 struct net *net = sock_net(in_skb->sk); 6018 struct nlattr *tb[RTA_MAX+1]; 6019 int err, iif = 0, oif = 0; 6020 struct fib6_info *from; 6021 struct dst_entry *dst; 6022 struct rt6_info *rt; 6023 struct sk_buff *skb; 6024 struct rtmsg *rtm; 6025 struct flowi6 fl6 = {}; 6026 bool fibmatch; 6027 6028 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack); 6029 if (err < 0) 6030 goto errout; 6031 6032 err = -EINVAL; 6033 rtm = nlmsg_data(nlh); 6034 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0); 6035 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH); 6036 6037 if (tb[RTA_SRC]) { 6038 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) 6039 goto errout; 6040 6041 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]); 6042 } 6043 6044 if (tb[RTA_DST]) { 6045 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr)) 6046 goto errout; 6047 6048 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]); 6049 } 6050 6051 if (tb[RTA_IIF]) 6052 iif = nla_get_u32(tb[RTA_IIF]); 6053 6054 if (tb[RTA_OIF]) 6055 oif = nla_get_u32(tb[RTA_OIF]); 6056 6057 if (tb[RTA_MARK]) 6058 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]); 6059 6060 if (tb[RTA_UID]) 6061 fl6.flowi6_uid = make_kuid(current_user_ns(), 6062 nla_get_u32(tb[RTA_UID])); 6063 else 6064 fl6.flowi6_uid = iif ? 
INVALID_UID : current_uid(); 6065 6066 if (tb[RTA_SPORT]) 6067 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]); 6068 6069 if (tb[RTA_DPORT]) 6070 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]); 6071 6072 if (tb[RTA_IP_PROTO]) { 6073 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO], 6074 &fl6.flowi6_proto, AF_INET6, 6075 extack); 6076 if (err) 6077 goto errout; 6078 } 6079 6080 if (iif) { 6081 struct net_device *dev; 6082 int flags = 0; 6083 6084 rcu_read_lock(); 6085 6086 dev = dev_get_by_index_rcu(net, iif); 6087 if (!dev) { 6088 rcu_read_unlock(); 6089 err = -ENODEV; 6090 goto errout; 6091 } 6092 6093 fl6.flowi6_iif = iif; 6094 6095 if (!ipv6_addr_any(&fl6.saddr)) 6096 flags |= RT6_LOOKUP_F_HAS_SADDR; 6097 6098 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags); 6099 6100 rcu_read_unlock(); 6101 } else { 6102 fl6.flowi6_oif = oif; 6103 6104 dst = ip6_route_output(net, NULL, &fl6); 6105 } 6106 6107 6108 rt = container_of(dst, struct rt6_info, dst); 6109 if (rt->dst.error) { 6110 err = rt->dst.error; 6111 ip6_rt_put(rt); 6112 goto errout; 6113 } 6114 6115 if (rt == net->ipv6.ip6_null_entry) { 6116 err = rt->dst.error; 6117 ip6_rt_put(rt); 6118 goto errout; 6119 } 6120 6121 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 6122 if (!skb) { 6123 ip6_rt_put(rt); 6124 err = -ENOBUFS; 6125 goto errout; 6126 } 6127 6128 skb_dst_set(skb, &rt->dst); 6129 6130 rcu_read_lock(); 6131 from = rcu_dereference(rt->from); 6132 if (from) { 6133 if (fibmatch) 6134 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, 6135 iif, RTM_NEWROUTE, 6136 NETLINK_CB(in_skb).portid, 6137 nlh->nlmsg_seq, 0); 6138 else 6139 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr, 6140 &fl6.saddr, iif, RTM_NEWROUTE, 6141 NETLINK_CB(in_skb).portid, 6142 nlh->nlmsg_seq, 0); 6143 } else { 6144 err = -ENETUNREACH; 6145 } 6146 rcu_read_unlock(); 6147 6148 if (err < 0) { 6149 kfree_skb(skb); 6150 goto errout; 6151 } 6152 6153 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 6154 errout: 6155 return err; 6156 } 6157 6158 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, 6159 unsigned int nlm_flags) 6160 { 6161 struct sk_buff *skb; 6162 struct net *net = info->nl_net; 6163 u32 seq; 6164 int err; 6165 6166 err = -ENOBUFS; 6167 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 6168 6169 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 6170 if (!skb) 6171 goto errout; 6172 6173 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0, 6174 event, info->portid, seq, nlm_flags); 6175 if (err < 0) { 6176 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 6177 WARN_ON(err == -EMSGSIZE); 6178 kfree_skb(skb); 6179 goto errout; 6180 } 6181 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 6182 info->nlh, gfp_any()); 6183 return; 6184 errout: 6185 if (err < 0) 6186 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); 6187 } 6188 6189 void fib6_rt_update(struct net *net, struct fib6_info *rt, 6190 struct nl_info *info) 6191 { 6192 u32 seq = info->nlh ? 
info->nlh->nlmsg_seq : 0; 6193 struct sk_buff *skb; 6194 int err = -ENOBUFS; 6195 6196 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 6197 if (!skb) 6198 goto errout; 6199 6200 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0, 6201 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE); 6202 if (err < 0) { 6203 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 6204 WARN_ON(err == -EMSGSIZE); 6205 kfree_skb(skb); 6206 goto errout; 6207 } 6208 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 6209 info->nlh, gfp_any()); 6210 return; 6211 errout: 6212 if (err < 0) 6213 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); 6214 } 6215 6216 void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i, 6217 bool offload, bool trap, bool offload_failed) 6218 { 6219 struct sk_buff *skb; 6220 int err; 6221 6222 if (READ_ONCE(f6i->offload) == offload && 6223 READ_ONCE(f6i->trap) == trap && 6224 READ_ONCE(f6i->offload_failed) == offload_failed) 6225 return; 6226 6227 WRITE_ONCE(f6i->offload, offload); 6228 WRITE_ONCE(f6i->trap, trap); 6229 6230 /* 2 means send notifications only if offload_failed was changed. */ 6231 if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 && 6232 READ_ONCE(f6i->offload_failed) == offload_failed) 6233 return; 6234 6235 WRITE_ONCE(f6i->offload_failed, offload_failed); 6236 6237 if (!rcu_access_pointer(f6i->fib6_node)) 6238 /* The route was removed from the tree, do not send 6239 * notification. 6240 */ 6241 return; 6242 6243 if (!net->ipv6.sysctl.fib_notify_on_flag_change) 6244 return; 6245 6246 skb = nlmsg_new(rt6_nlmsg_size(f6i), GFP_KERNEL); 6247 if (!skb) { 6248 err = -ENOBUFS; 6249 goto errout; 6250 } 6251 6252 err = rt6_fill_node(net, skb, f6i, NULL, NULL, NULL, 0, RTM_NEWROUTE, 0, 6253 0, 0); 6254 if (err < 0) { 6255 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 6256 WARN_ON(err == -EMSGSIZE); 6257 kfree_skb(skb); 6258 goto errout; 6259 } 6260 6261 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, NULL, GFP_KERNEL); 6262 return; 6263 6264 errout: 6265 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); 6266 } 6267 EXPORT_SYMBOL(fib6_info_hw_flags_set); 6268 6269 static int ip6_route_dev_notify(struct notifier_block *this, 6270 unsigned long event, void *ptr) 6271 { 6272 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6273 struct net *net = dev_net(dev); 6274 6275 if (!(dev->flags & IFF_LOOPBACK)) 6276 return NOTIFY_OK; 6277 6278 if (event == NETDEV_REGISTER) { 6279 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev; 6280 net->ipv6.ip6_null_entry->dst.dev = dev; 6281 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); 6282 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 6283 net->ipv6.ip6_prohibit_entry->dst.dev = dev; 6284 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); 6285 net->ipv6.ip6_blk_hole_entry->dst.dev = dev; 6286 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); 6287 #endif 6288 } else if (event == NETDEV_UNREGISTER && 6289 dev->reg_state != NETREG_UNREGISTERED) { 6290 /* NETDEV_UNREGISTER can be fired multiple times by 6291 * netdev_wait_allrefs(). Make sure we only call this once.
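* (The reg_state check above ensures that: once the device has reached NETREG_UNREGISTERED, repeated notifications fall through without touching the idev references again.)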
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	} else if (event == NETDEV_UNREGISTER &&
		   dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER could be fired for multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
	}

	return NOTIFY_OK;
}

/*
 *	/proc
 */

#ifdef CONFIG_PROC_FS
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;

	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}
#endif	/* CONFIG_PROC_FS */
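/* For reference: the seven hex fields of /proc/net/rt6_stats are, in order,
 * fib nodes, route (leaf) nodes, dst allocations, route entries, cached
 * routes, dst entries currently in use, and discarded routes, i.e. the
 * seq_printf() arguments of rt6_stats_seq_show() above.
 */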
#ifdef CONFIG_SYSCTL

static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
				     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;
	int ret;
	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}

static struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	= "max_size",
		.data		= &init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_thresh",
		.data		= &ip6_dst_ops_template.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "flush",
		.data		= &init_net.ipv6.sysctl.flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv6_sysctl_rtcache_flush
	},
	{
		.procname	= "gc_min_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "skip_notify_on_dev_down",
		.data		= &init_net.ipv6.sysctl.skip_notify_on_dev_down,
		.maxlen		= sizeof(u8),
		.mode		= 0644,
		.proc_handler	= proc_dou8vec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		/* Entries are rewired by index, so the assignments below
		 * must stay in the template's order.
		 */
		table[0].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.flush_delay;
		table[2].extra1 = net;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[1].procname = NULL;
	}

	return table;
}

size_t ipv6_route_sysctl_table_size(struct net *net)
{
	/* Don't export sysctls to unprivileged users */
	if (net->user_ns != &init_user_ns)
		return 1;

	return ARRAY_SIZE(ipv6_route_table_template);
}
#endif
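/* Illustrative sketch: the table above is registered under
 * /proc/sys/net/ipv6/route/.  The write-only "flush" entry invokes
 * ipv6_sysctl_rtcache_flush(), so flushing cached routes from userspace
 * is roughly
 *
 *	sysctl -w net.ipv6.route.flush=1
 *
 * which ends up in fib6_run_gc() using the flush_delay value configured
 * before the write.
 */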
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;
	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
	       sizeof(*net->ipv6.fib6_null_entry));

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->dst.rt_uncached);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->dst.rt_uncached);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->dst.rt_uncached);
#ifdef CONFIG_IPV6_SUBTREES
	net->ipv6.fib6_routes_require_src = 0;
#endif
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
	net->ipv6.sysctl.skip_notify_on_dev_down = 0;

	atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}

static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.fib6_null_entry);
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create_net("ipv6_route", 0, net->proc_net,
			     &ipv6_route_seq_ops,
			     sizeof(struct ipv6_route_iter)))
		return -ENOMEM;

	if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
				    rt6_stats_seq_show, NULL)) {
		remove_proc_entry("ipv6_route", net->proc_net);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}

static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
	.init = ipv6_inetpeer_init,
	.exit = ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
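/* The priority above is deliberately below ADDRCONF_NOTIFY_PRIORITY, so
 * addrconf's own netdevice notifier runs first; this notifier only fixes
 * up the per-netns special dst entries as the loopback device comes and
 * goes.
 */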
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
}

#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)

BTF_ID_LIST(btf_fib6_info_id)
BTF_ID(struct, fib6_info)

static const struct bpf_iter_seq_info ipv6_route_seq_info = {
	.seq_ops		= &ipv6_route_seq_ops,
	.init_seq_private	= bpf_iter_init_seq_net,
	.fini_seq_private	= bpf_iter_fini_seq_net,
	.seq_priv_size		= sizeof(struct ipv6_route_iter),
};

static struct bpf_iter_reg ipv6_route_reg_info = {
	.target			= "ipv6_route",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__ipv6_route, rt),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &ipv6_route_seq_info,
};

static int __init bpf_iter_register(void)
{
	ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
	return bpf_iter_reg_target(&ipv6_route_reg_info);
}

static void bpf_iter_unregister(void)
{
	bpf_iter_unreg_target(&ipv6_route_reg_info);
}
#endif
#endif
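/* Illustrative sketch: the "ipv6_route" target registered above lets a
 * BPF program with SEC("iter/ipv6_route") walk the FIB.  From userspace,
 * one would typically pin and read such an iterator roughly as
 *
 *	bpftool iter pin ipv6_route_iter.o /sys/fs/bpf/ip6routes
 *	cat /sys/fs/bpf/ip6routes
 *
 * where "ipv6_route_iter.o" stands in for a compiled object defining the
 * iterator program.
 */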
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	ret = bpf_iter_register();
	if (ret)
		goto out_register_late_subsys;
#endif
#endif

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		INIT_LIST_HEAD(&ul->quarantine);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}

void ip6_route_cleanup(void)
{
#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_unregister();
#endif
#endif
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}
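/* Note: ip6_route_init() and ip6_route_cleanup() are not module init/exit
 * hooks themselves; they are called from the IPv6 protocol bring-up and
 * teardown paths (inet6_init()/inet6_exit() in net/ipv6/af_inet6.c), which
 * sequence them against the rest of the stack.
 */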