route.c (5be1f9d82fa73c199ebeee2866dbac83e419c897) → route.c (8d7017fd621d02ff0d47d19484350c2356828483)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Linux INET6 implementation
4 * FIB front-end.
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 */

--- 86 unchanged lines hidden (view full) ---

95static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
96static void ip6_link_failure(struct sk_buff *skb);
97static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
98 struct sk_buff *skb, u32 mtu);
99static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
100 struct sk_buff *skb);
101static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
102 int strict);
103static size_t rt6_nlmsg_size(struct fib6_info *rt);
103static size_t rt6_nlmsg_size(struct fib6_info *f6i);
104static int rt6_fill_node(struct net *net, struct sk_buff *skb,
105 struct fib6_info *rt, struct dst_entry *dst,
106 struct in6_addr *dest, struct in6_addr *src,
107 int iif, int type, u32 portid, u32 seq,
108 unsigned int flags);
109static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
110 const struct in6_addr *daddr,
111 const struct in6_addr *saddr);

--- 59 unchanged lines hidden (view full) ---

171 struct net_device *rt_dev = rt->dst.dev;
172
173 if (rt_idev->dev == dev) {
174 rt->rt6i_idev = in6_dev_get(loopback_dev);
175 in6_dev_put(rt_idev);
176 }
177
178 if (rt_dev == dev) {
179 rt->dst.dev = loopback_dev;
179 rt->dst.dev = blackhole_netdev;
180 dev_hold(rt->dst.dev);
181 dev_put(rt_dev);
182 }
183 }
184 spin_unlock_bh(&ul->lock);
185 }
186}
187

--- 25 unchanged lines hidden (view full) ---

213}
214
215static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
216 struct sk_buff *skb,
217 const void *daddr)
218{
219 const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
220
221 return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr);
221 return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
222 dst->dev, skb, daddr);
222}
223
224static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
225{
226 struct net_device *dev = dst->dev;
227 struct rt6_info *rt = (struct rt6_info *)dst;
228
229 daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr);

--- 193 unchanged lines hidden (view full) ---

423
424void fib6_select_path(const struct net *net, struct fib6_result *res,
425 struct flowi6 *fl6, int oif, bool have_oif_match,
426 const struct sk_buff *skb, int strict)
427{
428 struct fib6_info *sibling, *next_sibling;
429 struct fib6_info *match = res->f6i;
430
431 if (!match->fib6_nsiblings || have_oif_match)
432 if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
432 goto out;
433
434 /* We might have already computed the hash for ICMPv6 errors. In such
435 * case it will always be non-zero. Otherwise now is the time to do it.
436 */
437 if (!fl6->mp_hash)
438 if (!fl6->mp_hash &&
439 (!match->nh || nexthop_is_multipath(match->nh)))
438 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
439
440 if (fl6->mp_hash <= atomic_read(&match->fib6_nh.fib_nh_upper_bound))
442 if (unlikely(match->nh)) {
443 nexthop_path_fib6_result(res, fl6->mp_hash);
444 return;
445 }
446
447 if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
441 goto out;
442
443 list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
444 fib6_siblings) {
445 const struct fib6_nh *nh = &sibling->fib6_nh;
452 const struct fib6_nh *nh = sibling->fib6_nh;
446 int nh_upper_bound;
447
448 nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
449 if (fl6->mp_hash > nh_upper_bound)
450 continue;
451 if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
452 break;
453 match = sibling;
454 break;
455 }
456
457out:
458 res->f6i = match;
459 res->nh = &match->fib6_nh;
466 res->nh = match->fib6_nh;
460}
461
462/*
463 * Route lookup. rcu_read_lock() should be held.
464 */
465
466static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
467 const struct in6_addr *saddr, int oif, int flags)

--- 11 unchanged lines hidden (view full) ---

479 if (ipv6_chk_addr(net, saddr, dev,
480 flags & RT6_LOOKUP_F_IFACE))
481 return true;
482 }
483
484 return false;
485}
486
494struct fib6_nh_dm_arg {
495 struct net *net;
496 const struct in6_addr *saddr;
497 int oif;
498 int flags;
499 struct fib6_nh *nh;
500};
501
502static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
503{
504 struct fib6_nh_dm_arg *arg = _arg;
505
506 arg->nh = nh;
507 return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
508 arg->flags);
509}
510
511/* returns fib6_nh from nexthop or NULL */
512static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
513 struct fib6_result *res,
514 const struct in6_addr *saddr,
515 int oif, int flags)
516{
517 struct fib6_nh_dm_arg arg = {
518 .net = net,
519 .saddr = saddr,
520 .oif = oif,
521 .flags = flags,
522 };
523
524 if (nexthop_is_blackhole(nh))
525 return NULL;
526
527 if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
528 return arg.nh;
529
530 return NULL;
531}
532
487static void rt6_device_match(struct net *net, struct fib6_result *res,
488 const struct in6_addr *saddr, int oif, int flags)
489{
490 struct fib6_info *f6i = res->f6i;
491 struct fib6_info *spf6i;
492 struct fib6_nh *nh;
493
494 if (!oif && ipv6_addr_any(saddr)) {
495 nh = &f6i->fib6_nh;
541 if (unlikely(f6i->nh)) {
542 nh = nexthop_fib6_nh(f6i->nh);
543 if (nexthop_is_blackhole(f6i->nh))
544 goto out_blackhole;
545 } else {
546 nh = f6i->fib6_nh;
547 }
496 if (!(nh->fib_nh_flags & RTNH_F_DEAD))
497 goto out;
498 }
499
500 for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
501 nh = &spf6i->fib6_nh;
502 if (__rt6_device_match(net, nh, saddr, oif, flags)) {
553 bool matched = false;
554
555 if (unlikely(spf6i->nh)) {
556 nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
557 oif, flags);
558 if (nh)
559 matched = true;
560 } else {
561 nh = spf6i->fib6_nh;
562 if (__rt6_device_match(net, nh, saddr, oif, flags))
563 matched = true;
564 }
565 if (matched) {
503 res->f6i = spf6i;
504 goto out;
505 }
506 }
507
508 if (oif && flags & RT6_LOOKUP_F_IFACE) {
509 res->f6i = net->ipv6.fib6_null_entry;
510 nh = &res->f6i->fib6_nh;
573 nh = res->f6i->fib6_nh;
511 goto out;
512 }
513
514 nh = &f6i->fib6_nh;
577 if (unlikely(f6i->nh)) {
578 nh = nexthop_fib6_nh(f6i->nh);
579 if (nexthop_is_blackhole(f6i->nh))
580 goto out_blackhole;
581 } else {
582 nh = f6i->fib6_nh;
583 }
584
515 if (nh->fib_nh_flags & RTNH_F_DEAD) {
516 res->f6i = net->ipv6.fib6_null_entry;
517 nh = &res->f6i->fib6_nh;
587 nh = res->f6i->fib6_nh;
518 }
519out:
520 res->nh = nh;
521 res->fib6_type = res->f6i->fib6_type;
522 res->fib6_flags = res->f6i->fib6_flags;
593 return;
594
595out_blackhole:
596 res->fib6_flags |= RTF_REJECT;
597 res->fib6_type = RTN_BLACKHOLE;
598 res->nh = nh;
523}
524
525#ifdef CONFIG_IPV6_ROUTER_PREF
526struct __rt6_probe_work {
527 struct work_struct work;
528 struct in6_addr target;
529 struct net_device *dev;
530};

--- 154 unchanged lines hidden (view full) ---

685 *do_rr = match_do_rr;
686 *mpri = m;
687 rc = true;
688 }
689out:
690 return rc;
691}
692
769struct fib6_nh_frl_arg {
770 u32 flags;
771 int oif;
772 int strict;
773 int *mpri;
774 bool *do_rr;
775 struct fib6_nh *nh;
776};
777
778static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
779{
780 struct fib6_nh_frl_arg *arg = _arg;
781
782 arg->nh = nh;
783 return find_match(nh, arg->flags, arg->oif, arg->strict,
784 arg->mpri, arg->do_rr);
785}
786
693static void __find_rr_leaf(struct fib6_info *f6i_start,
694 struct fib6_info *nomatch, u32 metric,
695 struct fib6_result *res, struct fib6_info **cont,
696 int oif, int strict, bool *do_rr, int *mpri)
697{
698 struct fib6_info *f6i;
699
700 for (f6i = f6i_start;
701 f6i && f6i != nomatch;
702 f6i = rcu_dereference(f6i->fib6_next)) {
797 bool matched = false;
703 struct fib6_nh *nh;
704
705 if (cont && f6i->fib6_metric != metric) {
706 *cont = f6i;
707 return;
708 }
709
710 if (fib6_check_expired(f6i))
711 continue;
712
713 nh = &f6i->fib6_nh;
714 if (find_match(nh, f6i->fib6_flags, oif, strict, mpri, do_rr)) {
808 if (unlikely(f6i->nh)) {
809 struct fib6_nh_frl_arg arg = {
810 .flags = f6i->fib6_flags,
811 .oif = oif,
812 .strict = strict,
813 .mpri = mpri,
814 .do_rr = do_rr
815 };
816
817 if (nexthop_is_blackhole(f6i->nh)) {
818 res->fib6_flags = RTF_REJECT;
819 res->fib6_type = RTN_BLACKHOLE;
820 res->f6i = f6i;
821 res->nh = nexthop_fib6_nh(f6i->nh);
822 return;
823 }
824 if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
825 &arg)) {
826 matched = true;
827 nh = arg.nh;
828 }
829 } else {
830 nh = f6i->fib6_nh;
831 if (find_match(nh, f6i->fib6_flags, oif, strict,
832 mpri, do_rr))
833 matched = true;
834 }
835 if (matched) {
715 res->f6i = f6i;
716 res->nh = nh;
717 res->fib6_flags = f6i->fib6_flags;
718 res->fib6_type = f6i->fib6_type;
719 }
720 }
721}
722

--- 64 unchanged lines hidden (view full) ---

787 rcu_assign_pointer(fn->rr_ptr, next);
788 spin_unlock_bh(&leaf->fib6_table->tb6_lock);
789 }
790 }
791
792out:
793 if (!res->f6i) {
794 res->f6i = net->ipv6.fib6_null_entry;
795 res->nh = &res->f6i->fib6_nh;
916 res->nh = res->f6i->fib6_nh;
796 res->fib6_flags = res->f6i->fib6_flags;
797 res->fib6_type = res->f6i->fib6_type;
798 }
799}
800
801static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
802{
803 return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||

--- 304 unchanged lines hidden (view full) ---

1108 if (res.f6i == net->ipv6.fib6_null_entry) {
1109 fn = fib6_backtrack(fn, &fl6->saddr);
1110 if (fn)
1111 goto restart;
1112
1113 rt = net->ipv6.ip6_null_entry;
1114 dst_hold(&rt->dst);
1115 goto out;
1237 } else if (res.fib6_flags & RTF_REJECT) {
1238 goto do_create;
1116 }
1117
1118 fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1119 fl6->flowi6_oif != 0, skb, flags);
1120
1121 /* Search through exception table */
1122 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1123 if (rt) {
1124 if (ip6_hold_safe(net, &rt))
1125 dst_use_noref(&rt->dst, jiffies);
1126 } else {
1250do_create:
1127 rt = ip6_create_rt_rcu(&res);
1128 }
1129
1130out:
1131 trace_fib6_table_lookup(net, &res, table, fl6);
1132
1133 rcu_read_unlock();
1134

--- 124 unchanged lines hidden (view full) ---

1259 ip6_rt_copy_init(pcpu_rt, res);
1260 pcpu_rt->rt6i_flags |= RTF_PCPU;
1261 return pcpu_rt;
1262}
1263
1264/* It should be called with rcu_read_lock() acquired */
1265static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
1266{
1267 struct rt6_info *pcpu_rt, **p;
1391 struct rt6_info *pcpu_rt;
1268
1269 p = this_cpu_ptr(res->f6i->rt6i_pcpu);
1270 pcpu_rt = *p;
1393 pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
1271
1272 if (pcpu_rt)
1273 ip6_hold_safe(NULL, &pcpu_rt);
1274
1275 return pcpu_rt;
1276}
1277
1278static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1279 const struct fib6_result *res)
1280{
1281 struct rt6_info *pcpu_rt, *prev, **p;
1282
1283 pcpu_rt = ip6_rt_pcpu_alloc(res);
1284 if (!pcpu_rt) {
1285 dst_hold(&net->ipv6.ip6_null_entry->dst);
1286 return net->ipv6.ip6_null_entry;
1287 }
1404 if (!pcpu_rt)
1405 return NULL;
1288
1289 dst_hold(&pcpu_rt->dst);
1290 p = this_cpu_ptr(res->f6i->rt6i_pcpu);
1407 p = this_cpu_ptr(res->nh->rt6i_pcpu);
1291 prev = cmpxchg(p, NULL, pcpu_rt);
1292 BUG_ON(prev);
1293
1294 if (res->f6i->fib6_destroying) {
1295 struct fib6_info *from;
1296
1297 from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
1298 fib6_info_release(from);

--- 153 unchanged lines hidden (view full) ---

1452 rcu_read_unlock();
1453 }
1454
1455 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1456
1457 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1458}
1459
1577#define FIB6_EXCEPTION_BUCKET_FLUSHED 0x1UL
1578
1579/* used when the flushed bit is not relevant, only access to the bucket
1580 * (ie., all bucket users except rt6_insert_exception);
1581 *
1582 * called under rcu lock; sometimes called with rt6_exception_lock held
1583 */
1584static
1585struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1586 spinlock_t *lock)
1587{
1588 struct rt6_exception_bucket *bucket;
1589
1590 if (lock)
1591 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1592 lockdep_is_held(lock));
1593 else
1594 bucket = rcu_dereference(nh->rt6i_exception_bucket);
1595
1596 /* remove bucket flushed bit if set */
1597 if (bucket) {
1598 unsigned long p = (unsigned long)bucket;
1599
1600 p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1601 bucket = (struct rt6_exception_bucket *)p;
1602 }
1603
1604 return bucket;
1605}
1606
1607static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1608{
1609 unsigned long p = (unsigned long)bucket;
1610
1611 return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
1612}
1613
1614/* called with rt6_exception_lock held */
1615static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1616 spinlock_t *lock)
1617{
1618 struct rt6_exception_bucket *bucket;
1619 unsigned long p;
1620
1621 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1622 lockdep_is_held(lock));
1623
1624 p = (unsigned long)bucket;
1625 p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1626 bucket = (struct rt6_exception_bucket *)p;
1627 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1628}
1629
1460static int rt6_insert_exception(struct rt6_info *nrt,
1461 const struct fib6_result *res)
1462{
1463 struct net *net = dev_net(nrt->dst.dev);
1464 struct rt6_exception_bucket *bucket;
1635 struct fib6_info *f6i = res->f6i;
1465 struct in6_addr *src_key = NULL;
1466 struct rt6_exception *rt6_ex;
1467 struct fib6_info *f6i = res->f6i;
1638 struct fib6_nh *nh = res->nh;
1468 int err = 0;
1469
1470 spin_lock_bh(&rt6_exception_lock);
1471
1472 if (f6i->exception_bucket_flushed) {
1473 err = -EINVAL;
1474 goto out;
1475 }
1476
1477 bucket = rcu_dereference_protected(f6i->rt6i_exception_bucket,
1478 lockdep_is_held(&rt6_exception_lock));
1643 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1644 lockdep_is_held(&rt6_exception_lock));
1479 if (!bucket) {
1480 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1481 GFP_ATOMIC);
1482 if (!bucket) {
1483 err = -ENOMEM;
1484 goto out;
1485 }
1486 rcu_assign_pointer(f6i->rt6i_exception_bucket, bucket);
1652 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1653 } else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1654 err = -EINVAL;
1655 goto out;
1487 }
1488
1489#ifdef CONFIG_IPV6_SUBTREES
1490 /* fib6_src.plen != 0 indicates f6i is in subtree
1491 * and exception table is indexed by a hash of
1492 * both fib6_dst and fib6_src.
1493 * Otherwise, the exception table is indexed by
1494 * a hash of only fib6_dst.

--- 38 unchanged lines hidden (view full) ---

1533 fib6_update_sernum(net, f6i);
1534 spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1535 fib6_force_start_gc(net);
1536 }
1537
1538 return err;
1539}
1540
1541void rt6_flush_exceptions(struct fib6_info *rt)
1710static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1542{
1543 struct rt6_exception_bucket *bucket;
1544 struct rt6_exception *rt6_ex;
1545 struct hlist_node *tmp;
1546 int i;
1547
1548 spin_lock_bh(&rt6_exception_lock);
1549 /* Prevent rt6_insert_exception() to recreate the bucket list */
1550 rt->exception_bucket_flushed = 1;
1551
1718
1552 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1553 lockdep_is_held(&rt6_exception_lock));
1719 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1554 if (!bucket)
1555 goto out;
1556
1723 /* Prevent rt6_insert_exception() to recreate the bucket list */
1724 if (!from)
1725 fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1726
1557 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1558 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
1559 rt6_remove_exception(bucket, rt6_ex);
1560 WARN_ON_ONCE(bucket->depth);
1728 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1729 if (!from ||
1730 rcu_access_pointer(rt6_ex->rt6i->from) == from)
1731 rt6_remove_exception(bucket, rt6_ex);
1732 }
1733 WARN_ON_ONCE(!from && bucket->depth);
1561 bucket++;
1562 }
1563
1564out:
1565 spin_unlock_bh(&rt6_exception_lock);
1566}
1567
1740static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1741{
1742 struct fib6_info *f6i = arg;
1743
1744 fib6_nh_flush_exceptions(nh, f6i);
1745
1746 return 0;
1747}
1748
1749void rt6_flush_exceptions(struct fib6_info *f6i)
1750{
1751 if (f6i->nh)
1752 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
1753 f6i);
1754 else
1755 fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1756}
1757
1568/* Find cached rt in the hash table inside passed in rt
1569 * Caller has to hold rcu_read_lock()
1570 */
1571static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1572 const struct in6_addr *daddr,
1573 const struct in6_addr *saddr)
1574{
1575 const struct in6_addr *src_key = NULL;

--- 12 unchanged lines hidden (view full) ---

1588 * if the passed in saddr does not find anything.
1589 * (See the logic in ip6_rt_cache_alloc() on how
1590 * rt->rt6i_src is updated.)
1591 */
1592 if (res->f6i->fib6_src.plen)
1593 src_key = saddr;
1594find_ex:
1595#endif
1596 bucket = rcu_dereference(res->f6i->rt6i_exception_bucket);
1786 bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1597 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1598
1599 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1600 ret = rt6_ex->rt6i;
1601
1602#ifdef CONFIG_IPV6_SUBTREES
1603 /* Use fib6_src as src_key and redo lookup */
1604 if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1605 src_key = &res->f6i->fib6_src.addr;
1606 goto find_ex;
1607 }
1608#endif
1609
1610 return ret;
1611}
1612
1613/* Remove the passed in cached rt from the hash table that contains it */
1614static int rt6_remove_exception_rt(struct rt6_info *rt)
1804static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1805 const struct rt6_info *rt)
1615{
1807 const struct in6_addr *src_key = NULL;
1616 struct rt6_exception_bucket *bucket;
1617 struct in6_addr *src_key = NULL;
1618 struct rt6_exception *rt6_ex;
1619 struct fib6_info *from;
1620 int err;
1621
1622 from = rcu_dereference(rt->from);
1623 if (!from ||
1624 !(rt->rt6i_flags & RTF_CACHE))
1625 return -EINVAL;
1626
1627 if (!rcu_access_pointer(from->rt6i_exception_bucket))
1812 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1628 return -ENOENT;
1629
1630 spin_lock_bh(&rt6_exception_lock);
1631 bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
1632 lockdep_is_held(&rt6_exception_lock));
1816 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1817
1633#ifdef CONFIG_IPV6_SUBTREES
1634 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1635 * and exception table is indexed by a hash of
1636 * both rt6i_dst and rt6i_src.
1637 * Otherwise, the exception table is indexed by
1638 * a hash of only rt6i_dst.
1639 */
1640 if (from->fib6_src.plen)
1825 if (plen)
1641 src_key = &rt->rt6i_src.addr;
1642#endif
1643 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1644 &rt->rt6i_dst.addr,
1645 src_key);
1646 if (rt6_ex) {
1647 rt6_remove_exception(bucket, rt6_ex);
1648 err = 0;
1649 } else {
1650 err = -ENOENT;
1651 }
1652
1653 spin_unlock_bh(&rt6_exception_lock);
1654 return err;
1655}
1656
1657/* Find rt6_ex which contains the passed in rt cache and
1658 * refresh its stamp
1659 */
1660static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1842struct fib6_nh_excptn_arg {
1843 struct rt6_info *rt;
1844 int plen;
1845};
1846
1847static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1661{
1662 struct rt6_exception_bucket *bucket;
1663 struct in6_addr *src_key = NULL;
1664 struct rt6_exception *rt6_ex;
1849 struct fib6_nh_excptn_arg *arg = _arg;
1850 int err;
1851
1852 err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1853 if (err == 0)
1854 return 1;
1855
1856 return 0;
1857}
1858
1859static int rt6_remove_exception_rt(struct rt6_info *rt)
1860{
1665 struct fib6_info *from;
1666
1667 rcu_read_lock();
1668 from = rcu_dereference(rt->from);
1669 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1670 goto unlock;
1865 return -EINVAL;
1671
1672 bucket = rcu_dereference(from->rt6i_exception_bucket);
1867 if (from->nh) {
1868 struct fib6_nh_excptn_arg arg = {
1869 .rt = rt,
1870 .plen = from->fib6_src.plen
1871 };
1872 int rc;
1673
1874 /* rc = 1 means an entry was found */
1875 rc = nexthop_for_each_fib6_nh(from->nh,
1876 rt6_nh_remove_exception_rt,
1877 &arg);
1878 return rc ? 0 : -ENOENT;
1879 }
1880
1881 return fib6_nh_remove_exception(from->fib6_nh,
1882 from->fib6_src.plen, rt);
1883}
1884
1885/* Find rt6_ex which contains the passed in rt cache and
1886 * refresh its stamp
1887 */
1888static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1889 const struct rt6_info *rt)
1890{
1891 const struct in6_addr *src_key = NULL;
1892 struct rt6_exception_bucket *bucket;
1893 struct rt6_exception *rt6_ex;
1894
1895 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1674#ifdef CONFIG_IPV6_SUBTREES
1675 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1676 * and exception table is indexed by a hash of
1677 * both rt6i_dst and rt6i_src.
1678 * Otherwise, the exception table is indexed by
1679 * a hash of only rt6i_dst.
1680 */
1681 if (from->fib6_src.plen)
1903 if (plen)
1682 src_key = &rt->rt6i_src.addr;
1683#endif
1684 rt6_ex = __rt6_find_exception_rcu(&bucket,
1685 &rt->rt6i_dst.addr,
1686 src_key);
1906 rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1687 if (rt6_ex)
1688 rt6_ex->stamp = jiffies;
1909}
1689
1911struct fib6_nh_match_arg {
1912 const struct net_device *dev;
1913 const struct in6_addr *gw;
1914 struct fib6_nh *match;
1915};
1916
1917/* determine if fib6_nh has given device and gateway */
1918static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
1919{
1920 struct fib6_nh_match_arg *arg = _arg;
1921
1922 if (arg->dev != nh->fib_nh_dev ||
1923 (arg->gw && !nh->fib_nh_gw_family) ||
1924 (!arg->gw && nh->fib_nh_gw_family) ||
1925 (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
1926 return 0;
1927
1928 arg->match = nh;
1929
1930 /* found a match, break the loop */
1931 return 1;
1932}
1933
1934static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1935{
1936 struct fib6_info *from;
1937 struct fib6_nh *fib6_nh;
1938
1939 rcu_read_lock();
1940
1941 from = rcu_dereference(rt->from);
1942 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1943 goto unlock;
1944
1945 if (from->nh) {
1946 struct fib6_nh_match_arg arg = {
1947 .dev = rt->dst.dev,
1948 .gw = &rt->rt6i_gateway,
1949 };
1950
1951 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
1952
1953 if (!arg.match)
1954 return;
1955 fib6_nh = arg.match;
1956 } else {
1957 fib6_nh = from->fib6_nh;
1958 }
1959 fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
1690unlock:
1691 rcu_read_unlock();
1692}
1693
1694static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1695 struct rt6_info *rt, int mtu)
1696{
1697 /* If the new MTU is lower than the route PMTU, this new MTU will be the

--- 11 unchanged lines hidden (view full) ---

1709
1710 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1711 return true;
1712
1713 return false;
1714}
1715
1716static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
1717 struct fib6_info *rt, int mtu)
1987 const struct fib6_nh *nh, int mtu)
1718{
1719 struct rt6_exception_bucket *bucket;
1720 struct rt6_exception *rt6_ex;
1721 int i;
1722
1723 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1724 lockdep_is_held(&rt6_exception_lock));
1725
1993 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1726 if (!bucket)
1727 return;
1728
1729 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1730 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1731 struct rt6_info *entry = rt6_ex->rt6i;
1732
1733 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected

--- 5 unchanged lines hidden (view full) ---

1739 dst_metric_set(&entry->dst, RTAX_MTU, mtu);
1740 }
1741 bucket++;
1742 }
1743}
1744
1745#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
1746
1747static void rt6_exceptions_clean_tohost(struct fib6_info *rt,
1748 struct in6_addr *gateway)
2015static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
2016 const struct in6_addr *gateway)
1749{
1750 struct rt6_exception_bucket *bucket;
1751 struct rt6_exception *rt6_ex;
1752 struct hlist_node *tmp;
1753 int i;
1754
1755 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
2023 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1756 return;
1757
1758 spin_lock_bh(&rt6_exception_lock);
1759 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1760 lockdep_is_held(&rt6_exception_lock));
1761
2027 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1762 if (bucket) {
1763 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1764 hlist_for_each_entry_safe(rt6_ex, tmp,
1765 &bucket->chain, hlist) {
1766 struct rt6_info *entry = rt6_ex->rt6i;
1767
1768 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
1769 RTF_CACHE_GATEWAY &&

--- 48 unchanged lines hidden (view full) ---

1818 rt6_remove_exception(bucket, rt6_ex);
1819 return;
1820 }
1821 }
1822
1823 gc_args->more++;
1824}
1825
1826void rt6_age_exceptions(struct fib6_info *rt,
1827 struct fib6_gc_args *gc_args,
1828 unsigned long now)
2092static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2093 struct fib6_gc_args *gc_args,
2094 unsigned long now)
1829{
1830 struct rt6_exception_bucket *bucket;
1831 struct rt6_exception *rt6_ex;
1832 struct hlist_node *tmp;
1833 int i;
1834
1835 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
2101 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1836 return;
1837
1838 rcu_read_lock_bh();
1839 spin_lock(&rt6_exception_lock);
1840 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1841 lockdep_is_held(&rt6_exception_lock));
1842
2106 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1843 if (bucket) {
1844 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1845 hlist_for_each_entry_safe(rt6_ex, tmp,
1846 &bucket->chain, hlist) {
1847 rt6_age_examine_exception(bucket, rt6_ex,
1848 gc_args, now);
1849 }
1850 bucket++;
1851 }
1852 }
1853 spin_unlock(&rt6_exception_lock);
1854 rcu_read_unlock_bh();
1855}
1856
2121struct fib6_nh_age_excptn_arg {
2122 struct fib6_gc_args *gc_args;
2123 unsigned long now;
2124};
2125
2126static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2127{
2128 struct fib6_nh_age_excptn_arg *arg = _arg;
2129
2130 fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2131 return 0;
2132}
2133
2134void rt6_age_exceptions(struct fib6_info *f6i,
2135 struct fib6_gc_args *gc_args,
2136 unsigned long now)
2137{
2138 if (f6i->nh) {
2139 struct fib6_nh_age_excptn_arg arg = {
2140 .gc_args = gc_args,
2141 .now = now
2142 };
2143
2144 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
2145 &arg);
2146 } else {
2147 fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
2148 }
2149}
2150
1857/* must be called with rcu lock held */
1858int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
1859 struct flowi6 *fl6, struct fib6_result *res, int strict)
1860{
1861 struct fib6_node *fn, *saved_fn;
1862
1863 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1864 saved_fn = fn;

--- 20 unchanged lines hidden (view full) ---

1885 return 0;
1886}
1887
1888struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
1889 int oif, struct flowi6 *fl6,
1890 const struct sk_buff *skb, int flags)
1891{
1892 struct fib6_result res = {};
1893 struct rt6_info *rt;
2187 struct rt6_info *rt = NULL;
1894 int strict = 0;
1895
2190 WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
2191 !rcu_read_lock_held());
2192
1896 strict |= flags & RT6_LOOKUP_F_IFACE;
1897 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
1898 if (net->ipv6.devconf_all->forwarding == 0)
1899 strict |= RT6_LOOKUP_F_REACHABLE;
1900
1901 rcu_read_lock();
1902
1903 fib6_table_lookup(net, table, oif, fl6, &res, strict);
1904 if (res.f6i == net->ipv6.fib6_null_entry) {
1905 rt = net->ipv6.ip6_null_entry;
1906 rcu_read_unlock();
1907 dst_hold(&rt->dst);
1908 return rt;
1909 }
2201 if (res.f6i == net->ipv6.fib6_null_entry)
2202 goto out;
1910
1911 fib6_select_path(net, &res, fl6, oif, false, skb, strict);
1912
1913 /*Search through exception table */
1914 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1915 if (rt) {
1916 if (ip6_hold_safe(net, &rt))
1917 dst_use_noref(&rt->dst, jiffies);
1918
1919 rcu_read_unlock();
1920 return rt;
2209 goto out;
1921 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
1922 !res.nh->fib_nh_gw_family)) {
1923 /* Create a RTF_CACHE clone which will not be
1924 * owned by the fib6 tree. It is for the special case where
1925 * the daddr in the skb during the neighbor look-up is different
1926 * from the fl6->daddr used to look-up route here.
1927 */
1928 struct rt6_info *uncached_rt;
2217 rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
1929
1930 uncached_rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
1931
1932 rcu_read_unlock();
1933
1934 if (uncached_rt) {
1935 /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
1936 * No need for another dst_hold()
2219 if (rt) {
2220 /* 1 refcnt is taken during ip6_rt_cache_alloc().
2221 * As rt6_uncached_list_add() does not consume refcnt,
2222 * this refcnt is always returned to the caller even
2223 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
1937 */
1938 rt6_uncached_list_add(uncached_rt);
2225 rt6_uncached_list_add(rt);
1939 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
1940 } else {
1941 uncached_rt = net->ipv6.ip6_null_entry;
1942 dst_hold(&uncached_rt->dst);
1943 }
2227 rcu_read_unlock();
1944
1945 return uncached_rt;
2229 return rt;
2230 }
1946 } else {
1947 /* Get a percpu copy */
1948
1949 struct rt6_info *pcpu_rt;
1950
1951 local_bh_disable();
1952 pcpu_rt = rt6_get_pcpu_route(&res);
2234 rt = rt6_get_pcpu_route(&res);
1953
1954 if (!pcpu_rt)
1955 pcpu_rt = rt6_make_pcpu_route(net, &res);
2236 if (!rt)
2237 rt = rt6_make_pcpu_route(net, &res);
1956
1957 local_bh_enable();
1958 rcu_read_unlock();
1959
1960 return pcpu_rt;
1961 }
2240 }
2241out:
2242 if (!rt)
2243 rt = net->ipv6.ip6_null_entry;
2244 if (!(flags & RT6_LOOKUP_F_DST_NOREF))
2245 ip6_hold_safe(net, &rt);
2246 rcu_read_unlock();
2247
2248 return rt;
1962}
1963EXPORT_SYMBOL_GPL(ip6_pol_route);
1964
1965static struct rt6_info *ip6_pol_route_input(struct net *net,
1966 struct fib6_table *table,
1967 struct flowi6 *fl6,
1968 const struct sk_buff *skb,
1969 int flags)

--- 114 unchanged lines hidden (view full) ---

2084 }
2085 break;
2086 }
2087 mhash = flow_hash_from_keys(&hash_keys);
2088
2089 return mhash >> 1;
2090}
2091
2379/* Called with rcu held */
2092void ip6_route_input(struct sk_buff *skb)
2093{
2094 const struct ipv6hdr *iph = ipv6_hdr(skb);
2095 struct net *net = dev_net(skb->dev);
2096 int flags = RT6_LOOKUP_F_HAS_SADDR;
2384 int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2097 struct ip_tunnel_info *tun_info;
2098 struct flowi6 fl6 = {
2099 .flowi6_iif = skb->dev->ifindex,
2100 .daddr = iph->daddr,
2101 .saddr = iph->saddr,
2102 .flowlabel = ip6_flowinfo(iph),
2103 .flowi6_mark = skb->mark,
2104 .flowi6_proto = iph->nexthdr,

--- 5 unchanged lines hidden (view full) ---

2110 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2111
2112 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2113 flkeys = &_flkeys;
2114
2115 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2116 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2117 skb_dst_drop(skb);
2118 skb_dst_set(skb,
2119 ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
2406 skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2407 &fl6, skb, flags));
2120}
2121
2122static struct rt6_info *ip6_pol_route_output(struct net *net,
2123 struct fib6_table *table,
2124 struct flowi6 *fl6,
2125 const struct sk_buff *skb,
2126 int flags)
2127{
2128 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2129}
2130
2131struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
2132 struct flowi6 *fl6, int flags)
2419struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2420 const struct sock *sk,
2421 struct flowi6 *fl6, int flags)
2133{
2134 bool any_src;
2135
2136 if (ipv6_addr_type(&fl6->daddr) &
2137 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2138 struct dst_entry *dst;
2139
2429 /* This function does not take refcnt on the dst */
2140 dst = l3mdev_link_scope_lookup(net, fl6);
2141 if (dst)
2142 return dst;
2143 }
2144
2145 fl6->flowi6_iif = LOOPBACK_IFINDEX;
2146
2437 flags |= RT6_LOOKUP_F_DST_NOREF;
2147 any_src = ipv6_addr_any(&fl6->saddr);
2148 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2149 (fl6->flowi6_oif && any_src))
2150 flags |= RT6_LOOKUP_F_IFACE;
2151
2152 if (!any_src)
2153 flags |= RT6_LOOKUP_F_HAS_SADDR;
2154 else if (sk)
2155 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
2156
2157 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2158}
2450EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);
2451
2452struct dst_entry *ip6_route_output_flags(struct net *net,
2453 const struct sock *sk,
2454 struct flowi6 *fl6,
2455 int flags)
2456{
2457 struct dst_entry *dst;
2458 struct rt6_info *rt6;
2459
2460 rcu_read_lock();
2461 dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2462 rt6 = (struct rt6_info *)dst;
2463 /* For dst cached in uncached_list, refcnt is already taken. */
2464 if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
2465 dst = &net->ipv6.ip6_null_entry->dst;
2466 dst_hold(dst);
2467 }
2468 rcu_read_unlock();
2469
2470 return dst;
2471}
2159EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2160
2161struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2162{
2163 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
2164 struct net_device *loopback_dev = net->loopback_dev;
2165 struct dst_entry *new = NULL;
2166

--- 208 unchanged lines hidden (view full) ---

2375 struct rt6_info *nrt6;
2376
2377 rcu_read_lock();
2378 res.f6i = rcu_dereference(rt6->from);
2379 if (!res.f6i) {
2380 rcu_read_unlock();
2381 return;
2382 }
2383 res.nh = &res.f6i->fib6_nh;
2384 res.fib6_flags = res.f6i->fib6_flags;
2385 res.fib6_type = res.f6i->fib6_type;
2386
2699 if (res.f6i->nh) {
2700 struct fib6_nh_match_arg arg = {
2701 .dev = dst->dev,
2702 .gw = &rt6->rt6i_gateway,
2703 };
2704
2705 nexthop_for_each_fib6_nh(res.f6i->nh,
2706 fib6_nh_find_match, &arg);
2707
2708 /* fib6_info uses a nexthop that does not have fib6_nh
2709 * using the dst->dev + gw. Should be impossible.
2710 */
2711 if (!arg.match) {
2712 rcu_read_unlock();
2713 return;
2714 }
2715
2716 res.nh = arg.match;
2717 } else {
2718 res.nh = res.f6i->fib6_nh;
2719 }
2720
2387 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2388 if (nrt6) {
2389 rt6_do_update_pmtu(nrt6, mtu);
2390 if (rt6_insert_exception(nrt6, &res))
2391 dst_release_immediate(&nrt6->dst);
2392 }
2393 rcu_read_unlock();
2394 }

--- 90 unchanged lines hidden (view full) ---

2485 *ret = rt_cache;
2486 return true;
2487 }
2488 return false;
2489 }
2490 return true;
2491}
2492
2827struct fib6_nh_rd_arg {
2828 struct fib6_result *res;
2829 struct flowi6 *fl6;
2830 const struct in6_addr *gw;
2831 struct rt6_info **ret;
2832};
2833
2834static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
2835{
2836 struct fib6_nh_rd_arg *arg = _arg;
2837
2838 arg->res->nh = nh;
2839 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
2840}
2841
2493/* Handle redirects */
2494struct ip6rd_flowi {
2495 struct flowi6 fl6;
2496 struct in6_addr gateway;
2497};
2498
2499static struct rt6_info *__ip6_route_redirect(struct net *net,
2500 struct fib6_table *table,
2501 struct flowi6 *fl6,
2502 const struct sk_buff *skb,
2503 int flags)
2504{
2505 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2506 struct rt6_info *ret = NULL;
2507 struct fib6_result res = {};
2857 struct fib6_nh_rd_arg arg = {
2858 .res = &res,
2859 .fl6 = fl6,
2860 .gw = &rdfl->gateway,
2861 .ret = &ret
2862 };
2508 struct fib6_info *rt;
2509 struct fib6_node *fn;
2510
2511 /* l3mdev_update_flow overrides oif if the device is enslaved; in
2512 * this case we must match on the real ingress device, so reset it
2513 */
2514 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2515 fl6->flowi6_oif = skb->dev->ifindex;

--- 8 unchanged lines hidden (view full) ---

2524 * routes.
2525 */
2526
2527 rcu_read_lock();
2528 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2529restart:
2530 for_each_fib6_node_rt_rcu(fn) {
2531 res.f6i = rt;
2532 res.nh = &rt->fib6_nh;
2533
2534 if (fib6_check_expired(rt))
2535 continue;
2536 if (rt->fib6_flags & RTF_REJECT)
2537 break;
2538 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway, &ret))
2539 goto out;
2891 if (unlikely(rt->nh)) {
2892 if (nexthop_is_blackhole(rt->nh))
2893 continue;
2894 /* on match, res->nh is filled in and potentially ret */
2895 if (nexthop_for_each_fib6_nh(rt->nh,
2896 fib6_nh_redirect_match,
2897 &arg))
2898 goto out;
2899 } else {
2900 res.nh = rt->fib6_nh;
2901 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
2902 &ret))
2903 goto out;
2904 }
2540 }
2541
2542 if (!rt)
2543 rt = net->ipv6.fib6_null_entry;
2544 else if (rt->fib6_flags & RTF_REJECT) {
2545 ret = net->ipv6.ip6_null_entry;
2546 goto out;
2547 }
2548
2549 if (rt == net->ipv6.fib6_null_entry) {
2550 fn = fib6_backtrack(fn, &fl6->saddr);
2551 if (fn)
2552 goto restart;
2553 }
2554
2555 res.f6i = rt;
2556 res.nh = &rt->fib6_nh;
2921 res.nh = rt->fib6_nh;
2557out:
2558 if (ret) {
2559 ip6_hold_safe(net, &ret);
2560 } else {
2561 res.fib6_flags = res.f6i->fib6_flags;
2562 res.fib6_type = res.f6i->fib6_type;
2563 ret = ip6_create_rt_rcu(&res);
2564 }

--- 210 unchanged lines hidden (view full) ---

2775 entries = dst_entries_get_slow(ops);
2776 if (entries < ops->gc_thresh)
2777 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
2778out:
2779 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
2780 return entries > rt_max_size;
2781}
2782
2922out:
2923 if (ret) {
2924 ip6_hold_safe(net, &ret);
2925 } else {
2926 res.fib6_flags = res.f6i->fib6_flags;
2927 res.fib6_type = res.f6i->fib6_type;
2928 ret = ip6_create_rt_rcu(&res);
2929 }

--- 210 unchanged lines hidden (view full) ---

3140 entries = dst_entries_get_slow(ops);
3141 if (entries < ops->gc_thresh)
3142 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
3143out:
3144 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
3145 return entries > rt_max_size;
3146}
3147
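Aside (not part of route.c): the garbage-collection fragment above resets net->ipv6.ip6_rt_gc_expire to rt_gc_timeout>>1 whenever the cache drops below gc_thresh, and then decays it by expire >> rt_elasticity on every pass. A quick userspace calculation of that decay; the tick rate and the 60 s / elasticity 9 values are assumptions for illustration, not read from the source.

#include <stdio.h>

int main(void)
{
	/* Illustrative values; in the kernel these come from the
	 * ip6_rt_gc_timeout and ip6_rt_gc_elasticity sysctls and the
	 * expire value is tracked in jiffies.
	 */
	unsigned int hz = 1000;			/* assumed tick rate */
	unsigned int gc_timeout = 60 * hz;
	unsigned int elasticity = 9;
	unsigned int expire = gc_timeout >> 1;	/* reset done when cache is small */
	int pass;

	/* Each GC pass shaves off expire >> elasticity, i.e. roughly a factor
	 * of (1 - 1/2^elasticity), so the expiry horizon shrinks slowly while
	 * the cache stays above gc_thresh.
	 */
	for (pass = 1; pass <= 5; pass++) {
		expire -= expire >> elasticity;
		printf("after pass %d: expire = %u jiffies\n", pass, expire);
	}
	return 0;
}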
2783static struct rt6_info *ip6_nh_lookup_table(struct net *net,
2784 struct fib6_config *cfg,
2785 const struct in6_addr *gw_addr,
2786 u32 tbid, int flags)
3148static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3149 const struct in6_addr *gw_addr, u32 tbid,
3150 int flags, struct fib6_result *res)
2787{
2788 struct flowi6 fl6 = {
2789 .flowi6_oif = cfg->fc_ifindex,
2790 .daddr = *gw_addr,
2791 .saddr = cfg->fc_prefsrc,
2792 };
2793 struct fib6_table *table;
3151{
3152 struct flowi6 fl6 = {
3153 .flowi6_oif = cfg->fc_ifindex,
3154 .daddr = *gw_addr,
3155 .saddr = cfg->fc_prefsrc,
3156 };
3157 struct fib6_table *table;
2794 struct rt6_info *rt;
3158 int err;
2795
2796 table = fib6_get_table(net, tbid);
2797 if (!table)
3159
3160 table = fib6_get_table(net, tbid);
3161 if (!table)
2798 return NULL;
3162 return -EINVAL;
2799
2800 if (!ipv6_addr_any(&cfg->fc_prefsrc))
2801 flags |= RT6_LOOKUP_F_HAS_SADDR;
2802
2803 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3163
3164 if (!ipv6_addr_any(&cfg->fc_prefsrc))
3165 flags |= RT6_LOOKUP_F_HAS_SADDR;
3166
3167 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
2804 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);
2805
3168
2806 /* if table lookup failed, fall back to full lookup */
2807 if (rt == net->ipv6.ip6_null_entry) {
2808 ip6_rt_put(rt);
2809 rt = NULL;
2810 }
3169 err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3170 if (!err && res->f6i != net->ipv6.fib6_null_entry)
3171 fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3172 cfg->fc_ifindex != 0, NULL, flags);
2811
3173
2812 return rt;
3174 return err;
2813}
2814
2815static int ip6_route_check_nh_onlink(struct net *net,
2816 struct fib6_config *cfg,
2817 const struct net_device *dev,
2818 struct netlink_ext_ack *extack)
2819{
3175}
3176
3177static int ip6_route_check_nh_onlink(struct net *net,
3178 struct fib6_config *cfg,
3179 const struct net_device *dev,
3180 struct netlink_ext_ack *extack)
3181{
2820 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
3182 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
2821 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3183 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2822 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
2823 struct fib6_info *from;
2824 struct rt6_info *grt;
3184 struct fib6_result res = {};
2825 int err;
2826
3185 int err;
3186
2827 err = 0;
2828 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
2829 if (grt) {
2830 rcu_read_lock();
2831 from = rcu_dereference(grt->from);
2832 if (!grt->dst.error &&
2833 /* ignore match if it is the default route */
2834 from && !ipv6_addr_any(&from->fib6_dst.addr) &&
2835 (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
2836 NL_SET_ERR_MSG(extack,
2837 "Nexthop has invalid gateway or device mismatch");
2838 err = -EINVAL;
2839 }
2840 rcu_read_unlock();
2841
2842 ip6_rt_put(grt);
3187 err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3188 if (!err && !(res.fib6_flags & RTF_REJECT) &&
3189 /* ignore match if it is the default route */
3190 !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
3191 (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
3192 NL_SET_ERR_MSG(extack,
3193 "Nexthop has invalid gateway or device mismatch");
3194 err = -EINVAL;
2843 }
2844
2845 return err;
2846}
2847
2848static int ip6_route_check_nh(struct net *net,
2849 struct fib6_config *cfg,
2850 struct net_device **_dev,
2851 struct inet6_dev **idev)
2852{
2853 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2854 struct net_device *dev = _dev ? *_dev : NULL;
3195 }
3196
3197 return err;
3198}
3199
3200static int ip6_route_check_nh(struct net *net,
3201 struct fib6_config *cfg,
3202 struct net_device **_dev,
3203 struct inet6_dev **idev)
3204{
3205 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3206 struct net_device *dev = _dev ? *_dev : NULL;
2855 struct rt6_info *grt = NULL;
3207 int flags = RT6_LOOKUP_F_IFACE;
3208 struct fib6_result res = {};
2856 int err = -EHOSTUNREACH;
2857
2858 if (cfg->fc_table) {
3209 int err = -EHOSTUNREACH;
3210
3211 if (cfg->fc_table) {
2859 int flags = RT6_LOOKUP_F_IFACE;
2860
2861 grt = ip6_nh_lookup_table(net, cfg, gw_addr,
2862 cfg->fc_table, flags);
2863 if (grt) {
2864 if (grt->rt6i_flags & RTF_GATEWAY ||
2865 (dev && dev != grt->dst.dev)) {
2866 ip6_rt_put(grt);
2867 grt = NULL;
2868 }
2869 }
3212 err = ip6_nh_lookup_table(net, cfg, gw_addr,
3213 cfg->fc_table, flags, &res);
3214 /* gw_addr can not require a gateway or resolve to a reject
3215 * route. If a device is given, it must match the result.
3216 */
3217 if (err || res.fib6_flags & RTF_REJECT ||
3218 res.nh->fib_nh_gw_family ||
3219 (dev && dev != res.nh->fib_nh_dev))
3220 err = -EHOSTUNREACH;
2870 }
2871
3221 }
3222
2872 if (!grt)
2873 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);
3223 if (err < 0) {
3224 struct flowi6 fl6 = {
3225 .flowi6_oif = cfg->fc_ifindex,
3226 .daddr = *gw_addr,
3227 };
2874
3228
2875 if (!grt)
2876 goto out;
3229 err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3230 if (err || res.fib6_flags & RTF_REJECT ||
3231 res.nh->fib_nh_gw_family)
3232 err = -EHOSTUNREACH;
2877
3233
3234 if (err)
3235 return err;
3236
3237 fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3238 cfg->fc_ifindex != 0, NULL, flags);
3239 }
3240
3241 err = 0;
2878 if (dev) {
3242 if (dev) {
2879 if (dev != grt->dst.dev) {
2880 ip6_rt_put(grt);
2881 goto out;
2882 }
3243 if (dev != res.nh->fib_nh_dev)
3244 err = -EHOSTUNREACH;
2883 } else {
3245 } else {
2884 *_dev = dev = grt->dst.dev;
2885 *idev = grt->rt6i_idev;
3246 *_dev = dev = res.nh->fib_nh_dev;
2886 dev_hold(dev);
3247 dev_hold(dev);
2887 in6_dev_hold(grt->rt6i_idev);
3248 *idev = in6_dev_get(dev);
2888 }
2889
3249 }
3250
2890 if (!(grt->rt6i_flags & RTF_GATEWAY))
2891 err = 0;
2892
2893 ip6_rt_put(grt);
2894
2895out:
2896 return err;
2897}
2898
2899static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
2900 struct net_device **_dev, struct inet6_dev **idev,
2901 struct netlink_ext_ack *extack)
2902{
2903 const struct in6_addr *gw_addr = &cfg->fc_gateway;

--- 24 unchanged lines hidden (view full) ---

2928 * We allow IPv4-mapped nexthops to support RFC4798-type
2929 * addressing
2930 */
2931 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
2932 NL_SET_ERR_MSG(extack, "Invalid gateway address");
2933 goto out;
2934 }
2935
3251 return err;
3252}
3253
3254static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3255 struct net_device **_dev, struct inet6_dev **idev,
3256 struct netlink_ext_ack *extack)
3257{
3258 const struct in6_addr *gw_addr = &cfg->fc_gateway;

--- 24 unchanged lines hidden (view full) ---

3283 * We allow IPv4-mapped nexthops to support RFC4798-type
3284 * addressing
3285 */
3286 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3287 NL_SET_ERR_MSG(extack, "Invalid gateway address");
3288 goto out;
3289 }
3290
3291 rcu_read_lock();
3292
2936 if (cfg->fc_flags & RTNH_F_ONLINK)
2937 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
2938 else
2939 err = ip6_route_check_nh(net, cfg, _dev, idev);
2940
3293 if (cfg->fc_flags & RTNH_F_ONLINK)
3294 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3295 else
3296 err = ip6_route_check_nh(net, cfg, _dev, idev);
3297
3298 rcu_read_unlock();
3299
2941 if (err)
2942 goto out;
2943 }
2944
2945 /* reload in case device was changed */
2946 dev = *_dev;
2947
2948 err = -EINVAL;

--- 84 unchanged lines hidden (view full) ---

3033 dev = net->loopback_dev;
3034 dev_hold(dev);
3035 idev = in6_dev_get(dev);
3036 if (!idev) {
3037 err = -ENODEV;
3038 goto out;
3039 }
3040 }
3300 if (err)
3301 goto out;
3302 }
3303
3304 /* reload in case device was changed */
3305 dev = *_dev;
3306
3307 err = -EINVAL;

--- 84 unchanged lines hidden (view full) ---

3392 dev = net->loopback_dev;
3393 dev_hold(dev);
3394 idev = in6_dev_get(dev);
3395 if (!idev) {
3396 err = -ENODEV;
3397 goto out;
3398 }
3399 }
3041 goto set_dev;
3400 goto pcpu_alloc;
3042 }
3043
3044 if (cfg->fc_flags & RTF_GATEWAY) {
3045 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3046 if (err)
3047 goto out;
3048
3049 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;

--- 19 unchanged lines hidden (view full) ---

3069 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3070 !netif_carrier_ok(dev))
3071 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3072
3073 err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
3074 cfg->fc_encap_type, cfg, gfp_flags, extack);
3075 if (err)
3076 goto out;
3401 }
3402
3403 if (cfg->fc_flags & RTF_GATEWAY) {
3404 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3405 if (err)
3406 goto out;
3407
3408 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;

--- 19 unchanged lines hidden (view full) ---

3428 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3429 !netif_carrier_ok(dev))
3430 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3431
3432 err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
3433 cfg->fc_encap_type, cfg, gfp_flags, extack);
3434 if (err)
3435 goto out;
3077set_dev:
3436
3437pcpu_alloc:
3438 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3439 if (!fib6_nh->rt6i_pcpu) {
3440 err = -ENOMEM;
3441 goto out;
3442 }
3443
3078 fib6_nh->fib_nh_dev = dev;
3079 fib6_nh->fib_nh_oif = dev->ifindex;
3080 err = 0;
3081out:
3082 if (idev)
3083 in6_dev_put(idev);
3084
3085 if (err) {
3086 lwtstate_put(fib6_nh->fib_nh_lws);
3087 fib6_nh->fib_nh_lws = NULL;
3088 if (dev)
3089 dev_put(dev);
3090 }
3091
3092 return err;
3093}
3094
3095void fib6_nh_release(struct fib6_nh *fib6_nh)
3096{
3444 fib6_nh->fib_nh_dev = dev;
3445 fib6_nh->fib_nh_oif = dev->ifindex;
3446 err = 0;
3447out:
3448 if (idev)
3449 in6_dev_put(idev);
3450
3451 if (err) {
3452 lwtstate_put(fib6_nh->fib_nh_lws);
3453 fib6_nh->fib_nh_lws = NULL;
3454 if (dev)
3455 dev_put(dev);
3456 }
3457
3458 return err;
3459}
3460
3461void fib6_nh_release(struct fib6_nh *fib6_nh)
3462{
3463 struct rt6_exception_bucket *bucket;
3464
3465 rcu_read_lock();
3466
3467 fib6_nh_flush_exceptions(fib6_nh, NULL);
3468 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3469 if (bucket) {
3470 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3471 kfree(bucket);
3472 }
3473
3474 rcu_read_unlock();
3475
3476 if (fib6_nh->rt6i_pcpu) {
3477 int cpu;
3478
3479 for_each_possible_cpu(cpu) {
3480 struct rt6_info **ppcpu_rt;
3481 struct rt6_info *pcpu_rt;
3482
3483 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3484 pcpu_rt = *ppcpu_rt;
3485 if (pcpu_rt) {
3486 dst_dev_put(&pcpu_rt->dst);
3487 dst_release(&pcpu_rt->dst);
3488 *ppcpu_rt = NULL;
3489 }
3490 }
3491
3492 free_percpu(fib6_nh->rt6i_pcpu);
3493 }
3494
3097 fib_nh_common_release(&fib6_nh->nh_common);
3098}
3099
3100static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3101 gfp_t gfp_flags,
3102 struct netlink_ext_ack *extack)
3103{
3104 struct net *net = cfg->fc_nlinfo.nl_net;
3105 struct fib6_info *rt = NULL;
3495 fib_nh_common_release(&fib6_nh->nh_common);
3496}
3497
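Aside (not part of route.c): fib6_nh_release() above flushes the nexthop's exception bucket and then walks every possible CPU's cached rt6_info slot, dropping the dst reference and clearing the pointer before the percpu array itself is freed. The shape of that walk, reduced to a plain pointer array in userspace; slot, release_slot and nslots are illustrative names only.

#include <stdio.h>
#include <stdlib.h>

struct cache_entry {
	int id;			/* stand-in for a cached rt6_info */
};

/* Drop one cached entry, standing in for dst_dev_put()/dst_release(). */
static void release_slot(struct cache_entry **slot)
{
	if (*slot) {
		free(*slot);
		*slot = NULL;	/* never leave a dangling pointer behind */
	}
}

int main(void)
{
	int nslots = 4;		/* stands in for the number of possible CPUs */
	struct cache_entry **cache = calloc(nslots, sizeof(*cache));
	int i;

	if (!cache)
		return 1;

	/* Populate a couple of slots, as packet processing would. */
	cache[1] = malloc(sizeof(*cache[1]));
	cache[3] = malloc(sizeof(*cache[3]));

	/* Teardown: release every per-slot entry, then the array itself,
	 * mirroring the order used for rt6i_pcpu above.
	 */
	for (i = 0; i < nslots; i++)
		release_slot(&cache[i]);
	free(cache);

	printf("cache released\n");
	return 0;
}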
3498static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3499 gfp_t gfp_flags,
3500 struct netlink_ext_ack *extack)
3501{
3502 struct net *net = cfg->fc_nlinfo.nl_net;
3503 struct fib6_info *rt = NULL;
3504 struct nexthop *nh = NULL;
3106 struct fib6_table *table;
3505 struct fib6_table *table;
3506 struct fib6_nh *fib6_nh;
3107 int err = -EINVAL;
3108 int addr_type;
3109
3110 /* RTF_PCPU is an internal flag; can not be set by userspace */
3111 if (cfg->fc_flags & RTF_PCPU) {
3112 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3113 goto out;
3114 }

--- 19 unchanged lines hidden (view full) ---

3134 }
3135#ifndef CONFIG_IPV6_SUBTREES
3136 if (cfg->fc_src_len) {
3137 NL_SET_ERR_MSG(extack,
3138 "Specifying source address requires IPV6_SUBTREES to be enabled");
3139 goto out;
3140 }
3141#endif
3507 int err = -EINVAL;
3508 int addr_type;
3509
3510 /* RTF_PCPU is an internal flag; can not be set by userspace */
3511 if (cfg->fc_flags & RTF_PCPU) {
3512 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3513 goto out;
3514 }

--- 19 unchanged lines hidden (view full) ---

3534 }
3535#ifndef CONFIG_IPV6_SUBTREES
3536 if (cfg->fc_src_len) {
3537 NL_SET_ERR_MSG(extack,
3538 "Specifying source address requires IPV6_SUBTREES to be enabled");
3539 goto out;
3540 }
3541#endif
3542 if (cfg->fc_nh_id) {
3543 nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3544 if (!nh) {
3545 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3546 goto out;
3547 }
3548 err = fib6_check_nexthop(nh, cfg, extack);
3549 if (err)
3550 goto out;
3551 }
3142
3143 err = -ENOBUFS;
3144 if (cfg->fc_nlinfo.nlh &&
3145 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3146 table = fib6_get_table(net, cfg->fc_table);
3147 if (!table) {
3148 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3149 table = fib6_new_table(net, cfg->fc_table);
3150 }
3151 } else {
3152 table = fib6_new_table(net, cfg->fc_table);
3153 }
3154
3155 if (!table)
3156 goto out;
3157
3158 err = -ENOMEM;
3552
3553 err = -ENOBUFS;
3554 if (cfg->fc_nlinfo.nlh &&
3555 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3556 table = fib6_get_table(net, cfg->fc_table);
3557 if (!table) {
3558 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3559 table = fib6_new_table(net, cfg->fc_table);
3560 }
3561 } else {
3562 table = fib6_new_table(net, cfg->fc_table);
3563 }
3564
3565 if (!table)
3566 goto out;
3567
3568 err = -ENOMEM;
3159 rt = fib6_info_alloc(gfp_flags);
3569 rt = fib6_info_alloc(gfp_flags, !nh);
3160 if (!rt)
3161 goto out;
3162
3163 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3164 extack);
3165 if (IS_ERR(rt->fib6_metrics)) {
3166 err = PTR_ERR(rt->fib6_metrics);
3167 /* Do not leave garbage there. */

--- 23 unchanged lines hidden (view full) ---

3191 rt->fib6_dst.plen = cfg->fc_dst_len;
3192 if (rt->fib6_dst.plen == 128)
3193 rt->dst_host = true;
3194
3195#ifdef CONFIG_IPV6_SUBTREES
3196 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3197 rt->fib6_src.plen = cfg->fc_src_len;
3198#endif
3570 if (!rt)
3571 goto out;
3572
3573 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3574 extack);
3575 if (IS_ERR(rt->fib6_metrics)) {
3576 err = PTR_ERR(rt->fib6_metrics);
3577 /* Do not leave garbage there. */

--- 23 unchanged lines hidden (view full) ---

3601 rt->fib6_dst.plen = cfg->fc_dst_len;
3602 if (rt->fib6_dst.plen == 128)
3603 rt->dst_host = true;
3604
3605#ifdef CONFIG_IPV6_SUBTREES
3606 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3607 rt->fib6_src.plen = cfg->fc_src_len;
3608#endif
3199 err = fib6_nh_init(net, &rt->fib6_nh, cfg, gfp_flags, extack);
3200 if (err)
3201 goto out;
3609 if (nh) {
3610 if (!nexthop_get(nh)) {
3611 NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3612 goto out;
3613 }
3614 if (rt->fib6_src.plen) {
3615 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3616 goto out;
3617 }
3618 rt->nh = nh;
3619 fib6_nh = nexthop_fib6_nh(rt->nh);
3620 } else {
3621 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3622 if (err)
3623 goto out;
3202
3624
3203 /* We cannot add true routes via loopback here,
3204 * they would result in kernel looping; promote them to reject routes
3205 */
3206 addr_type = ipv6_addr_type(&cfg->fc_dst);
3207 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh.fib_nh_dev, addr_type))
3208 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3625 fib6_nh = rt->fib6_nh;
3209
3626
3627 /* We cannot add true routes via loopback here, they would
3628 * result in kernel looping; promote them to reject routes
3629 */
3630 addr_type = ipv6_addr_type(&cfg->fc_dst);
3631 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3632 addr_type))
3633 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3634 }
3635
3210 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3636 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3211 struct net_device *dev = fib6_info_nh_dev(rt);
3637 struct net_device *dev = fib6_nh->fib_nh_dev;
3212
3213 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3214 NL_SET_ERR_MSG(extack, "Invalid source address");
3215 err = -EINVAL;
3216 goto out;
3217 }
3218 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3219 rt->fib6_prefsrc.plen = 128;

--- 75 unchanged lines hidden (view full) ---

3295 NULL, NULL, 0, RTM_DELROUTE,
3296 info->portid, seq, 0) < 0) {
3297 kfree_skb(skb);
3298 skb = NULL;
3299 } else
3300 info->skip_notify = 1;
3301 }
3302
3638
3639 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3640 NL_SET_ERR_MSG(extack, "Invalid source address");
3641 err = -EINVAL;
3642 goto out;
3643 }
3644 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3645 rt->fib6_prefsrc.plen = 128;

--- 75 unchanged lines hidden (view full) ---

3721 NULL, NULL, 0, RTM_DELROUTE,
3722 info->portid, seq, 0) < 0) {
3723 kfree_skb(skb);
3724 skb = NULL;
3725 } else
3726 info->skip_notify = 1;
3727 }
3728
3729 info->skip_notify_kernel = 1;
3730 call_fib6_multipath_entry_notifiers(net,
3731 FIB_EVENT_ENTRY_DEL,
3732 rt,
3733 rt->fib6_nsiblings,
3734 NULL);
3303 list_for_each_entry_safe(sibling, next_sibling,
3304 &rt->fib6_siblings,
3305 fib6_siblings) {
3306 err = fib6_del(sibling, info);
3307 if (err)
3308 goto out_unlock;
3309 }
3310 }

--- 6 unchanged lines hidden (view full) ---

3317
3318 if (skb) {
3319 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3320 info->nlh, gfp_any());
3321 }
3322 return err;
3323}
3324
3735 list_for_each_entry_safe(sibling, next_sibling,
3736 &rt->fib6_siblings,
3737 fib6_siblings) {
3738 err = fib6_del(sibling, info);
3739 if (err)
3740 goto out_unlock;
3741 }
3742 }

--- 6 unchanged lines hidden (view full) ---

3749
3750 if (skb) {
3751 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3752 info->nlh, gfp_any());
3753 }
3754 return err;
3755}
3756
3325static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3757static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3326{
3327 int rc = -ESRCH;
3328
3329 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3330 goto out;
3331
3332 if (cfg->fc_flags & RTF_GATEWAY &&
3333 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3334 goto out;
3335
3336 rc = rt6_remove_exception_rt(rt);
3337out:
3338 return rc;
3339}
3340
3758{
3759 int rc = -ESRCH;
3760
3761 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3762 goto out;
3763
3764 if (cfg->fc_flags & RTF_GATEWAY &&
3765 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3766 goto out;
3767
3768 rc = rt6_remove_exception_rt(rt);
3769out:
3770 return rc;
3771}
3772
3773static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3774 struct fib6_nh *nh)
3775{
3776 struct fib6_result res = {
3777 .f6i = rt,
3778 .nh = nh,
3779 };
3780 struct rt6_info *rt_cache;
3781
3782 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3783 if (rt_cache)
3784 return __ip6_del_cached_rt(rt_cache, cfg);
3785
3786 return 0;
3787}
3788
3789struct fib6_nh_del_cached_rt_arg {
3790 struct fib6_config *cfg;
3791 struct fib6_info *f6i;
3792};
3793
3794static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
3795{
3796 struct fib6_nh_del_cached_rt_arg *arg = _arg;
3797 int rc;
3798
3799 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
3800 return rc != -ESRCH ? rc : 0;
3801}
3802
3803static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
3804{
3805 struct fib6_nh_del_cached_rt_arg arg = {
3806 .cfg = cfg,
3807 .f6i = f6i
3808 };
3809
3810 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
3811}
3812
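Aside (not part of route.c): __ip6_del_cached_rt() above accepts a cached (exception) entry only when the request's interface and gateway, if the caller supplied them, agree with the entry; ip6_del_cached_rt_nh() then fans that check out over every fib6_nh of a nexthop object. A small standalone sketch of the match rule with plain types; the struct layout and helper name are invented for illustration.

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

struct del_request {
	int ifindex;			/* 0 means "any interface" */
	bool has_gateway;		/* RTF_GATEWAY was set in the request */
	unsigned char gateway[16];
};

struct cached_route {
	int ifindex;
	unsigned char gateway[16];
};

/* Same shape as the checks in __ip6_del_cached_rt(): an interface filter
 * only applies when non-zero, a gateway filter only when one was given.
 */
static bool cached_route_matches(const struct del_request *req,
				 const struct cached_route *rt)
{
	if (req->ifindex && rt->ifindex != req->ifindex)
		return false;
	if (req->has_gateway &&
	    memcmp(req->gateway, rt->gateway, sizeof(rt->gateway)) != 0)
		return false;
	return true;
}

int main(void)
{
	struct del_request req = { .ifindex = 2, .has_gateway = false };
	struct cached_route rt = { .ifindex = 2 };

	printf("match: %s\n", cached_route_matches(&req, &rt) ? "yes" : "no");
	return 0;
}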
3341static int ip6_route_del(struct fib6_config *cfg,
3342 struct netlink_ext_ack *extack)
3343{
3813static int ip6_route_del(struct fib6_config *cfg,
3814 struct netlink_ext_ack *extack)
3815{
3344 struct rt6_info *rt_cache;
3345 struct fib6_table *table;
3346 struct fib6_info *rt;
3347 struct fib6_node *fn;
3348 int err = -ESRCH;
3349
3350 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3351 if (!table) {
3352 NL_SET_ERR_MSG(extack, "FIB table does not exist");

--- 6 unchanged lines hidden (view full) ---

3359 &cfg->fc_dst, cfg->fc_dst_len,
3360 &cfg->fc_src, cfg->fc_src_len,
3361 !(cfg->fc_flags & RTF_CACHE));
3362
3363 if (fn) {
3364 for_each_fib6_node_rt_rcu(fn) {
3365 struct fib6_nh *nh;
3366
3816 struct fib6_table *table;
3817 struct fib6_info *rt;
3818 struct fib6_node *fn;
3819 int err = -ESRCH;
3820
3821 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3822 if (!table) {
3823 NL_SET_ERR_MSG(extack, "FIB table does not exist");

--- 6 unchanged lines hidden (view full) ---

3830 &cfg->fc_dst, cfg->fc_dst_len,
3831 &cfg->fc_src, cfg->fc_src_len,
3832 !(cfg->fc_flags & RTF_CACHE));
3833
3834 if (fn) {
3835 for_each_fib6_node_rt_rcu(fn) {
3836 struct fib6_nh *nh;
3837
3838 if (rt->nh && cfg->fc_nh_id &&
3839 rt->nh->id != cfg->fc_nh_id)
3840 continue;
3841
3367 if (cfg->fc_flags & RTF_CACHE) {
3842 if (cfg->fc_flags & RTF_CACHE) {
3368 struct fib6_result res = {
3369 .f6i = rt,
3370 };
3371 int rc;
3843 int rc = 0;
3372
3844
3373 rt_cache = rt6_find_cached_rt(&res,
3374 &cfg->fc_dst,
3375 &cfg->fc_src);
3376 if (rt_cache) {
3377 rc = ip6_del_cached_rt(rt_cache, cfg);
3378 if (rc != -ESRCH) {
3379 rcu_read_unlock();
3380 return rc;
3381 }
3845 if (rt->nh) {
3846 rc = ip6_del_cached_rt_nh(cfg, rt);
3847 } else if (cfg->fc_nh_id) {
3848 continue;
3849 } else {
3850 nh = rt->fib6_nh;
3851 rc = ip6_del_cached_rt(cfg, rt, nh);
3382 }
3852 }
3853 if (rc != -ESRCH) {
3854 rcu_read_unlock();
3855 return rc;
3856 }
3383 continue;
3384 }
3385
3857 continue;
3858 }
3859
3386 nh = &rt->fib6_nh;
3860 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3861 continue;
3862 if (cfg->fc_protocol &&
3863 cfg->fc_protocol != rt->fib6_protocol)
3864 continue;
3865
3866 if (rt->nh) {
3867 if (!fib6_info_hold_safe(rt))
3868 continue;
3869 rcu_read_unlock();
3870
3871 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3872 }
3873 if (cfg->fc_nh_id)
3874 continue;
3875
3876 nh = rt->fib6_nh;
3387 if (cfg->fc_ifindex &&
3388 (!nh->fib_nh_dev ||
3389 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3390 continue;
3391 if (cfg->fc_flags & RTF_GATEWAY &&
3392 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
3393 continue;
3877 if (cfg->fc_ifindex &&
3878 (!nh->fib_nh_dev ||
3879 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3880 continue;
3881 if (cfg->fc_flags & RTF_GATEWAY &&
3882 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
3883 continue;
3394 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3395 continue;
3396 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
3397 continue;
3398 if (!fib6_info_hold_safe(rt))
3399 continue;
3400 rcu_read_unlock();
3401
3402 /* if gateway was specified only delete the one hop */
3403 if (cfg->fc_flags & RTF_GATEWAY)
3404 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3405

--- 94 unchanged lines hidden (view full) ---

3500 NEIGH_UPDATE_F_ISROUTER)),
3501 NDISC_REDIRECT, &ndopts);
3502
3503 rcu_read_lock();
3504 res.f6i = rcu_dereference(rt->from);
3505 if (!res.f6i)
3506 goto out;
3507
3884 if (!fib6_info_hold_safe(rt))
3885 continue;
3886 rcu_read_unlock();
3887
3888 /* if gateway was specified only delete the one hop */
3889 if (cfg->fc_flags & RTF_GATEWAY)
3890 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3891

--- 94 unchanged lines hidden (view full) ---

3986 NEIGH_UPDATE_F_ISROUTER)),
3987 NDISC_REDIRECT, &ndopts);
3988
3989 rcu_read_lock();
3990 res.f6i = rcu_dereference(rt->from);
3991 if (!res.f6i)
3992 goto out;
3993
3508 res.nh = &res.f6i->fib6_nh;
3994 if (res.f6i->nh) {
3995 struct fib6_nh_match_arg arg = {
3996 .dev = dst->dev,
3997 .gw = &rt->rt6i_gateway,
3998 };
3999
4000 nexthop_for_each_fib6_nh(res.f6i->nh,
4001 fib6_nh_find_match, &arg);
4002
4003 /* fib6_info uses a nexthop that does not have fib6_nh
4004 * using the dst->dev. Should be impossible
4005 */
4006 if (!arg.match)
4007 goto out;
4008 res.nh = arg.match;
4009 } else {
4010 res.nh = res.f6i->fib6_nh;
4011 }
4012
3509 res.fib6_flags = res.f6i->fib6_flags;
3510 res.fib6_type = res.f6i->fib6_type;
3511 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
3512 if (!nrt)
3513 goto out;
3514
3515 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
3516 if (on_link)

--- 35 unchanged lines hidden (view full) ---

3552 return NULL;
3553
3554 rcu_read_lock();
3555 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
3556 if (!fn)
3557 goto out;
3558
3559 for_each_fib6_node_rt_rcu(fn) {
4013 res.fib6_flags = res.f6i->fib6_flags;
4014 res.fib6_type = res.f6i->fib6_type;
4015 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4016 if (!nrt)
4017 goto out;
4018
4019 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4020 if (on_link)

--- 35 unchanged lines hidden (view full) ---

4056 return NULL;
4057
4058 rcu_read_lock();
4059 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4060 if (!fn)
4061 goto out;
4062
4063 for_each_fib6_node_rt_rcu(fn) {
3560 if (rt->fib6_nh.fib_nh_dev->ifindex != ifindex)
4064 /* these routes do not use nexthops */
4065 if (rt->nh)
3561 continue;
4066 continue;
4067 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4068 continue;
3562 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4069 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
3563 !rt->fib6_nh.fib_nh_gw_family)
4070 !rt->fib6_nh->fib_nh_gw_family)
3564 continue;
4071 continue;
3565 if (!ipv6_addr_equal(&rt->fib6_nh.fib_nh_gw6, gwaddr))
4072 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
3566 continue;
3567 if (!fib6_info_hold_safe(rt))
3568 continue;
3569 break;
3570 }
3571out:
3572 rcu_read_unlock();
3573 return rt;

--- 41 unchanged lines hidden (view full) ---

3615 struct fib6_table *table;
3616
3617 table = fib6_get_table(net, tb_id);
3618 if (!table)
3619 return NULL;
3620
3621 rcu_read_lock();
3622 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4073 continue;
4074 if (!fib6_info_hold_safe(rt))
4075 continue;
4076 break;
4077 }
4078out:
4079 rcu_read_unlock();
4080 return rt;

--- 41 unchanged lines hidden (view full) ---

4122 struct fib6_table *table;
4123
4124 table = fib6_get_table(net, tb_id);
4125 if (!table)
4126 return NULL;
4127
4128 rcu_read_lock();
4129 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3623 struct fib6_nh *nh = &rt->fib6_nh;
4130 struct fib6_nh *nh;
3624
4131
4132 /* RA routes do not use nexthops */
4133 if (rt->nh)
4134 continue;
4135
4136 nh = rt->fib6_nh;
3625 if (dev == nh->fib_nh_dev &&
3626 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
3627 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
3628 break;
3629 }
3630 if (rt && !fib6_info_hold_safe(rt))
3631 rt = NULL;
3632 rcu_read_unlock();

--- 234 unchanged lines hidden (view full) ---

3867};
3868
3869static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
3870{
3871 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
3872 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
3873 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
3874
4137 if (dev == nh->fib_nh_dev &&
4138 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4139 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4140 break;
4141 }
4142 if (rt && !fib6_info_hold_safe(rt))
4143 rt = NULL;
4144 rcu_read_unlock();

--- 234 unchanged lines hidden (view full) ---

4379};
4380
4381static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4382{
4383 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
4384 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4385 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4386
3875 if (((void *)rt->fib6_nh.fib_nh_dev == dev || !dev) &&
4387 if (!rt->nh &&
4388 ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
3876 rt != net->ipv6.fib6_null_entry &&
3877 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
3878 spin_lock_bh(&rt6_exception_lock);
3879 /* remove prefsrc entry */
3880 rt->fib6_prefsrc.plen = 0;
3881 spin_unlock_bh(&rt6_exception_lock);
3882 }
3883 return 0;

--- 11 unchanged lines hidden (view full) ---

3895}
3896
3897#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
3898
3899/* Remove routers and update dst entries when gateway turn into host. */
3900static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
3901{
3902 struct in6_addr *gateway = (struct in6_addr *)arg;
4389 rt != net->ipv6.fib6_null_entry &&
4390 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
4391 spin_lock_bh(&rt6_exception_lock);
4392 /* remove prefsrc entry */
4393 rt->fib6_prefsrc.plen = 0;
4394 spin_unlock_bh(&rt6_exception_lock);
4395 }
4396 return 0;

--- 11 unchanged lines hidden (view full) ---

4408}
4409
4410#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4411
4412/* Remove routers and update dst entries when gateway turn into host. */
4413static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4414{
4415 struct in6_addr *gateway = (struct in6_addr *)arg;
4416 struct fib6_nh *nh;
3903
4417
4418 /* RA routes do not use nexthops */
4419 if (rt->nh)
4420 return 0;
4421
4422 nh = rt->fib6_nh;
3904 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4423 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
3905 rt->fib6_nh.fib_nh_gw_family &&
3906 ipv6_addr_equal(gateway, &rt->fib6_nh.fib_nh_gw6)) {
4424 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
3907 return -1;
4425 return -1;
3908 }
3909
3910 /* Further clean up cached routes in exception table.
3911 * This is needed because cached route may have a different
3912 * gateway than its 'parent' in the case of an ip redirect.
3913 */
4426
4427 /* Further clean up cached routes in exception table.
4428 * This is needed because cached route may have a different
4429 * gateway than its 'parent' in the case of an ip redirect.
4430 */
3914 rt6_exceptions_clean_tohost(rt, gateway);
4431 fib6_nh_exceptions_clean_tohost(nh, gateway);
3915
3916 return 0;
3917}
3918
3919void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
3920{
3921 fib6_clean_all(net, fib6_clean_tohost, gateway);
3922}

--- 21 unchanged lines hidden (view full) ---

3944 return iter;
3945 iter = rcu_dereference_protected(iter->fib6_next,
3946 lockdep_is_held(&rt->fib6_table->tb6_lock));
3947 }
3948
3949 return NULL;
3950}
3951
4432
4433 return 0;
4434}
4435
4436void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4437{
4438 fib6_clean_all(net, fib6_clean_tohost, gateway);
4439}

--- 21 unchanged lines hidden (view full) ---

4461 return iter;
4462 iter = rcu_dereference_protected(iter->fib6_next,
4463 lockdep_is_held(&rt->fib6_table->tb6_lock));
4464 }
4465
4466 return NULL;
4467}
4468
4469/* only called for fib entries with builtin fib6_nh */
3952static bool rt6_is_dead(const struct fib6_info *rt)
3953{
4470static bool rt6_is_dead(const struct fib6_info *rt)
4471{
3954 if (rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD ||
3955 (rt->fib6_nh.fib_nh_flags & RTNH_F_LINKDOWN &&
3956 ip6_ignore_linkdown(rt->fib6_nh.fib_nh_dev)))
4472 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4473 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4474 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
3957 return true;
3958
3959 return false;
3960}
3961
3962static int rt6_multipath_total_weight(const struct fib6_info *rt)
3963{
3964 struct fib6_info *iter;
3965 int total = 0;
3966
3967 if (!rt6_is_dead(rt))
4475 return true;
4476
4477 return false;
4478}
4479
4480static int rt6_multipath_total_weight(const struct fib6_info *rt)
4481{
4482 struct fib6_info *iter;
4483 int total = 0;
4484
4485 if (!rt6_is_dead(rt))
3968 total += rt->fib6_nh.fib_nh_weight;
4486 total += rt->fib6_nh->fib_nh_weight;
3969
3970 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
3971 if (!rt6_is_dead(iter))
4487
4488 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4489 if (!rt6_is_dead(iter))
3972 total += iter->fib6_nh.fib_nh_weight;
4490 total += iter->fib6_nh->fib_nh_weight;
3973 }
3974
3975 return total;
3976}
3977
3978static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
3979{
3980 int upper_bound = -1;
3981
3982 if (!rt6_is_dead(rt)) {
4491 }
4492
4493 return total;
4494}
4495
4496static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4497{
4498 int upper_bound = -1;
4499
4500 if (!rt6_is_dead(rt)) {
3983 *weight += rt->fib6_nh.fib_nh_weight;
4501 *weight += rt->fib6_nh->fib_nh_weight;
3984 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
3985 total) - 1;
3986 }
4502 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4503 total) - 1;
4504 }
3987 atomic_set(&rt->fib6_nh.fib_nh_upper_bound, upper_bound);
4505 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
3988}
3989
3990static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
3991{
3992 struct fib6_info *iter;
3993 int weight = 0;
3994
3995 rt6_upper_bound_set(rt, &weight, total);

--- 26 unchanged lines hidden (view full) ---

4022 rt6_multipath_upper_bound_set(first, total);
4023}
4024
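Aside (not part of route.c): rt6_multipath_total_weight() and rt6_upper_bound_set() above implement weighted ECMP. Live siblings get consecutive slices of the 31-bit hash space in proportion to fib_nh_weight (which ip6_route_multipath_add() sets to rtnh_hops + 1), and the flow hash is later compared against these upper bounds to pick a sibling; dead nexthops keep an upper bound of -1. A userspace re-computation of those bounds, with DIV_ROUND_CLOSEST_ULL re-implemented locally for illustration.

#include <stdio.h>
#include <stdint.h>

/* Round-to-nearest division, standing in for DIV_ROUND_CLOSEST_ULL(). */
static uint64_t div_round_closest_u64(uint64_t x, uint64_t d)
{
	return (x + d / 2) / d;
}

int main(void)
{
	/* Example sibling weights (rtnh_hops + 1), all nexthops alive. */
	int weights[] = { 1, 2, 5 };
	int n = sizeof(weights) / sizeof(weights[0]);
	int total = 0, cumulative = 0, i;

	for (i = 0; i < n; i++)
		total += weights[i];

	/* Each route's upper bound is the top of its share of [0, 2^31). */
	for (i = 0; i < n; i++) {
		int32_t upper_bound;

		cumulative += weights[i];
		upper_bound = (int32_t)(div_round_closest_u64(
					(uint64_t)cumulative << 31, total) - 1);
		printf("nexthop %d: weight %d, upper bound %d\n",
		       i, weights[i], upper_bound);
	}
	/* A flow hash in [0, 2^31) is matched against the bounds in sibling
	 * order; the first nexthop whose bound is >= the hash carries the flow.
	 */
	return 0;
}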
4025static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4026{
4027 const struct arg_netdev_event *arg = p_arg;
4028 struct net *net = dev_net(arg->dev);
4029
4506}
4507
4508static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4509{
4510 struct fib6_info *iter;
4511 int weight = 0;
4512
4513 rt6_upper_bound_set(rt, &weight, total);

--- 26 unchanged lines hidden (view full) ---

4540 rt6_multipath_upper_bound_set(first, total);
4541}
4542
4543static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4544{
4545 const struct arg_netdev_event *arg = p_arg;
4546 struct net *net = dev_net(arg->dev);
4547
4030 if (rt != net->ipv6.fib6_null_entry &&
4031 rt->fib6_nh.fib_nh_dev == arg->dev) {
4032 rt->fib6_nh.fib_nh_flags &= ~arg->nh_flags;
4548 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4549 rt->fib6_nh->fib_nh_dev == arg->dev) {
4550 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4033 fib6_update_sernum_upto_root(net, rt);
4034 rt6_multipath_rebalance(rt);
4035 }
4036
4037 return 0;
4038}
4039
4040void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)

--- 6 unchanged lines hidden (view full) ---

4047 };
4048
4049 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4050 arg.nh_flags |= RTNH_F_LINKDOWN;
4051
4052 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4053}
4054
4551 fib6_update_sernum_upto_root(net, rt);
4552 rt6_multipath_rebalance(rt);
4553 }
4554
4555 return 0;
4556}
4557
4558void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)

--- 6 unchanged lines hidden (view full) ---

4565 };
4566
4567 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4568 arg.nh_flags |= RTNH_F_LINKDOWN;
4569
4570 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4571}
4572
4573/* only called for fib entries with inline fib6_nh */
4055static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4056 const struct net_device *dev)
4057{
4058 struct fib6_info *iter;
4059
4574static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4575 const struct net_device *dev)
4576{
4577 struct fib6_info *iter;
4578
4060 if (rt->fib6_nh.fib_nh_dev == dev)
4579 if (rt->fib6_nh->fib_nh_dev == dev)
4061 return true;
4062 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4580 return true;
4581 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4063 if (iter->fib6_nh.fib_nh_dev == dev)
4582 if (iter->fib6_nh->fib_nh_dev == dev)
4064 return true;
4065
4066 return false;
4067}
4068
4069static void rt6_multipath_flush(struct fib6_info *rt)
4070{
4071 struct fib6_info *iter;

--- 4 unchanged lines hidden (view full) ---

4076}
4077
4078static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4079 const struct net_device *down_dev)
4080{
4081 struct fib6_info *iter;
4082 unsigned int dead = 0;
4083
4583 return true;
4584
4585 return false;
4586}
4587
4588static void rt6_multipath_flush(struct fib6_info *rt)
4589{
4590 struct fib6_info *iter;

--- 4 unchanged lines hidden (view full) ---

4595}
4596
4597static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4598 const struct net_device *down_dev)
4599{
4600 struct fib6_info *iter;
4601 unsigned int dead = 0;
4602
4084 if (rt->fib6_nh.fib_nh_dev == down_dev ||
4085 rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
4603 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4604 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4086 dead++;
4087 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4605 dead++;
4606 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4088 if (iter->fib6_nh.fib_nh_dev == down_dev ||
4089 iter->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
4607 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4608 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4090 dead++;
4091
4092 return dead;
4093}
4094
4095static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4096 const struct net_device *dev,
4097 unsigned char nh_flags)
4098{
4099 struct fib6_info *iter;
4100
4609 dead++;
4610
4611 return dead;
4612}
4613
4614static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4615 const struct net_device *dev,
4616 unsigned char nh_flags)
4617{
4618 struct fib6_info *iter;
4619
4101 if (rt->fib6_nh.fib_nh_dev == dev)
4102 rt->fib6_nh.fib_nh_flags |= nh_flags;
4620 if (rt->fib6_nh->fib_nh_dev == dev)
4621 rt->fib6_nh->fib_nh_flags |= nh_flags;
4103 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4622 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4104 if (iter->fib6_nh.fib_nh_dev == dev)
4105 iter->fib6_nh.fib_nh_flags |= nh_flags;
4623 if (iter->fib6_nh->fib_nh_dev == dev)
4624 iter->fib6_nh->fib_nh_flags |= nh_flags;
4106}
4107
4108/* called with write lock held for table with rt */
4109static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4110{
4111 const struct arg_netdev_event *arg = p_arg;
4112 const struct net_device *dev = arg->dev;
4113 struct net *net = dev_net(dev);
4114
4625}
4626
4627/* called with write lock held for table with rt */
4628static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4629{
4630 const struct arg_netdev_event *arg = p_arg;
4631 const struct net_device *dev = arg->dev;
4632 struct net *net = dev_net(dev);
4633
4115 if (rt == net->ipv6.fib6_null_entry)
4634 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4116 return 0;
4117
4118 switch (arg->event) {
4119 case NETDEV_UNREGISTER:
4635 return 0;
4636
4637 switch (arg->event) {
4638 case NETDEV_UNREGISTER:
4120 return rt->fib6_nh.fib_nh_dev == dev ? -1 : 0;
4639 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4121 case NETDEV_DOWN:
4122 if (rt->should_flush)
4123 return -1;
4124 if (!rt->fib6_nsiblings)
4640 case NETDEV_DOWN:
4641 if (rt->should_flush)
4642 return -1;
4643 if (!rt->fib6_nsiblings)
4125 return rt->fib6_nh.fib_nh_dev == dev ? -1 : 0;
4644 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4126 if (rt6_multipath_uses_dev(rt, dev)) {
4127 unsigned int count;
4128
4129 count = rt6_multipath_dead_count(rt, dev);
4130 if (rt->fib6_nsiblings + 1 == count) {
4131 rt6_multipath_flush(rt);
4132 return -1;
4133 }
4134 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4135 RTNH_F_LINKDOWN);
4136 fib6_update_sernum(net, rt);
4137 rt6_multipath_rebalance(rt);
4138 }
4139 return -2;
4140 case NETDEV_CHANGE:
4645 if (rt6_multipath_uses_dev(rt, dev)) {
4646 unsigned int count;
4647
4648 count = rt6_multipath_dead_count(rt, dev);
4649 if (rt->fib6_nsiblings + 1 == count) {
4650 rt6_multipath_flush(rt);
4651 return -1;
4652 }
4653 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4654 RTNH_F_LINKDOWN);
4655 fib6_update_sernum(net, rt);
4656 rt6_multipath_rebalance(rt);
4657 }
4658 return -2;
4659 case NETDEV_CHANGE:
4141 if (rt->fib6_nh.fib_nh_dev != dev ||
4660 if (rt->fib6_nh->fib_nh_dev != dev ||
4142 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4143 break;
4661 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4662 break;
4144 rt->fib6_nh.fib_nh_flags |= RTNH_F_LINKDOWN;
4663 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4145 rt6_multipath_rebalance(rt);
4146 break;
4147 }
4148
4149 return 0;
4150}
4151
4152void rt6_sync_down_dev(struct net_device *dev, unsigned long event)

--- 17 unchanged lines hidden (view full) ---

4170 rt6_sync_down_dev(dev, event);
4171 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4172 neigh_ifdown(&nd_tbl, dev);
4173}
4174
4175struct rt6_mtu_change_arg {
4176 struct net_device *dev;
4177 unsigned int mtu;
4664 rt6_multipath_rebalance(rt);
4665 break;
4666 }
4667
4668 return 0;
4669}
4670
4671void rt6_sync_down_dev(struct net_device *dev, unsigned long event)

--- 17 unchanged lines hidden (view full) ---

4689 rt6_sync_down_dev(dev, event);
4690 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4691 neigh_ifdown(&nd_tbl, dev);
4692}
4693
4694struct rt6_mtu_change_arg {
4695 struct net_device *dev;
4696 unsigned int mtu;
4697 struct fib6_info *f6i;
4178};
4179
4698};
4699
4180static int rt6_mtu_change_route(struct fib6_info *rt, void *p_arg)
4700static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4181{
4701{
4702 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4703 struct fib6_info *f6i = arg->f6i;
4704
4705 /* For administrative MTU increase, there is no way to discover
4706 * IPv6 PMTU increase, so PMTU increase should be updated here.
4707 * Since RFC 1981 doesn't include administrative MTU increases,
4708 * updating the PMTU on an MTU increase is a MUST (i.e. jumbo frame).
4709 */
4710 if (nh->fib_nh_dev == arg->dev) {
4711 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4712 u32 mtu = f6i->fib6_pmtu;
4713
4714 if (mtu >= arg->mtu ||
4715 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4716 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4717
4718 spin_lock_bh(&rt6_exception_lock);
4719 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4720 spin_unlock_bh(&rt6_exception_lock);
4721 }
4722
4723 return 0;
4724}
4725
4726static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4727{
4182 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4183 struct inet6_dev *idev;
4184
4185 /* In IPv6 pmtu discovery is not optional,
4186 so that RTAX_MTU lock cannot disable it.
4187 We still use this lock to block changes
4188 caused by addrconf/ndisc.
4189 */
4190
4191 idev = __in6_dev_get(arg->dev);
4192 if (!idev)
4193 return 0;
4194
4728 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4729 struct inet6_dev *idev;
4730
4731 /* In IPv6 pmtu discovery is not optional,
4732 so that RTAX_MTU lock cannot disable it.
4733 We still use this lock to block changes
4734 caused by addrconf/ndisc.
4735 */
4736
4737 idev = __in6_dev_get(arg->dev);
4738 if (!idev)
4739 return 0;
4740
4195 /* For administrative MTU increase, there is no way to discover
4196 IPv6 PMTU increase, so PMTU increase should be updated here.
4197 Since RFC 1981 doesn't include administrative MTU increases,
4198 updating the PMTU on an MTU increase is a MUST (i.e. jumbo frame).
4199 */
4200 if (rt->fib6_nh.fib_nh_dev == arg->dev &&
4201 !fib6_metric_locked(rt, RTAX_MTU)) {
4202 u32 mtu = rt->fib6_pmtu;
4741 if (fib6_metric_locked(f6i, RTAX_MTU))
4742 return 0;
4203
4743
4204 if (mtu >= arg->mtu ||
4205 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4206 fib6_metric_set(rt, RTAX_MTU, arg->mtu);
4207
4208 spin_lock_bh(&rt6_exception_lock);
4209 rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
4210 spin_unlock_bh(&rt6_exception_lock);
4744 arg->f6i = f6i;
4745 if (f6i->nh) {
4746 /* fib6_nh_mtu_change only returns 0, so this is safe */
4747 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
4748 arg);
4211 }
4749 }
4212 return 0;
4750
4751 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4213}
4214
4215void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4216{
4217 struct rt6_mtu_change_arg arg = {
4218 .dev = dev,
4219 .mtu = mtu,
4220 };
4221
4222 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4223}
4224
4225static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4752}
4753
4754void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4755{
4756 struct rt6_mtu_change_arg arg = {
4757 .dev = dev,
4758 .mtu = mtu,
4759 };
4760
4761 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4762}
4763
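Aside (not part of route.c): rt6_mtu_change_route() and fib6_nh_mtu_change() above refresh RTAX_MTU when a device MTU changes. The stored route MTU is rewritten if it is now at least the new device MTU (it must shrink), or if it was merely tracking the device MTU and the device MTU grew, since an administrative increase is never discovered by PMTUD. A standalone paraphrase of that decision; the function and parameter names here are illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the check in fib6_nh_mtu_change(): route_mtu is the MTU stored on
 * the route, dev_mtu6 the device's current IPv6 MTU, new_mtu the MTU being
 * configured.
 */
static bool route_mtu_needs_update(unsigned int route_mtu,
				   unsigned int dev_mtu6,
				   unsigned int new_mtu)
{
	/* Shrink: the route would otherwise exceed the device. */
	if (route_mtu >= new_mtu)
		return true;
	/* Grow: the route was only tracking the device MTU, so follow it. */
	return route_mtu == dev_mtu6;
}

int main(void)
{
	printf("1500 -> 9000, route at 1500: %d\n",
	       route_mtu_needs_update(1500, 1500, 9000));	/* grows */
	printf("1500 -> 9000, route pinned at 1280: %d\n",
	       route_mtu_needs_update(1280, 1500, 9000));	/* untouched */
	printf("9000 -> 1500, route at 9000: %d\n",
	       route_mtu_needs_update(9000, 9000, 1500));	/* shrinks */
	return 0;
}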
4764static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4765 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4226 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4227 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4228 [RTA_OIF] = { .type = NLA_U32 },
4229 [RTA_IIF] = { .type = NLA_U32 },
4230 [RTA_PRIORITY] = { .type = NLA_U32 },
4231 [RTA_METRICS] = { .type = NLA_NESTED },
4232 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4233 [RTA_PREF] = { .type = NLA_U8 },
4234 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4235 [RTA_ENCAP] = { .type = NLA_NESTED },
4236 [RTA_EXPIRES] = { .type = NLA_U32 },
4237 [RTA_UID] = { .type = NLA_U32 },
4238 [RTA_MARK] = { .type = NLA_U32 },
4239 [RTA_TABLE] = { .type = NLA_U32 },
4240 [RTA_IP_PROTO] = { .type = NLA_U8 },
4241 [RTA_SPORT] = { .type = NLA_U16 },
4242 [RTA_DPORT] = { .type = NLA_U16 },
4766 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4767 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4768 [RTA_OIF] = { .type = NLA_U32 },
4769 [RTA_IIF] = { .type = NLA_U32 },
4770 [RTA_PRIORITY] = { .type = NLA_U32 },
4771 [RTA_METRICS] = { .type = NLA_NESTED },
4772 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4773 [RTA_PREF] = { .type = NLA_U8 },
4774 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4775 [RTA_ENCAP] = { .type = NLA_NESTED },
4776 [RTA_EXPIRES] = { .type = NLA_U32 },
4777 [RTA_UID] = { .type = NLA_U32 },
4778 [RTA_MARK] = { .type = NLA_U32 },
4779 [RTA_TABLE] = { .type = NLA_U32 },
4780 [RTA_IP_PROTO] = { .type = NLA_U8 },
4781 [RTA_SPORT] = { .type = NLA_U16 },
4782 [RTA_DPORT] = { .type = NLA_U16 },
4783 [RTA_NH_ID] = { .type = NLA_U32 },
4243};
4244
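Aside (not part of route.c): the policy entry added above lets rtm_to_fib6_config() accept RTA_NH_ID, so userspace can reference a pre-created nexthop object instead of spelling out RTA_GATEWAY/RTA_OIF/RTA_MULTIPATH (the parser below rejects the two forms as mutually exclusive). A hedged userspace sketch of composing such a request with the standard rtnetlink macros; the nexthop id 42 and the 2001:db8::1 destination are placeholders, the RTA_NH_ID fallback define assumes older UAPI headers, and actually opening and sending on a NETLINK_ROUTE socket is omitted.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>		/* AF_INET6 */
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef RTA_NH_ID		/* only present in recent UAPI headers */
#define RTA_NH_ID 30
#endif

/* Append one attribute to a netlink message held in a flat buffer. */
static void add_rtattr(struct nlmsghdr *nlh, unsigned short type,
		       const void *data, unsigned short len)
{
	struct rtattr *rta;

	rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));
	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		char attrs[128];
	} req;
	/* 2001:db8::1/128, a documentation prefix used as a placeholder. */
	unsigned char dst[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 };
	unsigned int nh_id = 42;	/* assumed pre-created nexthop object */

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.rtm));
	req.nlh.nlmsg_type = RTM_NEWROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;

	req.rtm.rtm_family = AF_INET6;
	req.rtm.rtm_dst_len = 128;
	req.rtm.rtm_table = RT_TABLE_MAIN;
	req.rtm.rtm_protocol = RTPROT_STATIC;
	req.rtm.rtm_scope = RT_SCOPE_UNIVERSE;
	req.rtm.rtm_type = RTN_UNICAST;

	add_rtattr(&req.nlh, RTA_DST, dst, sizeof(dst));
	add_rtattr(&req.nlh, RTA_NH_ID, &nh_id, sizeof(nh_id));

	printf("RTM_NEWROUTE message, %u bytes\n", req.nlh.nlmsg_len);
	return 0;
}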
4245static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4246 struct fib6_config *cfg,
4247 struct netlink_ext_ack *extack)
4248{
4249 struct rtmsg *rtm;
4250 struct nlattr *tb[RTA_MAX+1];

--- 30 unchanged lines hidden (view full) ---

4281 if (rtm->rtm_type == RTN_LOCAL)
4282 cfg->fc_flags |= RTF_LOCAL;
4283
4284 if (rtm->rtm_flags & RTM_F_CLONED)
4285 cfg->fc_flags |= RTF_CACHE;
4286
4287 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4288
4784};
4785
4786static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4787 struct fib6_config *cfg,
4788 struct netlink_ext_ack *extack)
4789{
4790 struct rtmsg *rtm;
4791 struct nlattr *tb[RTA_MAX+1];

--- 30 unchanged lines hidden (view full) ---

4822 if (rtm->rtm_type == RTN_LOCAL)
4823 cfg->fc_flags |= RTF_LOCAL;
4824
4825 if (rtm->rtm_flags & RTM_F_CLONED)
4826 cfg->fc_flags |= RTF_CACHE;
4827
4828 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4829
4830 if (tb[RTA_NH_ID]) {
4831 if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
4832 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
4833 NL_SET_ERR_MSG(extack,
4834 "Nexthop specification and nexthop id are mutually exclusive");
4835 goto errout;
4836 }
4837 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
4838 }
4839
4289 if (tb[RTA_GATEWAY]) {
4290 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4291 cfg->fc_flags |= RTF_GATEWAY;
4292 }
4293 if (tb[RTA_VIA]) {
4294 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4295 goto errout;
4296 }

--- 127 unchanged lines hidden (view full) ---

4424 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
4425}
4426
4427static int ip6_route_multipath_add(struct fib6_config *cfg,
4428 struct netlink_ext_ack *extack)
4429{
4430 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
4431 struct nl_info *info = &cfg->fc_nlinfo;
4840 if (tb[RTA_GATEWAY]) {
4841 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4842 cfg->fc_flags |= RTF_GATEWAY;
4843 }
4844 if (tb[RTA_VIA]) {
4845 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4846 goto errout;
4847 }

--- 127 unchanged lines hidden (view full) ---

4975 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
4976}
4977
4978static int ip6_route_multipath_add(struct fib6_config *cfg,
4979 struct netlink_ext_ack *extack)
4980{
4981 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
4982 struct nl_info *info = &cfg->fc_nlinfo;
4983 enum fib_event_type event_type;
4432 struct fib6_config r_cfg;
4433 struct rtnexthop *rtnh;
4434 struct fib6_info *rt;
4435 struct rt6_nh *err_nh;
4436 struct rt6_nh *nh, *nh_safe;
4437 __u16 nlflags;
4438 int remaining;
4439 int attrlen;

--- 43 unchanged lines hidden (view full) ---

4483 if (!rt6_qualify_for_ecmp(rt)) {
4484 err = -EINVAL;
4485 NL_SET_ERR_MSG(extack,
4486 "Device only routes can not be added for IPv6 using the multipath API.");
4487 fib6_info_release(rt);
4488 goto cleanup;
4489 }
4490
4984 struct fib6_config r_cfg;
4985 struct rtnexthop *rtnh;
4986 struct fib6_info *rt;
4987 struct rt6_nh *err_nh;
4988 struct rt6_nh *nh, *nh_safe;
4989 __u16 nlflags;
4990 int remaining;
4991 int attrlen;

--- 43 unchanged lines hidden (view full) ---

5035 if (!rt6_qualify_for_ecmp(rt)) {
5036 err = -EINVAL;
5037 NL_SET_ERR_MSG(extack,
5038 "Device only routes can not be added for IPv6 using the multipath API.");
5039 fib6_info_release(rt);
5040 goto cleanup;
5041 }
5042
4491 rt->fib6_nh.fib_nh_weight = rtnh->rtnh_hops + 1;
5043 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
4492
4493 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
4494 rt, &r_cfg);
4495 if (err) {
4496 fib6_info_release(rt);
4497 goto cleanup;
4498 }
4499
4500 rtnh = rtnh_next(rtnh, &remaining);
4501 }
4502
5044
5045 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
5046 rt, &r_cfg);
5047 if (err) {
5048 fib6_info_release(rt);
5049 goto cleanup;
5050 }
5051
5052 rtnh = rtnh_next(rtnh, &remaining);
5053 }
5054
5055 if (list_empty(&rt6_nh_list)) {
5056 NL_SET_ERR_MSG(extack,
5057 "Invalid nexthop configuration - no valid nexthops");
5058 return -EINVAL;
5059 }
5060
4503 /* for add and replace send one notification with all nexthops.
4504 * Skip the notification in fib6_add_rt2node and send one with
4505 * the full route when done
4506 */
4507 info->skip_notify = 1;
4508
5061 /* for add and replace send one notification with all nexthops.
5062 * Skip the notification in fib6_add_rt2node and send one with
5063 * the full route when done
5064 */
5065 info->skip_notify = 1;
5066
5067 /* For add and replace, send one notification with all nexthops. For
5068 * append, send one notification with all appended nexthops.
5069 */
5070 info->skip_notify_kernel = 1;
5071
4509 err_nh = NULL;
4510 list_for_each_entry(nh, &rt6_nh_list, next) {
4511 err = __ip6_ins_rt(nh->fib6_info, info, extack);
4512 fib6_info_release(nh->fib6_info);
4513
4514 if (!err) {
4515 /* save reference to last route successfully inserted */
4516 rt_last = nh->fib6_info;

--- 20 unchanged lines hidden (view full) ---

4537 * nexthops have been replaced by first new, the rest should
4538 * be added to it.
4539 */
4540 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
4541 NLM_F_REPLACE);
4542 nhn++;
4543 }
4544
5072 err_nh = NULL;
5073 list_for_each_entry(nh, &rt6_nh_list, next) {
5074 err = __ip6_ins_rt(nh->fib6_info, info, extack);
5075 fib6_info_release(nh->fib6_info);
5076
5077 if (!err) {
5078 /* save reference to last route successfully inserted */
5079 rt_last = nh->fib6_info;

--- 20 unchanged lines hidden (view full) ---

5100 * nexthops have been replaced by first new, the rest should
5101 * be added to it.
5102 */
5103 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5104 NLM_F_REPLACE);
5105 nhn++;
5106 }
5107
5108 event_type = replace ? FIB_EVENT_ENTRY_REPLACE : FIB_EVENT_ENTRY_ADD;
5109 err = call_fib6_multipath_entry_notifiers(info->nl_net, event_type,
5110 rt_notif, nhn - 1, extack);
5111 if (err) {
5112 /* Delete all the siblings that were just added */
5113 err_nh = NULL;
5114 goto add_errout;
5115 }
5116
4545 /* success ... tell user about new route */
4546 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
4547 goto cleanup;
4548
4549add_errout:
4550 /* send notification for routes that were added so that
4551 * the delete notifications sent by ip6_route_del are
4552 * coherent

--- 62 unchanged lines hidden (view full) ---

4615{
4616 struct fib6_config cfg;
4617 int err;
4618
4619 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4620 if (err < 0)
4621 return err;
4622
5117 /* success ... tell user about new route */
5118 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5119 goto cleanup;
5120
5121add_errout:
5122 /* send notification for routes that were added so that
5123 * the delete notifications sent by ip6_route_del are
5124 * coherent

--- 62 unchanged lines hidden (view full) ---

5187{
5188 struct fib6_config cfg;
5189 int err;
5190
5191 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5192 if (err < 0)
5193 return err;
5194
5195 if (cfg.fc_nh_id &&
5196 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5197 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5198 return -EINVAL;
5199 }
5200
4623 if (cfg.fc_mp)
4624 return ip6_route_multipath_del(&cfg, extack);
4625 else {
4626 cfg.fc_delete_all_nh = 1;
4627 return ip6_route_del(&cfg, extack);
4628 }
4629}
4630

--- 11 unchanged lines hidden (view full) ---

4642 cfg.fc_metric = IP6_RT_PRIO_USER;
4643
4644 if (cfg.fc_mp)
4645 return ip6_route_multipath_add(&cfg, extack);
4646 else
4647 return ip6_route_add(&cfg, GFP_KERNEL, extack);
4648}
4649
5201 if (cfg.fc_mp)
5202 return ip6_route_multipath_del(&cfg, extack);
5203 else {
5204 cfg.fc_delete_all_nh = 1;
5205 return ip6_route_del(&cfg, extack);
5206 }
5207}
5208

--- 11 unchanged lines hidden (view full) ---

5220 cfg.fc_metric = IP6_RT_PRIO_USER;
5221
5222 if (cfg.fc_mp)
5223 return ip6_route_multipath_add(&cfg, extack);
5224 else
5225 return ip6_route_add(&cfg, GFP_KERNEL, extack);
5226}
5227
4650static size_t rt6_nlmsg_size(struct fib6_info *rt)
5228/* add the overhead of this fib6_nh to nexthop_len */
5229static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
4651{
5230{
4652 int nexthop_len = 0;
5231 int *nexthop_len = arg;
4653
5232
4654 if (rt->fib6_nsiblings) {
4655 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
4656 + NLA_ALIGN(sizeof(struct rtnexthop))
4657 + nla_total_size(16) /* RTA_GATEWAY */
4658 + lwtunnel_get_encap_size(rt->fib6_nh.fib_nh_lws);
5233 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5234 + NLA_ALIGN(sizeof(struct rtnexthop))
5235 + nla_total_size(16); /* RTA_GATEWAY */
4659
5236
4660 nexthop_len *= rt->fib6_nsiblings;
5237 if (nh->fib_nh_lws) {
5238 /* RTA_ENCAP_TYPE */
5239 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5240 /* RTA_ENCAP */
5241 *nexthop_len += nla_total_size(2);
4661 }
4662
5242 }
5243
5244 return 0;
5245}
5246
5247static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5248{
5249 int nexthop_len;
5250
5251 if (f6i->nh) {
5252 nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5253 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5254 &nexthop_len);
5255 } else {
5256 struct fib6_nh *nh = f6i->fib6_nh;
5257
5258 nexthop_len = 0;
5259 if (f6i->fib6_nsiblings) {
5260 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
5261 + NLA_ALIGN(sizeof(struct rtnexthop))
5262 + nla_total_size(16) /* RTA_GATEWAY */
5263 + lwtunnel_get_encap_size(nh->fib_nh_lws);
5264
5265 nexthop_len *= f6i->fib6_nsiblings;
5266 }
5267 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5268 }
5269
4663 return NLMSG_ALIGN(sizeof(struct rtmsg))
4664 + nla_total_size(16) /* RTA_SRC */
4665 + nla_total_size(16) /* RTA_DST */
4666 + nla_total_size(16) /* RTA_GATEWAY */
4667 + nla_total_size(16) /* RTA_PREFSRC */
4668 + nla_total_size(4) /* RTA_TABLE */
4669 + nla_total_size(4) /* RTA_IIF */
4670 + nla_total_size(4) /* RTA_OIF */
4671 + nla_total_size(4) /* RTA_PRIORITY */
4672 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
4673 + nla_total_size(sizeof(struct rta_cacheinfo))
4674 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
4675 + nla_total_size(1) /* RTA_PREF */
5270 return NLMSG_ALIGN(sizeof(struct rtmsg))
5271 + nla_total_size(16) /* RTA_SRC */
5272 + nla_total_size(16) /* RTA_DST */
5273 + nla_total_size(16) /* RTA_GATEWAY */
5274 + nla_total_size(16) /* RTA_PREFSRC */
5275 + nla_total_size(4) /* RTA_TABLE */
5276 + nla_total_size(4) /* RTA_IIF */
5277 + nla_total_size(4) /* RTA_OIF */
5278 + nla_total_size(4) /* RTA_PRIORITY */
5279 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5280 + nla_total_size(sizeof(struct rta_cacheinfo))
5281 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5282 + nla_total_size(1) /* RTA_PREF */
4676 + lwtunnel_get_encap_size(rt->fib6_nh.fib_nh_lws)
4677 + nexthop_len;
4678}
4679
5283 + nexthop_len;
5284}
5285
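The size computed here is a worst-case netlink message estimate: every attribute costs a 4-byte header plus its payload, rounded up to a 4-byte boundary, and the per-nexthop block is charged once per sibling. A minimal user-space sketch of that arithmetic follows (names invented for illustration; the lwtunnel encap term is omitted, and only the 4-byte header/alignment rule of netlink attributes is taken as given):

#include <stdio.h>

#define MODEL_NLA_ALIGNTO	4
#define MODEL_NLA_ALIGN(len)	(((len) + MODEL_NLA_ALIGNTO - 1) & \
				 ~(MODEL_NLA_ALIGNTO - 1))
#define MODEL_NLA_HDRLEN	4	/* attribute header: two 16-bit fields */

static int model_nla_total_size(int payload)
{
	/* header plus payload, padded to the attribute alignment */
	return MODEL_NLA_ALIGN(MODEL_NLA_HDRLEN + payload);
}

int main(void)
{
	int per_nh, nsiblings = 3;

	/* RTA_MULTIPATH nest header + rtnexthop + 16-byte gateway address */
	per_nh = model_nla_total_size(0)
	       + MODEL_NLA_ALIGN(8)	/* stands in for sizeof(struct rtnexthop) */
	       + model_nla_total_size(16);

	printf("per nexthop: %d bytes, %d siblings: %d bytes\n",
	       per_nh, nsiblings, per_nh * nsiblings);
	return 0;
}
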
5286static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5287 unsigned char *flags)
5288{
5289 if (nexthop_is_multipath(nh)) {
5290 struct nlattr *mp;
5291
5292 mp = nla_nest_start(skb, RTA_MULTIPATH);
5293 if (!mp)
5294 goto nla_put_failure;
5295
5296 if (nexthop_mpath_fill_node(skb, nh))
5297 goto nla_put_failure;
5298
5299 nla_nest_end(skb, mp);
5300 } else {
5301 struct fib6_nh *fib6_nh;
5302
5303 fib6_nh = nexthop_fib6_nh(nh);
5304 if (fib_nexthop_info(skb, &fib6_nh->nh_common,
5305 flags, false) < 0)
5306 goto nla_put_failure;
5307 }
5308
5309 return 0;
5310
5311nla_put_failure:
5312 return -EMSGSIZE;
5313}
5314
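rt6_fill_node_nexthop() relies on the usual attribute-nesting pattern: the RTA_MULTIPATH header is written first with a placeholder length, the per-nexthop attributes are appended inside it, and closing the nest patches the header once the final size is known. A rough user-space model of that pattern, with invented types and helpers, looks like this:

#include <stdint.h>
#include <string.h>

struct model_attr { uint16_t len; uint16_t type; };
struct model_buf  { unsigned char data[256]; size_t used; };

static struct model_attr *model_nest_start(struct model_buf *b, uint16_t type)
{
	struct model_attr *nest = (struct model_attr *)(b->data + b->used);

	nest->type = type;
	nest->len = 0;			/* patched later by model_nest_end() */
	b->used += sizeof(*nest);
	return nest;
}

static void model_put(struct model_buf *b, const void *payload, size_t len)
{
	memcpy(b->data + b->used, payload, len);
	b->used += len;
}

static void model_nest_end(struct model_buf *b, struct model_attr *nest)
{
	/* the nest covers everything appended since model_nest_start() */
	nest->len = (uint16_t)((b->data + b->used) - (unsigned char *)nest);
}
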
4680static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4681 struct fib6_info *rt, struct dst_entry *dst,
4682 struct in6_addr *dest, struct in6_addr *src,
4683 int iif, int type, u32 portid, u32 seq,
4684 unsigned int flags)
4685{
4686 struct rt6_info *rt6 = (struct rt6_info *)dst;
4687 struct rt6key *rt6_dst, *rt6_src;
4688 u32 *pmetrics, table, rt6_flags;
5315static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5316 struct fib6_info *rt, struct dst_entry *dst,
5317 struct in6_addr *dest, struct in6_addr *src,
5318 int iif, int type, u32 portid, u32 seq,
5319 unsigned int flags)
5320{
5321 struct rt6_info *rt6 = (struct rt6_info *)dst;
5322 struct rt6key *rt6_dst, *rt6_src;
5323 u32 *pmetrics, table, rt6_flags;
5324 unsigned char nh_flags = 0;
4689 struct nlmsghdr *nlh;
4690 struct rtmsg *rtm;
4691 long expires = 0;
4692
4693 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
4694 if (!nlh)
4695 return -EMSGSIZE;
4696

--- 91 unchanged lines hidden ---

4788 } else if (rt->fib6_nsiblings) {
4789 struct fib6_info *sibling, *next_sibling;
4790 struct nlattr *mp;
4791
4792 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
4793 if (!mp)
4794 goto nla_put_failure;
4795
5325 struct nlmsghdr *nlh;
5326 struct rtmsg *rtm;
5327 long expires = 0;
5328
5329 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5330 if (!nlh)
5331 return -EMSGSIZE;
5332

--- 91 unchanged lines hidden (view full) ---

5424 } else if (rt->fib6_nsiblings) {
5425 struct fib6_info *sibling, *next_sibling;
5426 struct nlattr *mp;
5427
5428 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5429 if (!mp)
5430 goto nla_put_failure;
5431
4796 if (fib_add_nexthop(skb, &rt->fib6_nh.nh_common,
4797 rt->fib6_nh.fib_nh_weight) < 0)
5432 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5433 rt->fib6_nh->fib_nh_weight) < 0)
4798 goto nla_put_failure;
4799
4800 list_for_each_entry_safe(sibling, next_sibling,
4801 &rt->fib6_siblings, fib6_siblings) {
5434 goto nla_put_failure;
5435
5436 list_for_each_entry_safe(sibling, next_sibling,
5437 &rt->fib6_siblings, fib6_siblings) {
4802 if (fib_add_nexthop(skb, &sibling->fib6_nh.nh_common,
4803 sibling->fib6_nh.fib_nh_weight) < 0)
5438 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5439 sibling->fib6_nh->fib_nh_weight) < 0)
4804 goto nla_put_failure;
4805 }
4806
4807 nla_nest_end(skb, mp);
5440 goto nla_put_failure;
5441 }
5442
5443 nla_nest_end(skb, mp);
4808 } else {
4809 unsigned char nh_flags = 0;
5444 } else if (rt->nh) {
5445 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5446 goto nla_put_failure;
4810
5447
4811 if (fib_nexthop_info(skb, &rt->fib6_nh.nh_common,
5448 if (nexthop_is_blackhole(rt->nh))
5449 rtm->rtm_type = RTN_BLACKHOLE;
5450
5451 if (rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5452 goto nla_put_failure;
5453
5454 rtm->rtm_flags |= nh_flags;
5455 } else {
5456 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common,
4812 &nh_flags, false) < 0)
4813 goto nla_put_failure;
4814
4815 rtm->rtm_flags |= nh_flags;
4816 }
4817
4818 if (rt6_flags & RTF_EXPIRES) {
4819 expires = dst ? dst->expires : rt->expires;

--- 10 unchanged lines hidden ---

4830 nlmsg_end(skb, nlh);
4831 return 0;
4832
4833nla_put_failure:
4834 nlmsg_cancel(skb, nlh);
4835 return -EMSGSIZE;
4836}
4837
5457 &nh_flags, false) < 0)
5458 goto nla_put_failure;
5459
5460 rtm->rtm_flags |= nh_flags;
5461 }
5462
5463 if (rt6_flags & RTF_EXPIRES) {
5464 expires = dst ? dst->expires : rt->expires;

--- 10 unchanged lines hidden ---

5475 nlmsg_end(skb, nlh);
5476 return 0;
5477
5478nla_put_failure:
5479 nlmsg_cancel(skb, nlh);
5480 return -EMSGSIZE;
5481}
5482
5483static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5484{
5485 const struct net_device *dev = arg;
5486
5487 if (nh->fib_nh_dev == dev)
5488 return 1;
5489
5490 return 0;
5491}
5492
4838static bool fib6_info_uses_dev(const struct fib6_info *f6i,
4839 const struct net_device *dev)
4840{
5493static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5494 const struct net_device *dev)
5495{
4841 if (f6i->fib6_nh.fib_nh_dev == dev)
5496 if (f6i->nh) {
5497 struct net_device *_dev = (struct net_device *)dev;
5498
5499 return !!nexthop_for_each_fib6_nh(f6i->nh,
5500 fib6_info_nh_uses_dev,
5501 _dev);
5502 }
5503
5504 if (f6i->fib6_nh->fib_nh_dev == dev)
4842 return true;
4843
4844 if (f6i->fib6_nsiblings) {
4845 struct fib6_info *sibling, *next_sibling;
4846
4847 list_for_each_entry_safe(sibling, next_sibling,
4848 &f6i->fib6_siblings, fib6_siblings) {
5505 return true;
5506
5507 if (f6i->fib6_nsiblings) {
5508 struct fib6_info *sibling, *next_sibling;
5509
5510 list_for_each_entry_safe(sibling, next_sibling,
5511 &f6i->fib6_siblings, fib6_siblings) {
4849 if (sibling->fib6_nh.fib_nh_dev == dev)
5512 if (sibling->fib6_nh->fib_nh_dev == dev)
4850 return true;
4851 }
4852 }
4853
4854 return false;
4855}
4856
5513 return true;
5514 }
5515 }
5516
5517 return false;
5518}
5519
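fib6_info_uses_dev() leans on the callback walk provided for nexthop objects: the walk visits each fib6_nh and stops at the first callback that returns non-zero, so fib6_info_nh_uses_dev() returning 1 on a device match is enough, and the caller folds the result into a bool with !!. A stripped-down user-space model of that contract (all names invented):

struct model_nh { const void *dev; };

static int model_nh_uses_dev(struct model_nh *nh, void *arg)
{
	return nh->dev == arg;		/* non-zero stops the walk */
}

static int model_for_each_nh(struct model_nh *nhs, int count,
			     int (*cb)(struct model_nh *, void *), void *arg)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = cb(&nhs[i], arg);
		if (ret)
			return ret;	/* propagate the first non-zero result */
	}
	return 0;
}
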
4857int rt6_dump_route(struct fib6_info *rt, void *p_arg)
5520struct fib6_nh_exception_dump_walker {
5521 struct rt6_rtnl_dump_arg *dump;
5522 struct fib6_info *rt;
5523 unsigned int flags;
5524 unsigned int skip;
5525 unsigned int count;
5526};
5527
5528static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
4858{
5529{
5530 struct fib6_nh_exception_dump_walker *w = arg;
5531 struct rt6_rtnl_dump_arg *dump = w->dump;
5532 struct rt6_exception_bucket *bucket;
5533 struct rt6_exception *rt6_ex;
5534 int i, err;
5535
5536 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5537 if (!bucket)
5538 return 0;
5539
5540 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
5541 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
5542 if (w->skip) {
5543 w->skip--;
5544 continue;
5545 }
5546
5547 /* Expiration of entries doesn't bump sernum, insertion
5548 * does. Removal is triggered by insertion, so we can
5549 * rely on the fact that if entries change between two
5550 * partial dumps, this node is scanned again completely,
5551 * see rt6_insert_exception() and fib6_dump_table().
5552 *
5553 * Count expired entries we go through as handled
5554 * entries that we'll skip next time, in case of partial
5555 * node dump. Otherwise, if entries expire meanwhile,
5556 * we'll skip the wrong amount.
5557 */
5558 if (rt6_check_expired(rt6_ex->rt6i)) {
5559 w->count++;
5560 continue;
5561 }
5562
5563 err = rt6_fill_node(dump->net, dump->skb, w->rt,
5564 &rt6_ex->rt6i->dst, NULL, NULL, 0,
5565 RTM_NEWROUTE,
5566 NETLINK_CB(dump->cb->skb).portid,
5567 dump->cb->nlh->nlmsg_seq, w->flags);
5568 if (err)
5569 return err;
5570
5571 w->count++;
5572 }
5573 bucket++;
5574 }
5575
5576 return 0;
5577}
5578
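The comment above is the key to resumable exception dumps: entries already delivered in a previous pass are consumed from "skip" first, and expired entries are still counted as handled so that a later resume skips the right number of slots. A compact user-space model of that bookkeeping (types invented; emit() stands in for rt6_fill_node()):

struct model_exception { int expired; };

static int model_dump_exceptions(struct model_exception *ex, int n,
				 int (*emit)(struct model_exception *),
				 unsigned int *skip, unsigned int *count)
{
	int i, err;

	for (i = 0; i < n; i++) {
		if (*skip) {
			(*skip)--;
			continue;
		}
		if (ex[i].expired) {
			(*count)++;	/* handled, but nothing emitted */
			continue;
		}
		err = emit(&ex[i]);
		if (err)
			return err;	/* caller resumes later using *count */
		(*count)++;
	}
	return 0;
}
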
 5579/* Return -1 if done with node, or the number of handled routes on a partial dump */
5580int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
5581{
4859 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
4860 struct fib_dump_filter *filter = &arg->filter;
4861 unsigned int flags = NLM_F_MULTI;
4862 struct net *net = arg->net;
5582 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
5583 struct fib_dump_filter *filter = &arg->filter;
5584 unsigned int flags = NLM_F_MULTI;
5585 struct net *net = arg->net;
5586 int count = 0;
4863
4864 if (rt == net->ipv6.fib6_null_entry)
5587
5588 if (rt == net->ipv6.fib6_null_entry)
4865 return 0;
5589 return -1;
4866
4867 if ((filter->flags & RTM_F_PREFIX) &&
4868 !(rt->fib6_flags & RTF_PREFIX_RT)) {
4869 /* success since this is not a prefix route */
5590
5591 if ((filter->flags & RTM_F_PREFIX) &&
5592 !(rt->fib6_flags & RTF_PREFIX_RT)) {
5593 /* success since this is not a prefix route */
4870 return 1;
5594 return -1;
4871 }
5595 }
4872 if (filter->filter_set) {
4873 if ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
4874 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
4875 (filter->protocol && rt->fib6_protocol != filter->protocol)) {
4876 return 1;
4877 }
5596 if (filter->filter_set &&
5597 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5598 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5599 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5600 return -1;
5601 }
5602
5603 if (filter->filter_set ||
5604 !filter->dump_routes || !filter->dump_exceptions) {
4878 flags |= NLM_F_DUMP_FILTERED;
4879 }
4880
5605 flags |= NLM_F_DUMP_FILTERED;
5606 }
5607
4881 return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
4882 RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
4883 arg->cb->nlh->nlmsg_seq, flags);
5608 if (filter->dump_routes) {
5609 if (skip) {
5610 skip--;
5611 } else {
5612 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5613 0, RTM_NEWROUTE,
5614 NETLINK_CB(arg->cb->skb).portid,
5615 arg->cb->nlh->nlmsg_seq, flags)) {
5616 return 0;
5617 }
5618 count++;
5619 }
5620 }
5621
5622 if (filter->dump_exceptions) {
5623 struct fib6_nh_exception_dump_walker w = { .dump = arg,
5624 .rt = rt,
5625 .flags = flags,
5626 .skip = skip,
5627 .count = 0 };
5628 int err;
5629
5630 rcu_read_lock();
5631 if (rt->nh) {
5632 err = nexthop_for_each_fib6_nh(rt->nh,
5633 rt6_nh_dump_exceptions,
5634 &w);
5635 } else {
5636 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5637 }
5638 rcu_read_unlock();
5639
5640 if (err)
5641 return count += w.count;
5642 }
5643
5644 return -1;
4884}
4885
4886static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
4887 const struct nlmsghdr *nlh,
4888 struct nlattr **tb,
4889 struct netlink_ext_ack *extack)
4890{
4891 struct rtmsg *rtm;

--- 228 unchanged lines hidden ---

5120 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
5121 info->nlh, gfp_any());
5122 return;
5123errout:
5124 if (err < 0)
5125 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
5126}
5127
5645}
5646
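The return convention of rt6_dump_route() is therefore: -1 when the node (routes plus exceptions) has been dumped completely, and otherwise the number of entries handled before the message buffer filled up. A hypothetical caller can feed that count back as the skip value when it resumes on the same node; a sketch with invented names:

static int model_dump_node(int (*dump_route)(void *rt, void *arg,
					     unsigned int skip),
			   void *rt, void *arg, unsigned int *skip_in_node)
{
	int res = dump_route(rt, arg, *skip_in_node);

	if (res >= 0) {
		*skip_in_node += res;	/* partial dump: remember progress */
		return 1;		/* suspend, resume on this node later */
	}

	*skip_in_node = 0;		/* node fully dumped, next node starts fresh */
	return 0;
}
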
5647static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5648 const struct nlmsghdr *nlh,
5649 struct nlattr **tb,
5650 struct netlink_ext_ack *extack)
5651{
5652 struct rtmsg *rtm;

--- 228 unchanged lines hidden ---

5881 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
5882 info->nlh, gfp_any());
5883 return;
5884errout:
5885 if (err < 0)
5886 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
5887}
5888
5889void fib6_rt_update(struct net *net, struct fib6_info *rt,
5890 struct nl_info *info)
5891{
5892 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
5893 struct sk_buff *skb;
5894 int err = -ENOBUFS;
5895
5896 /* call_fib6_entry_notifiers will be removed when in-kernel notifier
5897 * is implemented and supported for nexthop objects
5898 */
5899 call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);
5900
5901 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
5902 if (!skb)
5903 goto errout;
5904
5905 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
5906 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
5907 if (err < 0) {
5908 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
5909 WARN_ON(err == -EMSGSIZE);
5910 kfree_skb(skb);
5911 goto errout;
5912 }
5913 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
5914 info->nlh, gfp_any());
5915 return;
5916errout:
5917 if (err < 0)
5918 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
5919}
5920
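fib6_rt_update() follows the standard notify shape: size the skb from the worst-case estimate, fill it, and treat running out of room as a bug in the estimate rather than a runtime condition, which is what the WARN_ON(err == -EMSGSIZE) expresses. A small user-space model of that shape (names and sizes invented):

#include <stdlib.h>

struct model_msg { unsigned char *buf; size_t cap, used; };

static int model_fill(struct model_msg *m, size_t attr_len)
{
	if (m->used + attr_len > m->cap)
		return -1;		/* would be -EMSGSIZE in the kernel */
	m->used += attr_len;
	return 0;
}

static void model_notify(size_t worst_case_size)
{
	struct model_msg m = { .cap = worst_case_size, .used = 0 };

	m.buf = malloc(m.cap);
	if (!m.buf)
		return;			/* allocation failure is the only expected error */

	if (model_fill(&m, 20))		/* e.g. one 20-byte attribute */
		abort();		/* estimate too small: a sizing bug */

	/* ... hand the finished message to the notification layer ... */
	free(m.buf);
}
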
5128static int ip6_route_dev_notify(struct notifier_block *this,
5129 unsigned long event, void *ptr)
5130{
5131 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5132 struct net *net = dev_net(dev);
5133
5134 if (!(dev->flags & IFF_LOOPBACK))
5135 return NOTIFY_OK;
5136
5137 if (event == NETDEV_REGISTER) {
5921static int ip6_route_dev_notify(struct notifier_block *this,
5922 unsigned long event, void *ptr)
5923{
5924 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5925 struct net *net = dev_net(dev);
5926
5927 if (!(dev->flags & IFF_LOOPBACK))
5928 return NOTIFY_OK;
5929
5930 if (event == NETDEV_REGISTER) {
5138 net->ipv6.fib6_null_entry->fib6_nh.fib_nh_dev = dev;
5931 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
5139 net->ipv6.ip6_null_entry->dst.dev = dev;
5140 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
5141#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5142 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
5143 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
5144 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
5145 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
5146#endif

--- 129 unchanged lines hidden ---

5276 .mode = 0644,
5277 .proc_handler = proc_dointvec_ms_jiffies,
5278 },
5279 {
5280 .procname = "skip_notify_on_dev_down",
5281 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
5282 .maxlen = sizeof(int),
5283 .mode = 0644,
5932 net->ipv6.ip6_null_entry->dst.dev = dev;
5933 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
5934#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5935 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
5936 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
5937 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
5938 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
5939#endif

--- 129 unchanged lines hidden ---

6069 .mode = 0644,
6070 .proc_handler = proc_dointvec_ms_jiffies,
6071 },
6072 {
6073 .procname = "skip_notify_on_dev_down",
6074 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
6075 .maxlen = sizeof(int),
6076 .mode = 0644,
5284 .proc_handler = proc_dointvec,
6077 .proc_handler = proc_dointvec_minmax,
5285 .extra1 = &zero,
5286 .extra2 = &one,
5287 },
5288 { }
5289};
5290
5291struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
5292{

--- 31 unchanged lines hidden ---

5324 int ret = -ENOMEM;
5325
5326 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
5327 sizeof(net->ipv6.ip6_dst_ops));
5328
5329 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
5330 goto out_ip6_dst_ops;
5331
6078 .extra1 = &zero,
6079 .extra2 = &one,
6080 },
6081 { }
6082};
6083
6084struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6085{

--- 31 unchanged lines hidden ---

6117 int ret = -ENOMEM;
6118
6119 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6120 sizeof(net->ipv6.ip6_dst_ops));
6121
6122 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6123 goto out_ip6_dst_ops;
6124
5332 net->ipv6.fib6_null_entry = kmemdup(&fib6_null_entry_template,
5333 sizeof(*net->ipv6.fib6_null_entry),
5334 GFP_KERNEL);
6125 net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
5335 if (!net->ipv6.fib6_null_entry)
5336 goto out_ip6_dst_entries;
6126 if (!net->ipv6.fib6_null_entry)
6127 goto out_ip6_dst_entries;
6128 memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6129 sizeof(*net->ipv6.fib6_null_entry));
5337
5338 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
5339 sizeof(*net->ipv6.ip6_null_entry),
5340 GFP_KERNEL);
5341 if (!net->ipv6.ip6_null_entry)
5342 goto out_fib6_null_entry;
5343 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
5344 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
5345 ip6_template_metrics, true);
6130
6131 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6132 sizeof(*net->ipv6.ip6_null_entry),
6133 GFP_KERNEL);
6134 if (!net->ipv6.ip6_null_entry)
6135 goto out_fib6_null_entry;
6136 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6137 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6138 ip6_template_metrics, true);
6139 INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);
5346
5347#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5348 net->ipv6.fib6_has_custom_rules = false;
5349 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
5350 sizeof(*net->ipv6.ip6_prohibit_entry),
5351 GFP_KERNEL);
5352 if (!net->ipv6.ip6_prohibit_entry)
5353 goto out_ip6_null_entry;
5354 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
5355 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
5356 ip6_template_metrics, true);
6140
6141#ifdef CONFIG_IPV6_MULTIPLE_TABLES
6142 net->ipv6.fib6_has_custom_rules = false;
6143 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6144 sizeof(*net->ipv6.ip6_prohibit_entry),
6145 GFP_KERNEL);
6146 if (!net->ipv6.ip6_prohibit_entry)
6147 goto out_ip6_null_entry;
6148 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6149 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6150 ip6_template_metrics, true);
6151 INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);
5357
5358 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
5359 sizeof(*net->ipv6.ip6_blk_hole_entry),
5360 GFP_KERNEL);
5361 if (!net->ipv6.ip6_blk_hole_entry)
5362 goto out_ip6_prohibit_entry;
5363 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
5364 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
5365 ip6_template_metrics, true);
6152
6153 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6154 sizeof(*net->ipv6.ip6_blk_hole_entry),
6155 GFP_KERNEL);
6156 if (!net->ipv6.ip6_blk_hole_entry)
6157 goto out_ip6_prohibit_entry;
6158 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6159 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6160 ip6_template_metrics, true);
6161 INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
5366#endif
5367
5368 net->ipv6.sysctl.flush_delay = 0;
5369 net->ipv6.sysctl.ip6_rt_max_size = 4096;
5370 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
5371 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
5372 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
5373 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;

--- 91 unchanged lines hidden ---

5465 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
5466};
5467
5468void __init ip6_route_init_special_entries(void)
5469{
 5470 /* Registration of the loopback device is done before this portion of
 5471 * code; the loopback reference in rt6_info will not be taken, so do it
 5472 * manually for init_net */
6162#endif
6163
6164 net->ipv6.sysctl.flush_delay = 0;
6165 net->ipv6.sysctl.ip6_rt_max_size = 4096;
6166 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6167 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6168 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6169 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;

--- 91 unchanged lines hidden ---

6261 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6262};
6263
6264void __init ip6_route_init_special_entries(void)
6265{
 6266 /* Registration of the loopback device is done before this portion of
 6267 * code; the loopback reference in rt6_info will not be taken, so do it
 6268 * manually for init_net */
5473 init_net.ipv6.fib6_null_entry->fib6_nh.fib_nh_dev = init_net.loopback_dev;
6269 init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
5474 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
5475 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5476 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5477 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
5478 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5479 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
5480 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5481 #endif

--- 106 unchanged lines hidden ---
6270 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6271 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6272 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6273 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6274 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6275 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6276 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6277 #endif

--- 106 unchanged lines hidden ---