route.c (0638eb573cde5888c0886c7f35da604e5db209a6) | route.c (aa8f8778493c85fff480cdf8b349b1e1dcb5f243) |
---|---|
1/* 2 * Linux INET6 implementation 3 * FIB front-end. 4 * 5 * Authors: 6 * Pedro Roque <roque@di.fc.ul.pt> 7 * 8 * This program is free software; you can redistribute it and/or --- 64 unchanged lines hidden (view full) --- 73 74enum rt6_nud_state { 75 RT6_NUD_FAIL_HARD = -3, 76 RT6_NUD_FAIL_PROBE = -2, 77 RT6_NUD_FAIL_DO_RR = -1, 78 RT6_NUD_SUCCEED = 1 79}; 80 | 1/* 2 * Linux INET6 implementation 3 * FIB front-end. 4 * 5 * Authors: 6 * Pedro Roque <roque@di.fc.ul.pt> 7 * 8 * This program is free software; you can redistribute it and/or --- 64 unchanged lines hidden (view full) --- 73 74enum rt6_nud_state { 75 RT6_NUD_FAIL_HARD = -3, 76 RT6_NUD_FAIL_PROBE = -2, 77 RT6_NUD_FAIL_DO_RR = -1, 78 RT6_NUD_SUCCEED = 1 79}; 80 |
81static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort); |
|
81static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 82static unsigned int ip6_default_advmss(const struct dst_entry *dst); 83static unsigned int ip6_mtu(const struct dst_entry *dst); 84static struct dst_entry *ip6_negative_advice(struct dst_entry *); 85static void ip6_dst_destroy(struct dst_entry *); 86static void ip6_dst_ifdown(struct dst_entry *, 87 struct net_device *dev, int how); 88static int ip6_dst_gc(struct dst_ops *ops); 89 90static int ip6_pkt_discard(struct sk_buff *skb); 91static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); 92static int ip6_pkt_prohibit(struct sk_buff *skb); 93static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb); 94static void ip6_link_failure(struct sk_buff *skb); 95static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 96 struct sk_buff *skb, u32 mtu); 97static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, 98 struct sk_buff *skb); | 82static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 83static unsigned int ip6_default_advmss(const struct dst_entry *dst); 84static unsigned int ip6_mtu(const struct dst_entry *dst); 85static struct dst_entry *ip6_negative_advice(struct dst_entry *); 86static void ip6_dst_destroy(struct dst_entry *); 87static void ip6_dst_ifdown(struct dst_entry *, 88 struct net_device *dev, int how); 89static int ip6_dst_gc(struct dst_ops *ops); 90 91static int ip6_pkt_discard(struct sk_buff *skb); 92static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); 93static int ip6_pkt_prohibit(struct sk_buff *skb); 94static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb); 95static void ip6_link_failure(struct sk_buff *skb); 96static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 97 struct sk_buff *skb, u32 mtu); 98static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, 99 struct sk_buff *skb); |
99static int rt6_score_route(struct fib6_info *rt, int oif, int strict); 100static size_t rt6_nlmsg_size(struct fib6_info *rt); 101static int rt6_fill_node(struct net *net, struct sk_buff *skb, 102 struct fib6_info *rt, struct dst_entry *dst, 103 struct in6_addr *dest, struct in6_addr *src, | 100static void rt6_dst_from_metrics_check(struct rt6_info *rt); 101static int rt6_score_route(struct rt6_info *rt, int oif, int strict); 102static size_t rt6_nlmsg_size(struct rt6_info *rt); 103static int rt6_fill_node(struct net *net, 104 struct sk_buff *skb, struct rt6_info *rt, 105 struct in6_addr *dst, struct in6_addr *src, |
104 int iif, int type, u32 portid, u32 seq, 105 unsigned int flags); | 106 int iif, int type, u32 portid, u32 seq, 107 unsigned int flags); |
106static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt, | 108static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt, |
107 struct in6_addr *daddr, 108 struct in6_addr *saddr); 109 110#ifdef CONFIG_IPV6_ROUTE_INFO | 109 struct in6_addr *daddr, 110 struct in6_addr *saddr); 111 112#ifdef CONFIG_IPV6_ROUTE_INFO |
111static struct fib6_info *rt6_add_route_info(struct net *net, | 113static struct rt6_info *rt6_add_route_info(struct net *net, |
112 const struct in6_addr *prefix, int prefixlen, 113 const struct in6_addr *gwaddr, 114 struct net_device *dev, 115 unsigned int pref); | 114 const struct in6_addr *prefix, int prefixlen, 115 const struct in6_addr *gwaddr, 116 struct net_device *dev, 117 unsigned int pref); |
116static struct fib6_info *rt6_get_route_info(struct net *net, | 118static struct rt6_info *rt6_get_route_info(struct net *net, |
117 const struct in6_addr *prefix, int prefixlen, 118 const struct in6_addr *gwaddr, 119 struct net_device *dev); 120#endif 121 122struct uncached_list { 123 spinlock_t lock; 124 struct list_head head; --- 52 unchanged lines hidden (view full) --- 177 dev_hold(rt->dst.dev); 178 dev_put(rt_dev); 179 } 180 } 181 spin_unlock_bh(&ul->lock); 182 } 183} 184 | 119 const struct in6_addr *prefix, int prefixlen, 120 const struct in6_addr *gwaddr, 121 struct net_device *dev); 122#endif 123 124struct uncached_list { 125 spinlock_t lock; 126 struct list_head head; --- 52 unchanged lines hidden (view full) --- 179 dev_hold(rt->dst.dev); 180 dev_put(rt_dev); 181 } 182 } 183 spin_unlock_bh(&ul->lock); 184 } 185} 186 |
185static inline const void *choose_neigh_daddr(const struct in6_addr *p, | 187static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt) 188{ 189 return dst_metrics_write_ptr(&rt->from->dst); 190} 191 192static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) 193{ 194 struct rt6_info *rt = (struct rt6_info *)dst; 195 196 if (rt->rt6i_flags & RTF_PCPU) 197 return rt6_pcpu_cow_metrics(rt); 198 else if (rt->rt6i_flags & RTF_CACHE) 199 return NULL; 200 else 201 return dst_cow_metrics_generic(dst, old); 202} 203 204static inline const void *choose_neigh_daddr(struct rt6_info *rt, |
186 struct sk_buff *skb, 187 const void *daddr) 188{ | 205 struct sk_buff *skb, 206 const void *daddr) 207{ |
208 struct in6_addr *p = &rt->rt6i_gateway; 209 |
|
189 if (!ipv6_addr_any(p)) 190 return (const void *) p; 191 else if (skb) 192 return &ipv6_hdr(skb)->daddr; 193 return daddr; 194} 195 | 210 if (!ipv6_addr_any(p)) 211 return (const void *) p; 212 else if (skb) 213 return &ipv6_hdr(skb)->daddr; 214 return daddr; 215} 216 |
196struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw, 197 struct net_device *dev, 198 struct sk_buff *skb, 199 const void *daddr) | 217static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, 218 struct sk_buff *skb, 219 const void *daddr) |
200{ | 220{ |
221 struct rt6_info *rt = (struct rt6_info *) dst; |
|
201 struct neighbour *n; 202 | 222 struct neighbour *n; 223 |
203 daddr = choose_neigh_daddr(gw, skb, daddr); 204 n = __ipv6_neigh_lookup(dev, daddr); | 224 daddr = choose_neigh_daddr(rt, skb, daddr); 225 n = __ipv6_neigh_lookup(dst->dev, daddr); |
205 if (n) 206 return n; | 226 if (n) 227 return n; |
207 return neigh_create(&nd_tbl, daddr, dev); | 228 return neigh_create(&nd_tbl, daddr, dst->dev); |
208} 209 | 229} 230 |
210static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst, 211 struct sk_buff *skb, 212 const void *daddr) 213{ 214 const struct rt6_info *rt = container_of(dst, struct rt6_info, dst); 215 216 return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr); 217} 218 | |
219static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr) 220{ 221 struct net_device *dev = dst->dev; 222 struct rt6_info *rt = (struct rt6_info *)dst; 223 | 231static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr) 232{ 233 struct net_device *dev = dst->dev; 234 struct rt6_info *rt = (struct rt6_info *)dst; 235 |
224 daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr); | 236 daddr = choose_neigh_daddr(rt, NULL, daddr); |
225 if (!daddr) 226 return; 227 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) 228 return; 229 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr)) 230 return; 231 __ipv6_confirm_neigh(dev, daddr); 232} 233 234static struct dst_ops ip6_dst_ops_template = { 235 .family = AF_INET6, 236 .gc = ip6_dst_gc, 237 .gc_thresh = 1024, 238 .check = ip6_dst_check, 239 .default_advmss = ip6_default_advmss, 240 .mtu = ip6_mtu, | 237 if (!daddr) 238 return; 239 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) 240 return; 241 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr)) 242 return; 243 __ipv6_confirm_neigh(dev, daddr); 244} 245 246static struct dst_ops ip6_dst_ops_template = { 247 .family = AF_INET6, 248 .gc = ip6_dst_gc, 249 .gc_thresh = 1024, 250 .check = ip6_dst_check, 251 .default_advmss = ip6_default_advmss, 252 .mtu = ip6_mtu, |
241 .cow_metrics = dst_cow_metrics_generic, | 253 .cow_metrics = ipv6_cow_metrics, |
242 .destroy = ip6_dst_destroy, 243 .ifdown = ip6_dst_ifdown, 244 .negative_advice = ip6_negative_advice, 245 .link_failure = ip6_link_failure, 246 .update_pmtu = ip6_rt_update_pmtu, 247 .redirect = rt6_do_redirect, 248 .local_out = __ip6_local_out, | 254 .destroy = ip6_dst_destroy, 255 .ifdown = ip6_dst_ifdown, 256 .negative_advice = ip6_negative_advice, 257 .link_failure = ip6_link_failure, 258 .update_pmtu = ip6_rt_update_pmtu, 259 .redirect = rt6_do_redirect, 260 .local_out = __ip6_local_out, |
249 .neigh_lookup = ip6_dst_neigh_lookup, | 261 .neigh_lookup = ip6_neigh_lookup, |
250 .confirm_neigh = ip6_confirm_neigh, 251}; 252 253static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst) 254{ 255 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 256 257 return mtu ? : dst->dev->mtu; --- 13 unchanged lines hidden (view full) --- 271 .family = AF_INET6, 272 .destroy = ip6_dst_destroy, 273 .check = ip6_dst_check, 274 .mtu = ip6_blackhole_mtu, 275 .default_advmss = ip6_default_advmss, 276 .update_pmtu = ip6_rt_blackhole_update_pmtu, 277 .redirect = ip6_rt_blackhole_redirect, 278 .cow_metrics = dst_cow_metrics_generic, | 262 .confirm_neigh = ip6_confirm_neigh, 263}; 264 265static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst) 266{ 267 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 268 269 return mtu ? : dst->dev->mtu; --- 13 unchanged lines hidden (view full) --- 283 .family = AF_INET6, 284 .destroy = ip6_dst_destroy, 285 .check = ip6_dst_check, 286 .mtu = ip6_blackhole_mtu, 287 .default_advmss = ip6_default_advmss, 288 .update_pmtu = ip6_rt_blackhole_update_pmtu, 289 .redirect = ip6_rt_blackhole_redirect, 290 .cow_metrics = dst_cow_metrics_generic, |
279 .neigh_lookup = ip6_dst_neigh_lookup, | 291 .neigh_lookup = ip6_neigh_lookup, |
280}; 281 282static const u32 ip6_template_metrics[RTAX_MAX] = { 283 [RTAX_HOPLIMIT - 1] = 0, 284}; 285 | 292}; 293 294static const u32 ip6_template_metrics[RTAX_MAX] = { 295 [RTAX_HOPLIMIT - 1] = 0, 296}; 297 |
286static const struct fib6_info fib6_null_entry_template = { 287 .fib6_flags = (RTF_REJECT | RTF_NONEXTHOP), 288 .fib6_protocol = RTPROT_KERNEL, 289 .fib6_metric = ~(u32)0, 290 .fib6_ref = ATOMIC_INIT(1), 291 .fib6_type = RTN_UNREACHABLE, 292 .fib6_metrics = (struct dst_metrics *)&dst_default_metrics, 293}; 294 | |
295static const struct rt6_info ip6_null_entry_template = { 296 .dst = { 297 .__refcnt = ATOMIC_INIT(1), 298 .__use = 1, 299 .obsolete = DST_OBSOLETE_FORCE_CHK, 300 .error = -ENETUNREACH, 301 .input = ip6_pkt_discard, 302 .output = ip6_pkt_discard_out, 303 }, 304 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), | 298static const struct rt6_info ip6_null_entry_template = { 299 .dst = { 300 .__refcnt = ATOMIC_INIT(1), 301 .__use = 1, 302 .obsolete = DST_OBSOLETE_FORCE_CHK, 303 .error = -ENETUNREACH, 304 .input = ip6_pkt_discard, 305 .output = ip6_pkt_discard_out, 306 }, 307 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
308 .rt6i_protocol = RTPROT_KERNEL, 309 .rt6i_metric = ~(u32) 0, 310 .rt6i_ref = ATOMIC_INIT(1), |
|
305}; 306 307#ifdef CONFIG_IPV6_MULTIPLE_TABLES 308 309static const struct rt6_info ip6_prohibit_entry_template = { 310 .dst = { 311 .__refcnt = ATOMIC_INIT(1), 312 .__use = 1, 313 .obsolete = DST_OBSOLETE_FORCE_CHK, 314 .error = -EACCES, 315 .input = ip6_pkt_prohibit, 316 .output = ip6_pkt_prohibit_out, 317 }, 318 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), | 311}; 312 313#ifdef CONFIG_IPV6_MULTIPLE_TABLES 314 315static const struct rt6_info ip6_prohibit_entry_template = { 316 .dst = { 317 .__refcnt = ATOMIC_INIT(1), 318 .__use = 1, 319 .obsolete = DST_OBSOLETE_FORCE_CHK, 320 .error = -EACCES, 321 .input = ip6_pkt_prohibit, 322 .output = ip6_pkt_prohibit_out, 323 }, 324 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
325 .rt6i_protocol = RTPROT_KERNEL, 326 .rt6i_metric = ~(u32) 0, 327 .rt6i_ref = ATOMIC_INIT(1), |
|
319}; 320 321static const struct rt6_info ip6_blk_hole_entry_template = { 322 .dst = { 323 .__refcnt = ATOMIC_INIT(1), 324 .__use = 1, 325 .obsolete = DST_OBSOLETE_FORCE_CHK, 326 .error = -EINVAL, 327 .input = dst_discard, 328 .output = dst_discard_out, 329 }, 330 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), | 328}; 329 330static const struct rt6_info ip6_blk_hole_entry_template = { 331 .dst = { 332 .__refcnt = ATOMIC_INIT(1), 333 .__use = 1, 334 .obsolete = DST_OBSOLETE_FORCE_CHK, 335 .error = -EINVAL, 336 .input = dst_discard, 337 .output = dst_discard_out, 338 }, 339 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
340 .rt6i_protocol = RTPROT_KERNEL, 341 .rt6i_metric = ~(u32) 0, 342 .rt6i_ref = ATOMIC_INIT(1), |
|
331}; 332 333#endif 334 335static void rt6_info_init(struct rt6_info *rt) 336{ 337 struct dst_entry *dst = &rt->dst; 338 339 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); | 343}; 344 345#endif 346 347static void rt6_info_init(struct rt6_info *rt) 348{ 349 struct dst_entry *dst = &rt->dst; 350 351 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); |
352 INIT_LIST_HEAD(&rt->rt6i_siblings); |
|
340 INIT_LIST_HEAD(&rt->rt6i_uncached); 341} 342 343/* allocate dst with ip6_dst_ops */ | 353 INIT_LIST_HEAD(&rt->rt6i_uncached); 354} 355 356/* allocate dst with ip6_dst_ops */ |
344struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, 345 int flags) | 357static struct rt6_info *__ip6_dst_alloc(struct net *net, 358 struct net_device *dev, 359 int flags) |
346{ 347 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 348 1, DST_OBSOLETE_FORCE_CHK, flags); 349 350 if (rt) { 351 rt6_info_init(rt); 352 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc); 353 } 354 355 return rt; 356} | 360{ 361 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 362 1, DST_OBSOLETE_FORCE_CHK, flags); 363 364 if (rt) { 365 rt6_info_init(rt); 366 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc); 367 } 368 369 return rt; 370} |
371 372struct rt6_info *ip6_dst_alloc(struct net *net, 373 struct net_device *dev, 374 int flags) 375{ 376 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags); 377 378 if (rt) { 379 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC); 380 if (!rt->rt6i_pcpu) { 381 dst_release_immediate(&rt->dst); 382 return NULL; 383 } 384 } 385 386 return rt; 387} |
|
357EXPORT_SYMBOL(ip6_dst_alloc); 358 359static void ip6_dst_destroy(struct dst_entry *dst) 360{ 361 struct rt6_info *rt = (struct rt6_info *)dst; | 388EXPORT_SYMBOL(ip6_dst_alloc); 389 390static void ip6_dst_destroy(struct dst_entry *dst) 391{ 392 struct rt6_info *rt = (struct rt6_info *)dst; |
362 struct fib6_info *from; | 393 struct rt6_exception_bucket *bucket; 394 struct rt6_info *from = rt->from; |
363 struct inet6_dev *idev; 364 365 dst_destroy_metrics_generic(dst); | 395 struct inet6_dev *idev; 396 397 dst_destroy_metrics_generic(dst); |
398 free_percpu(rt->rt6i_pcpu); |
|
366 rt6_uncached_list_del(rt); 367 368 idev = rt->rt6i_idev; 369 if (idev) { 370 rt->rt6i_idev = NULL; 371 in6_dev_put(idev); 372 } | 399 rt6_uncached_list_del(rt); 400 401 idev = rt->rt6i_idev; 402 if (idev) { 403 rt->rt6i_idev = NULL; 404 in6_dev_put(idev); 405 } |
406 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1); 407 if (bucket) { 408 rt->rt6i_exception_bucket = NULL; 409 kfree(bucket); 410 } |
|
373 | 411 |
374 rcu_read_lock(); 375 from = rcu_dereference(rt->from); 376 rcu_assign_pointer(rt->from, NULL); 377 fib6_info_release(from); 378 rcu_read_unlock(); | 412 rt->from = NULL; 413 dst_release(&from->dst); |
379} 380 381static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, 382 int how) 383{ 384 struct rt6_info *rt = (struct rt6_info *)dst; 385 struct inet6_dev *idev = rt->rt6i_idev; 386 struct net_device *loopback_dev = --- 13 unchanged lines hidden (view full) --- 400 if (rt->rt6i_flags & RTF_EXPIRES) 401 return time_after(jiffies, rt->dst.expires); 402 else 403 return false; 404} 405 406static bool rt6_check_expired(const struct rt6_info *rt) 407{ | 414} 415 416static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, 417 int how) 418{ 419 struct rt6_info *rt = (struct rt6_info *)dst; 420 struct inet6_dev *idev = rt->rt6i_idev; 421 struct net_device *loopback_dev = --- 13 unchanged lines hidden (view full) --- 435 if (rt->rt6i_flags & RTF_EXPIRES) 436 return time_after(jiffies, rt->dst.expires); 437 else 438 return false; 439} 440 441static bool rt6_check_expired(const struct rt6_info *rt) 442{ |
408 struct fib6_info *from; 409 410 from = rcu_dereference(rt->from); 411 | |
412 if (rt->rt6i_flags & RTF_EXPIRES) { 413 if (time_after(jiffies, rt->dst.expires)) 414 return true; | 443 if (rt->rt6i_flags & RTF_EXPIRES) { 444 if (time_after(jiffies, rt->dst.expires)) 445 return true; |
415 } else if (from) { | 446 } else if (rt->from) { |
416 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK || | 447 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK || |
417 fib6_check_expired(from); | 448 rt6_check_expired(rt->from); |
418 } 419 return false; 420} 421 | 449 } 450 return false; 451} 452 |
422static struct fib6_info *rt6_multipath_select(const struct net *net, 423 struct fib6_info *match, | 453static struct rt6_info *rt6_multipath_select(const struct net *net, 454 struct rt6_info *match, |
424 struct flowi6 *fl6, int oif, 425 const struct sk_buff *skb, 426 int strict) 427{ | 455 struct flowi6 *fl6, int oif, 456 const struct sk_buff *skb, 457 int strict) 458{ |
428 struct fib6_info *sibling, *next_sibling; | 459 struct rt6_info *sibling, *next_sibling; |
429 430 /* We might have already computed the hash for ICMPv6 errors. In such 431 * case it will always be non-zero. Otherwise now is the time to do it. 432 */ 433 if (!fl6->mp_hash) 434 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL); 435 | 460 461 /* We might have already computed the hash for ICMPv6 errors. In such 462 * case it will always be non-zero. Otherwise now is the time to do it. 463 */ 464 if (!fl6->mp_hash) 465 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL); 466 |
436 if (fl6->mp_hash <= atomic_read(&match->fib6_nh.nh_upper_bound)) | 467 if (fl6->mp_hash <= atomic_read(&match->rt6i_nh_upper_bound)) |
437 return match; 438 | 468 return match; 469 |
439 list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings, 440 fib6_siblings) { 441 int nh_upper_bound; 442 443 nh_upper_bound = atomic_read(&sibling->fib6_nh.nh_upper_bound); 444 if (fl6->mp_hash > nh_upper_bound) | 470 list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings, 471 rt6i_siblings) { 472 if (fl6->mp_hash > atomic_read(&sibling->rt6i_nh_upper_bound)) |
445 continue; 446 if (rt6_score_route(sibling, oif, strict) < 0) 447 break; 448 match = sibling; 449 break; 450 } 451 452 return match; 453} 454 455/* 456 * Route lookup. rcu_read_lock() should be held. 457 */ 458 | 473 continue; 474 if (rt6_score_route(sibling, oif, strict) < 0) 475 break; 476 match = sibling; 477 break; 478 } 479 480 return match; 481} 482 483/* 484 * Route lookup. rcu_read_lock() should be held. 485 */ 486 |
459static inline struct fib6_info *rt6_device_match(struct net *net, 460 struct fib6_info *rt, | 487static inline struct rt6_info *rt6_device_match(struct net *net, 488 struct rt6_info *rt, |
461 const struct in6_addr *saddr, 462 int oif, 463 int flags) 464{ | 489 const struct in6_addr *saddr, 490 int oif, 491 int flags) 492{ |
465 struct fib6_info *sprt; | 493 struct rt6_info *local = NULL; 494 struct rt6_info *sprt; |
466 | 495 |
467 if (!oif && ipv6_addr_any(saddr) && 468 !(rt->fib6_nh.nh_flags & RTNH_F_DEAD)) | 496 if (!oif && ipv6_addr_any(saddr) && !(rt->rt6i_nh_flags & RTNH_F_DEAD)) |
469 return rt; 470 471 for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) { | 497 return rt; 498 499 for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) { |
472 const struct net_device *dev = sprt->fib6_nh.nh_dev; | 500 struct net_device *dev = sprt->dst.dev; |
473 | 501 |
474 if (sprt->fib6_nh.nh_flags & RTNH_F_DEAD) | 502 if (sprt->rt6i_nh_flags & RTNH_F_DEAD) |
475 continue; 476 477 if (oif) { 478 if (dev->ifindex == oif) 479 return sprt; | 503 continue; 504 505 if (oif) { 506 if (dev->ifindex == oif) 507 return sprt; |
508 if (dev->flags & IFF_LOOPBACK) { 509 if (!sprt->rt6i_idev || 510 sprt->rt6i_idev->dev->ifindex != oif) { 511 if (flags & RT6_LOOKUP_F_IFACE) 512 continue; 513 if (local && 514 local->rt6i_idev->dev->ifindex == oif) 515 continue; 516 } 517 local = sprt; 518 } |
|
480 } else { 481 if (ipv6_chk_addr(net, saddr, dev, 482 flags & RT6_LOOKUP_F_IFACE)) 483 return sprt; 484 } 485 } 486 | 519 } else { 520 if (ipv6_chk_addr(net, saddr, dev, 521 flags & RT6_LOOKUP_F_IFACE)) 522 return sprt; 523 } 524 } 525 |
487 if (oif && flags & RT6_LOOKUP_F_IFACE) 488 return net->ipv6.fib6_null_entry; | 526 if (oif) { 527 if (local) 528 return local; |
489 | 529 |
490 return rt->fib6_nh.nh_flags & RTNH_F_DEAD ? net->ipv6.fib6_null_entry : rt; | 530 if (flags & RT6_LOOKUP_F_IFACE) 531 return net->ipv6.ip6_null_entry; 532 } 533 534 return rt->rt6i_nh_flags & RTNH_F_DEAD ? net->ipv6.ip6_null_entry : rt; |
491} 492 493#ifdef CONFIG_IPV6_ROUTER_PREF 494struct __rt6_probe_work { 495 struct work_struct work; 496 struct in6_addr target; 497 struct net_device *dev; 498}; --- 5 unchanged lines hidden (view full) --- 504 container_of(w, struct __rt6_probe_work, work); 505 506 addrconf_addr_solict_mult(&work->target, &mcaddr); 507 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0); 508 dev_put(work->dev); 509 kfree(work); 510} 511 | 535} 536 537#ifdef CONFIG_IPV6_ROUTER_PREF 538struct __rt6_probe_work { 539 struct work_struct work; 540 struct in6_addr target; 541 struct net_device *dev; 542}; --- 5 unchanged lines hidden (view full) --- 548 container_of(w, struct __rt6_probe_work, work); 549 550 addrconf_addr_solict_mult(&work->target, &mcaddr); 551 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0); 552 dev_put(work->dev); 553 kfree(work); 554} 555 |
512static void rt6_probe(struct fib6_info *rt) | 556static void rt6_probe(struct rt6_info *rt) |
513{ 514 struct __rt6_probe_work *work; | 557{ 558 struct __rt6_probe_work *work; |
515 const struct in6_addr *nh_gw; | |
516 struct neighbour *neigh; | 559 struct neighbour *neigh; |
517 struct net_device *dev; 518 | |
519 /* 520 * Okay, this does not seem to be appropriate 521 * for now, however, we need to check if it 522 * is really so; aka Router Reachability Probing. 523 * 524 * Router Reachability Probe MUST be rate-limited 525 * to no more than one per minute. 526 */ | 560 /* 561 * Okay, this does not seem to be appropriate 562 * for now, however, we need to check if it 563 * is really so; aka Router Reachability Probing. 564 * 565 * Router Reachability Probe MUST be rate-limited 566 * to no more than one per minute. 567 */ |
527 if (!rt || !(rt->fib6_flags & RTF_GATEWAY)) | 568 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY)) |
528 return; | 569 return; |
529 530 nh_gw = &rt->fib6_nh.nh_gw; 531 dev = rt->fib6_nh.nh_dev; | |
532 rcu_read_lock_bh(); | 570 rcu_read_lock_bh(); |
533 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw); | 571 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); |
534 if (neigh) { | 572 if (neigh) { |
535 struct inet6_dev *idev; 536 | |
537 if (neigh->nud_state & NUD_VALID) 538 goto out; 539 | 573 if (neigh->nud_state & NUD_VALID) 574 goto out; 575 |
540 idev = __in6_dev_get(dev); | |
541 work = NULL; 542 write_lock(&neigh->lock); 543 if (!(neigh->nud_state & NUD_VALID) && 544 time_after(jiffies, | 576 work = NULL; 577 write_lock(&neigh->lock); 578 if (!(neigh->nud_state & NUD_VALID) && 579 time_after(jiffies, |
545 neigh->updated + idev->cnf.rtr_probe_interval)) { | 580 neigh->updated + 581 rt->rt6i_idev->cnf.rtr_probe_interval)) { |
546 work = kmalloc(sizeof(*work), GFP_ATOMIC); 547 if (work) 548 __neigh_set_probe_once(neigh); 549 } 550 write_unlock(&neigh->lock); 551 } else { 552 work = kmalloc(sizeof(*work), GFP_ATOMIC); 553 } 554 555 if (work) { 556 INIT_WORK(&work->work, rt6_probe_deferred); | 582 work = kmalloc(sizeof(*work), GFP_ATOMIC); 583 if (work) 584 __neigh_set_probe_once(neigh); 585 } 586 write_unlock(&neigh->lock); 587 } else { 588 work = kmalloc(sizeof(*work), GFP_ATOMIC); 589 } 590 591 if (work) { 592 INIT_WORK(&work->work, rt6_probe_deferred); |
557 work->target = *nh_gw; 558 dev_hold(dev); 559 work->dev = dev; | 593 work->target = rt->rt6i_gateway; 594 dev_hold(rt->dst.dev); 595 work->dev = rt->dst.dev; |
560 schedule_work(&work->work); 561 } 562 563out: 564 rcu_read_unlock_bh(); 565} 566#else | 596 schedule_work(&work->work); 597 } 598 599out: 600 rcu_read_unlock_bh(); 601} 602#else |
567static inline void rt6_probe(struct fib6_info *rt) | 603static inline void rt6_probe(struct rt6_info *rt) |
568{ 569} 570#endif 571 572/* 573 * Default Router Selection (RFC 2461 6.3.6) 574 */ | 604{ 605} 606#endif 607 608/* 609 * Default Router Selection (RFC 2461 6.3.6) 610 */ |
575static inline int rt6_check_dev(struct fib6_info *rt, int oif) | 611static inline int rt6_check_dev(struct rt6_info *rt, int oif) |
576{ | 612{ |
577 const struct net_device *dev = rt->fib6_nh.nh_dev; 578 | 613 struct net_device *dev = rt->dst.dev; |
579 if (!oif || dev->ifindex == oif) 580 return 2; | 614 if (!oif || dev->ifindex == oif) 615 return 2; |
616 if ((dev->flags & IFF_LOOPBACK) && 617 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif) 618 return 1; |
|
581 return 0; 582} 583 | 619 return 0; 620} 621 |
584static inline enum rt6_nud_state rt6_check_neigh(struct fib6_info *rt) | 622static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt) |
585{ | 623{ |
586 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD; | |
587 struct neighbour *neigh; | 624 struct neighbour *neigh; |
625 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD; |
|
588 | 626 |
589 if (rt->fib6_flags & RTF_NONEXTHOP || 590 !(rt->fib6_flags & RTF_GATEWAY)) | 627 if (rt->rt6i_flags & RTF_NONEXTHOP || 628 !(rt->rt6i_flags & RTF_GATEWAY)) |
591 return RT6_NUD_SUCCEED; 592 593 rcu_read_lock_bh(); | 629 return RT6_NUD_SUCCEED; 630 631 rcu_read_lock_bh(); |
594 neigh = __ipv6_neigh_lookup_noref(rt->fib6_nh.nh_dev, 595 &rt->fib6_nh.nh_gw); | 632 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); |
596 if (neigh) { 597 read_lock(&neigh->lock); 598 if (neigh->nud_state & NUD_VALID) 599 ret = RT6_NUD_SUCCEED; 600#ifdef CONFIG_IPV6_ROUTER_PREF 601 else if (!(neigh->nud_state & NUD_FAILED)) 602 ret = RT6_NUD_SUCCEED; 603 else --- 4 unchanged lines hidden (view full) --- 608 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ? 609 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR; 610 } 611 rcu_read_unlock_bh(); 612 613 return ret; 614} 615 | 633 if (neigh) { 634 read_lock(&neigh->lock); 635 if (neigh->nud_state & NUD_VALID) 636 ret = RT6_NUD_SUCCEED; 637#ifdef CONFIG_IPV6_ROUTER_PREF 638 else if (!(neigh->nud_state & NUD_FAILED)) 639 ret = RT6_NUD_SUCCEED; 640 else --- 4 unchanged lines hidden (view full) --- 645 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ? 646 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR; 647 } 648 rcu_read_unlock_bh(); 649 650 return ret; 651} 652 |
616static int rt6_score_route(struct fib6_info *rt, int oif, int strict) | 653static int rt6_score_route(struct rt6_info *rt, int oif, 654 int strict) |
617{ 618 int m; 619 620 m = rt6_check_dev(rt, oif); 621 if (!m && (strict & RT6_LOOKUP_F_IFACE)) 622 return RT6_NUD_FAIL_HARD; 623#ifdef CONFIG_IPV6_ROUTER_PREF | 655{ 656 int m; 657 658 m = rt6_check_dev(rt, oif); 659 if (!m && (strict & RT6_LOOKUP_F_IFACE)) 660 return RT6_NUD_FAIL_HARD; 661#ifdef CONFIG_IPV6_ROUTER_PREF |
624 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->fib6_flags)) << 2; | 662 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2; |
625#endif 626 if (strict & RT6_LOOKUP_F_REACHABLE) { 627 int n = rt6_check_neigh(rt); 628 if (n < 0) 629 return n; 630 } 631 return m; 632} 633 | 663#endif 664 if (strict & RT6_LOOKUP_F_REACHABLE) { 665 int n = rt6_check_neigh(rt); 666 if (n < 0) 667 return n; 668 } 669 return m; 670} 671 |
634/* called with rc_read_lock held */ 635static inline bool fib6_ignore_linkdown(const struct fib6_info *f6i) 636{ 637 const struct net_device *dev = fib6_info_nh_dev(f6i); 638 bool rc = false; 639 640 if (dev) { 641 const struct inet6_dev *idev = __in6_dev_get(dev); 642 643 rc = !!idev->cnf.ignore_routes_with_linkdown; 644 } 645 646 return rc; 647} 648 649static struct fib6_info *find_match(struct fib6_info *rt, int oif, int strict, 650 int *mpri, struct fib6_info *match, | 672static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict, 673 int *mpri, struct rt6_info *match, |
651 bool *do_rr) 652{ 653 int m; 654 bool match_do_rr = false; | 674 bool *do_rr) 675{ 676 int m; 677 bool match_do_rr = false; |
678 struct inet6_dev *idev = rt->rt6i_idev; |
|
655 | 679 |
656 if (rt->fib6_nh.nh_flags & RTNH_F_DEAD) | 680 if (rt->rt6i_nh_flags & RTNH_F_DEAD) |
657 goto out; 658 | 681 goto out; 682 |
659 if (fib6_ignore_linkdown(rt) && 660 rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN && | 683 if (idev->cnf.ignore_routes_with_linkdown && 684 rt->rt6i_nh_flags & RTNH_F_LINKDOWN && |
661 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE)) 662 goto out; 663 | 685 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE)) 686 goto out; 687 |
664 if (fib6_check_expired(rt)) | 688 if (rt6_check_expired(rt)) |
665 goto out; 666 667 m = rt6_score_route(rt, oif, strict); 668 if (m == RT6_NUD_FAIL_DO_RR) { 669 match_do_rr = true; 670 m = 0; /* lowest valid score */ 671 } else if (m == RT6_NUD_FAIL_HARD) { 672 goto out; --- 7 unchanged lines hidden (view full) --- 680 *do_rr = match_do_rr; 681 *mpri = m; 682 match = rt; 683 } 684out: 685 return match; 686} 687 | 689 goto out; 690 691 m = rt6_score_route(rt, oif, strict); 692 if (m == RT6_NUD_FAIL_DO_RR) { 693 match_do_rr = true; 694 m = 0; /* lowest valid score */ 695 } else if (m == RT6_NUD_FAIL_HARD) { 696 goto out; --- 7 unchanged lines hidden (view full) --- 704 *do_rr = match_do_rr; 705 *mpri = m; 706 match = rt; 707 } 708out: 709 return match; 710} 711 |
688static struct fib6_info *find_rr_leaf(struct fib6_node *fn, 689 struct fib6_info *leaf, 690 struct fib6_info *rr_head, | 712static struct rt6_info *find_rr_leaf(struct fib6_node *fn, 713 struct rt6_info *leaf, 714 struct rt6_info *rr_head, |
691 u32 metric, int oif, int strict, 692 bool *do_rr) 693{ | 715 u32 metric, int oif, int strict, 716 bool *do_rr) 717{ |
694 struct fib6_info *rt, *match, *cont; | 718 struct rt6_info *rt, *match, *cont; |
695 int mpri = -1; 696 697 match = NULL; 698 cont = NULL; 699 for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) { | 719 int mpri = -1; 720 721 match = NULL; 722 cont = NULL; 723 for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) { |
700 if (rt->fib6_metric != metric) { | 724 if (rt->rt6i_metric != metric) { |
701 cont = rt; 702 break; 703 } 704 705 match = find_match(rt, oif, strict, &mpri, match, do_rr); 706 } 707 708 for (rt = leaf; rt && rt != rr_head; 709 rt = rcu_dereference(rt->rt6_next)) { | 725 cont = rt; 726 break; 727 } 728 729 match = find_match(rt, oif, strict, &mpri, match, do_rr); 730 } 731 732 for (rt = leaf; rt && rt != rr_head; 733 rt = rcu_dereference(rt->rt6_next)) { |
710 if (rt->fib6_metric != metric) { | 734 if (rt->rt6i_metric != metric) { |
711 cont = rt; 712 break; 713 } 714 715 match = find_match(rt, oif, strict, &mpri, match, do_rr); 716 } 717 718 if (match || !cont) 719 return match; 720 721 for (rt = cont; rt; rt = rcu_dereference(rt->rt6_next)) 722 match = find_match(rt, oif, strict, &mpri, match, do_rr); 723 724 return match; 725} 726 | 735 cont = rt; 736 break; 737 } 738 739 match = find_match(rt, oif, strict, &mpri, match, do_rr); 740 } 741 742 if (match || !cont) 743 return match; 744 745 for (rt = cont; rt; rt = rcu_dereference(rt->rt6_next)) 746 match = find_match(rt, oif, strict, &mpri, match, do_rr); 747 748 return match; 749} 750 |
727static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn, | 751static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn, |
728 int oif, int strict) 729{ | 752 int oif, int strict) 753{ |
730 struct fib6_info *leaf = rcu_dereference(fn->leaf); 731 struct fib6_info *match, *rt0; | 754 struct rt6_info *leaf = rcu_dereference(fn->leaf); 755 struct rt6_info *match, *rt0; |
732 bool do_rr = false; 733 int key_plen; 734 | 756 bool do_rr = false; 757 int key_plen; 758 |
735 if (!leaf || leaf == net->ipv6.fib6_null_entry) 736 return net->ipv6.fib6_null_entry; | 759 if (!leaf || leaf == net->ipv6.ip6_null_entry) 760 return net->ipv6.ip6_null_entry; |
737 738 rt0 = rcu_dereference(fn->rr_ptr); 739 if (!rt0) 740 rt0 = leaf; 741 742 /* Double check to make sure fn is not an intermediate node 743 * and fn->leaf does not points to its child's leaf 744 * (This might happen if all routes under fn are deleted from 745 * the tree and fib6_repair_tree() is called on the node.) 746 */ | 761 762 rt0 = rcu_dereference(fn->rr_ptr); 763 if (!rt0) 764 rt0 = leaf; 765 766 /* Double check to make sure fn is not an intermediate node 767 * and fn->leaf does not points to its child's leaf 768 * (This might happen if all routes under fn are deleted from 769 * the tree and fib6_repair_tree() is called on the node.) 770 */ |
747 key_plen = rt0->fib6_dst.plen; | 771 key_plen = rt0->rt6i_dst.plen; |
748#ifdef CONFIG_IPV6_SUBTREES | 772#ifdef CONFIG_IPV6_SUBTREES |
749 if (rt0->fib6_src.plen) 750 key_plen = rt0->fib6_src.plen; | 773 if (rt0->rt6i_src.plen) 774 key_plen = rt0->rt6i_src.plen; |
751#endif 752 if (fn->fn_bit != key_plen) | 775#endif 776 if (fn->fn_bit != key_plen) |
753 return net->ipv6.fib6_null_entry; | 777 return net->ipv6.ip6_null_entry; |
754 | 778 |
755 match = find_rr_leaf(fn, leaf, rt0, rt0->fib6_metric, oif, strict, | 779 match = find_rr_leaf(fn, leaf, rt0, rt0->rt6i_metric, oif, strict, |
756 &do_rr); 757 758 if (do_rr) { | 780 &do_rr); 781 782 if (do_rr) { |
759 struct fib6_info *next = rcu_dereference(rt0->rt6_next); | 783 struct rt6_info *next = rcu_dereference(rt0->rt6_next); |
760 761 /* no entries matched; do round-robin */ | 784 785 /* no entries matched; do round-robin */ |
762 if (!next || next->fib6_metric != rt0->fib6_metric) | 786 if (!next || next->rt6i_metric != rt0->rt6i_metric) |
763 next = leaf; 764 765 if (next != rt0) { | 787 next = leaf; 788 789 if (next != rt0) { |
766 spin_lock_bh(&leaf->fib6_table->tb6_lock); | 790 spin_lock_bh(&leaf->rt6i_table->tb6_lock); |
767 /* make sure next is not being deleted from the tree */ | 791 /* make sure next is not being deleted from the tree */ |
768 if (next->fib6_node) | 792 if (next->rt6i_node) |
769 rcu_assign_pointer(fn->rr_ptr, next); | 793 rcu_assign_pointer(fn->rr_ptr, next); |
770 spin_unlock_bh(&leaf->fib6_table->tb6_lock); | 794 spin_unlock_bh(&leaf->rt6i_table->tb6_lock); |
771 } 772 } 773 | 795 } 796 } 797 |
774 return match ? match : net->ipv6.fib6_null_entry; | 798 return match ? match : net->ipv6.ip6_null_entry; |
775} 776 | 799} 800 |
777static bool rt6_is_gw_or_nonexthop(const struct fib6_info *rt) | 801static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt) |
778{ | 802{ |
779 return (rt->fib6_flags & (RTF_NONEXTHOP | RTF_GATEWAY)); | 803 return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)); |
780} 781 782#ifdef CONFIG_IPV6_ROUTE_INFO 783int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, 784 const struct in6_addr *gwaddr) 785{ 786 struct net *net = dev_net(dev); 787 struct route_info *rinfo = (struct route_info *) opt; 788 struct in6_addr prefix_buf, *prefix; 789 unsigned int pref; 790 unsigned long lifetime; | 804} 805 806#ifdef CONFIG_IPV6_ROUTE_INFO 807int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, 808 const struct in6_addr *gwaddr) 809{ 810 struct net *net = dev_net(dev); 811 struct route_info *rinfo = (struct route_info *) opt; 812 struct in6_addr prefix_buf, *prefix; 813 unsigned int pref; 814 unsigned long lifetime; |
791 struct fib6_info *rt; | 815 struct rt6_info *rt; |
792 793 if (len < sizeof(struct route_info)) { 794 return -EINVAL; 795 } 796 797 /* Sanity check for prefix_len and length */ 798 if (rinfo->length > 3) { 799 return -EINVAL; --- 21 unchanged lines hidden (view full) --- 821 /* this function is safe */ 822 ipv6_addr_prefix(&prefix_buf, 823 (struct in6_addr *)rinfo->prefix, 824 rinfo->prefix_len); 825 prefix = &prefix_buf; 826 } 827 828 if (rinfo->prefix_len == 0) | 816 817 if (len < sizeof(struct route_info)) { 818 return -EINVAL; 819 } 820 821 /* Sanity check for prefix_len and length */ 822 if (rinfo->length > 3) { 823 return -EINVAL; --- 21 unchanged lines hidden (view full) --- 845 /* this function is safe */ 846 ipv6_addr_prefix(&prefix_buf, 847 (struct in6_addr *)rinfo->prefix, 848 rinfo->prefix_len); 849 prefix = &prefix_buf; 850 } 851 852 if (rinfo->prefix_len == 0) |
829 rt = rt6_get_dflt_router(net, gwaddr, dev); | 853 rt = rt6_get_dflt_router(gwaddr, dev); |
830 else 831 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, 832 gwaddr, dev); 833 834 if (rt && !lifetime) { | 854 else 855 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, 856 gwaddr, dev); 857 858 if (rt && !lifetime) { |
835 ip6_del_rt(net, rt); | 859 ip6_del_rt(rt); |
836 rt = NULL; 837 } 838 839 if (!rt && lifetime) 840 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, 841 dev, pref); 842 else if (rt) | 860 rt = NULL; 861 } 862 863 if (!rt && lifetime) 864 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, 865 dev, pref); 866 else if (rt) |
843 rt->fib6_flags = RTF_ROUTEINFO | 844 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); | 867 rt->rt6i_flags = RTF_ROUTEINFO | 868 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); |
845 846 if (rt) { 847 if (!addrconf_finite_timeout(lifetime)) | 869 870 if (rt) { 871 if (!addrconf_finite_timeout(lifetime)) |
848 fib6_clean_expires(rt); | 872 rt6_clean_expires(rt); |
849 else | 873 else |
850 fib6_set_expires(rt, jiffies + HZ * lifetime); | 874 rt6_set_expires(rt, jiffies + HZ * lifetime); |
851 | 875 |
852 fib6_info_release(rt); | 876 ip6_rt_put(rt); |
853 } 854 return 0; 855} 856#endif 857 | 877 } 878 return 0; 879} 880#endif 881 |
858/* 859 * Misc support functions 860 */ 861 862/* called with rcu_lock held */ 863static struct net_device *ip6_rt_get_dev_rcu(struct fib6_info *rt) 864{ 865 struct net_device *dev = rt->fib6_nh.nh_dev; 866 867 if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) { 868 /* for copies of local routes, dst->dev needs to be the 869 * device if it is a master device, the master device if 870 * device is enslaved, and the loopback as the default 871 */ 872 if (netif_is_l3_slave(dev) && 873 !rt6_need_strict(&rt->fib6_dst.addr)) 874 dev = l3mdev_master_dev_rcu(dev); 875 else if (!netif_is_l3_master(dev)) 876 dev = dev_net(dev)->loopback_dev; 877 /* last case is netif_is_l3_master(dev) is true in which 878 * case we want dev returned to be dev 879 */ 880 } 881 882 return dev; 883} 884 885static const int fib6_prop[RTN_MAX + 1] = { 886 [RTN_UNSPEC] = 0, 887 [RTN_UNICAST] = 0, 888 [RTN_LOCAL] = 0, 889 [RTN_BROADCAST] = 0, 890 [RTN_ANYCAST] = 0, 891 [RTN_MULTICAST] = 0, 892 [RTN_BLACKHOLE] = -EINVAL, 893 [RTN_UNREACHABLE] = -EHOSTUNREACH, 894 [RTN_PROHIBIT] = -EACCES, 895 [RTN_THROW] = -EAGAIN, 896 [RTN_NAT] = -EINVAL, 897 [RTN_XRESOLVE] = -EINVAL, 898}; 899 900static int ip6_rt_type_to_error(u8 fib6_type) 901{ 902 return fib6_prop[fib6_type]; 903} 904 905static unsigned short fib6_info_dst_flags(struct fib6_info *rt) 906{ 907 unsigned short flags = 0; 908 909 if (rt->dst_nocount) 910 flags |= DST_NOCOUNT; 911 if (rt->dst_nopolicy) 912 flags |= DST_NOPOLICY; 913 if (rt->dst_host) 914 flags |= DST_HOST; 915 916 return flags; 917} 918 919static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort) 920{ 921 rt->dst.error = ip6_rt_type_to_error(ort->fib6_type); 922 923 switch (ort->fib6_type) { 924 case RTN_BLACKHOLE: 925 rt->dst.output = dst_discard_out; 926 rt->dst.input = dst_discard; 927 break; 928 case RTN_PROHIBIT: 929 rt->dst.output = ip6_pkt_prohibit_out; 930 rt->dst.input = ip6_pkt_prohibit; 931 break; 932 case RTN_THROW: 933 case RTN_UNREACHABLE: 934 default: 935 rt->dst.output = ip6_pkt_discard_out; 936 rt->dst.input = ip6_pkt_discard; 937 break; 938 } 939} 940 941static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) 942{ 943 rt->dst.flags |= fib6_info_dst_flags(ort); 944 945 if (ort->fib6_flags & RTF_REJECT) { 946 ip6_rt_init_dst_reject(rt, ort); 947 return; 948 } 949 950 rt->dst.error = 0; 951 rt->dst.output = ip6_output; 952 953 if (ort->fib6_type == RTN_LOCAL) { 954 rt->dst.input = ip6_input; 955 } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) { 956 rt->dst.input = ip6_mc_input; 957 } else { 958 rt->dst.input = ip6_forward; 959 } 960 961 if (ort->fib6_nh.nh_lwtstate) { 962 rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate); 963 lwtunnel_set_redirect(&rt->dst); 964 } 965 966 rt->dst.lastuse = jiffies; 967} 968 969static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) 970{ 971 rt->rt6i_flags &= ~RTF_EXPIRES; 972 fib6_info_hold(from); 973 rcu_assign_pointer(rt->from, from); 974 dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); 975 if (from->fib6_metrics != &dst_default_metrics) { 976 rt->dst._metrics |= DST_METRICS_REFCOUNTED; 977 refcount_inc(&from->fib6_metrics->refcnt); 978 } 979} 980 981static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) 982{ 983 struct net_device *dev = fib6_info_nh_dev(ort); 984 985 ip6_rt_init_dst(rt, ort); 986 987 rt->rt6i_dst = ort->fib6_dst; 988 rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL; 989 rt->rt6i_gateway = ort->fib6_nh.nh_gw; 990 rt->rt6i_flags = ort->fib6_flags; 991 rt6_set_from(rt, ort); 992#ifdef CONFIG_IPV6_SUBTREES 993 rt->rt6i_src = ort->fib6_src; 994#endif 995 rt->rt6i_prefsrc = ort->fib6_prefsrc; 996 rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate); 997} 998 |
999static struct fib6_node* fib6_backtrack(struct fib6_node *fn, 1000 struct in6_addr *saddr) 1001{ 1002 struct fib6_node *pn, *sn; 1003 while (1) { 1004 if (fn->fn_flags & RTN_TL_ROOT) 1005 return NULL; 1006 pn = rcu_dereference(fn->parent); --- 19 unchanged lines hidden (view full) --- 1026 dst_hold(&rt->dst); 1027 } else { 1028 rt = NULL; 1029 } 1030 *prt = rt; 1031 return false; 1032} 1033 | 882static struct fib6_node* fib6_backtrack(struct fib6_node *fn, 883 struct in6_addr *saddr) 884{ 885 struct fib6_node *pn, *sn; 886 while (1) { 887 if (fn->fn_flags & RTN_TL_ROOT) 888 return NULL; 889 pn = rcu_dereference(fn->parent); --- 19 unchanged lines hidden (view full) --- 909 dst_hold(&rt->dst); 910 } else { 911 rt = NULL; 912 } 913 *prt = rt; 914 return false; 915} 916 |
1034/* called with rcu_lock held */ 1035static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt) 1036{ 1037 unsigned short flags = fib6_info_dst_flags(rt); 1038 struct net_device *dev = rt->fib6_nh.nh_dev; 1039 struct rt6_info *nrt; 1040 1041 nrt = ip6_dst_alloc(dev_net(dev), dev, flags); 1042 if (nrt) 1043 ip6_rt_copy_init(nrt, rt); 1044 1045 return nrt; 1046} 1047 | |
1048static struct rt6_info *ip6_pol_route_lookup(struct net *net, 1049 struct fib6_table *table, 1050 struct flowi6 *fl6, 1051 const struct sk_buff *skb, 1052 int flags) 1053{ | 917static struct rt6_info *ip6_pol_route_lookup(struct net *net, 918 struct fib6_table *table, 919 struct flowi6 *fl6, 920 const struct sk_buff *skb, 921 int flags) 922{ |
1054 struct fib6_info *f6i; | 923 struct rt6_info *rt, *rt_cache; |
1055 struct fib6_node *fn; | 924 struct fib6_node *fn; |
1056 struct rt6_info *rt; | |
1057 1058 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) 1059 flags &= ~RT6_LOOKUP_F_IFACE; 1060 1061 rcu_read_lock(); 1062 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 1063restart: | 925 926 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) 927 flags &= ~RT6_LOOKUP_F_IFACE; 928 929 rcu_read_lock(); 930 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 931restart: |
1064 f6i = rcu_dereference(fn->leaf); 1065 if (!f6i) { 1066 f6i = net->ipv6.fib6_null_entry; | 932 rt = rcu_dereference(fn->leaf); 933 if (!rt) { 934 rt = net->ipv6.ip6_null_entry; |
1067 } else { | 935 } else { |
1068 f6i = rt6_device_match(net, f6i, &fl6->saddr, | 936 rt = rt6_device_match(net, rt, &fl6->saddr, |
1069 fl6->flowi6_oif, flags); | 937 fl6->flowi6_oif, flags); |
1070 if (f6i->fib6_nsiblings && fl6->flowi6_oif == 0) 1071 f6i = rt6_multipath_select(net, f6i, fl6, 1072 fl6->flowi6_oif, skb, flags); | 938 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) 939 rt = rt6_multipath_select(net, rt, fl6, fl6->flowi6_oif, 940 skb, flags); |
1073 } | 941 } |
1074 if (f6i == net->ipv6.fib6_null_entry) { | 942 if (rt == net->ipv6.ip6_null_entry) { |
1075 fn = fib6_backtrack(fn, &fl6->saddr); 1076 if (fn) 1077 goto restart; 1078 } | 943 fn = fib6_backtrack(fn, &fl6->saddr); 944 if (fn) 945 goto restart; 946 } |
1079 | |
1080 /* Search through exception table */ | 947 /* Search through exception table */ |
1081 rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr); 1082 if (rt) { 1083 if (ip6_hold_safe(net, &rt, true)) 1084 dst_use_noref(&rt->dst, jiffies); 1085 } else if (f6i == net->ipv6.fib6_null_entry) { 1086 rt = net->ipv6.ip6_null_entry; 1087 dst_hold(&rt->dst); 1088 } else { 1089 rt = ip6_create_rt_rcu(f6i); 1090 if (!rt) { 1091 rt = net->ipv6.ip6_null_entry; 1092 dst_hold(&rt->dst); 1093 } 1094 } | 948 rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr); 949 if (rt_cache) 950 rt = rt_cache; |
1095 | 951 |
952 if (ip6_hold_safe(net, &rt, true)) 953 dst_use_noref(&rt->dst, jiffies); 954 |
|
1096 rcu_read_unlock(); 1097 1098 trace_fib6_table_lookup(net, rt, table, fl6); 1099 1100 return rt; | 955 rcu_read_unlock(); 956 957 trace_fib6_table_lookup(net, rt, table, fl6); 958 959 return rt; |
960 |
|
1101} 1102 1103struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, 1104 const struct sk_buff *skb, int flags) 1105{ 1106 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup); 1107} 1108EXPORT_SYMBOL_GPL(ip6_route_lookup); --- 25 unchanged lines hidden (view full) --- 1134EXPORT_SYMBOL(rt6_lookup); 1135 1136/* ip6_ins_rt is called with FREE table->tb6_lock. 1137 * It takes new route entry, the addition fails by any reason the 1138 * route is released. 1139 * Caller must hold dst before calling it. 1140 */ 1141 | 961} 962 963struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, 964 const struct sk_buff *skb, int flags) 965{ 966 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup); 967} 968EXPORT_SYMBOL_GPL(ip6_route_lookup); --- 25 unchanged lines hidden (view full) --- 994EXPORT_SYMBOL(rt6_lookup); 995 996/* ip6_ins_rt is called with FREE table->tb6_lock. 997 * It takes new route entry, the addition fails by any reason the 998 * route is released. 999 * Caller must hold dst before calling it. 1000 */ 1001 |
1142static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info, | 1002static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info, 1003 struct mx6_config *mxc, |
1143 struct netlink_ext_ack *extack) 1144{ 1145 int err; 1146 struct fib6_table *table; 1147 | 1004 struct netlink_ext_ack *extack) 1005{ 1006 int err; 1007 struct fib6_table *table; 1008 |
1148 table = rt->fib6_table; | 1009 table = rt->rt6i_table; |
1149 spin_lock_bh(&table->tb6_lock); | 1010 spin_lock_bh(&table->tb6_lock); |
1150 err = fib6_add(&table->tb6_root, rt, info, extack); | 1011 err = fib6_add(&table->tb6_root, rt, info, mxc, extack); |
1151 spin_unlock_bh(&table->tb6_lock); 1152 1153 return err; 1154} 1155 | 1012 spin_unlock_bh(&table->tb6_lock); 1013 1014 return err; 1015} 1016 |
1156int ip6_ins_rt(struct net *net, struct fib6_info *rt) | 1017int ip6_ins_rt(struct rt6_info *rt) |
1157{ | 1018{ |
1158 struct nl_info info = { .nl_net = net, }; | 1019 struct nl_info info = { .nl_net = dev_net(rt->dst.dev), }; 1020 struct mx6_config mxc = { .mx = NULL, }; |
1159 | 1021 |
1160 return __ip6_ins_rt(rt, &info, NULL); | 1022 /* Hold dst to account for the reference from the fib6 tree */ 1023 dst_hold(&rt->dst); 1024 return __ip6_ins_rt(rt, &info, &mxc, NULL); |
1161} 1162 | 1025} 1026 |
1163static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort, | 1027/* called with rcu_lock held */ 1028static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt) 1029{ 1030 struct net_device *dev = rt->dst.dev; 1031 1032 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) { 1033 /* for copies of local routes, dst->dev needs to be the 1034 * device if it is a master device, the master device if 1035 * device is enslaved, and the loopback as the default 1036 */ 1037 if (netif_is_l3_slave(dev) && 1038 !rt6_need_strict(&rt->rt6i_dst.addr)) 1039 dev = l3mdev_master_dev_rcu(dev); 1040 else if (!netif_is_l3_master(dev)) 1041 dev = dev_net(dev)->loopback_dev; 1042 /* last case is netif_is_l3_master(dev) is true in which 1043 * case we want dev returned to be dev 1044 */ 1045 } 1046 1047 return dev; 1048} 1049 1050static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort, |
1164 const struct in6_addr *daddr, 1165 const struct in6_addr *saddr) 1166{ 1167 struct net_device *dev; 1168 struct rt6_info *rt; 1169 1170 /* 1171 * Clone the route. 1172 */ 1173 | 1051 const struct in6_addr *daddr, 1052 const struct in6_addr *saddr) 1053{ 1054 struct net_device *dev; 1055 struct rt6_info *rt; 1056 1057 /* 1058 * Clone the route. 1059 */ 1060 |
1061 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)) 1062 ort = ort->from; 1063 1064 rcu_read_lock(); |
|
1174 dev = ip6_rt_get_dev_rcu(ort); | 1065 dev = ip6_rt_get_dev_rcu(ort); |
1175 rt = ip6_dst_alloc(dev_net(dev), dev, 0); | 1066 rt = __ip6_dst_alloc(dev_net(dev), dev, 0); 1067 rcu_read_unlock(); |
1176 if (!rt) 1177 return NULL; 1178 1179 ip6_rt_copy_init(rt, ort); 1180 rt->rt6i_flags |= RTF_CACHE; | 1068 if (!rt) 1069 return NULL; 1070 1071 ip6_rt_copy_init(rt, ort); 1072 rt->rt6i_flags |= RTF_CACHE; |
1073 rt->rt6i_metric = 0; |
|
1181 rt->dst.flags |= DST_HOST; 1182 rt->rt6i_dst.addr = *daddr; 1183 rt->rt6i_dst.plen = 128; 1184 1185 if (!rt6_is_gw_or_nonexthop(ort)) { | 1074 rt->dst.flags |= DST_HOST; 1075 rt->rt6i_dst.addr = *daddr; 1076 rt->rt6i_dst.plen = 128; 1077 1078 if (!rt6_is_gw_or_nonexthop(ort)) { |
1186 if (ort->fib6_dst.plen != 128 && 1187 ipv6_addr_equal(&ort->fib6_dst.addr, daddr)) | 1079 if (ort->rt6i_dst.plen != 128 && 1080 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) |
1188 rt->rt6i_flags |= RTF_ANYCAST; 1189#ifdef CONFIG_IPV6_SUBTREES 1190 if (rt->rt6i_src.plen && saddr) { 1191 rt->rt6i_src.addr = *saddr; 1192 rt->rt6i_src.plen = 128; 1193 } 1194#endif 1195 } 1196 1197 return rt; 1198} 1199 | 1081 rt->rt6i_flags |= RTF_ANYCAST; 1082#ifdef CONFIG_IPV6_SUBTREES 1083 if (rt->rt6i_src.plen && saddr) { 1084 rt->rt6i_src.addr = *saddr; 1085 rt->rt6i_src.plen = 128; 1086 } 1087#endif 1088 } 1089 1090 return rt; 1091} 1092 |
1200static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt) | 1093static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt) |
1201{ | 1094{ |
1202 unsigned short flags = fib6_info_dst_flags(rt); | |
1203 struct net_device *dev; 1204 struct rt6_info *pcpu_rt; 1205 1206 rcu_read_lock(); 1207 dev = ip6_rt_get_dev_rcu(rt); | 1095 struct net_device *dev; 1096 struct rt6_info *pcpu_rt; 1097 1098 rcu_read_lock(); 1099 dev = ip6_rt_get_dev_rcu(rt); |
1208 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags); | 1100 pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, rt->dst.flags); |
1209 rcu_read_unlock(); 1210 if (!pcpu_rt) 1211 return NULL; 1212 ip6_rt_copy_init(pcpu_rt, rt); | 1101 rcu_read_unlock(); 1102 if (!pcpu_rt) 1103 return NULL; 1104 ip6_rt_copy_init(pcpu_rt, rt); |
1105 pcpu_rt->rt6i_protocol = rt->rt6i_protocol; |
|
1213 pcpu_rt->rt6i_flags |= RTF_PCPU; 1214 return pcpu_rt; 1215} 1216 1217/* It should be called with rcu_read_lock() acquired */ | 1106 pcpu_rt->rt6i_flags |= RTF_PCPU; 1107 return pcpu_rt; 1108} 1109 1110/* It should be called with rcu_read_lock() acquired */ |
1218static struct rt6_info *rt6_get_pcpu_route(struct fib6_info *rt) | 1111static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) |
1219{ 1220 struct rt6_info *pcpu_rt, **p; 1221 1222 p = this_cpu_ptr(rt->rt6i_pcpu); 1223 pcpu_rt = *p; 1224 | 1112{ 1113 struct rt6_info *pcpu_rt, **p; 1114 1115 p = this_cpu_ptr(rt->rt6i_pcpu); 1116 pcpu_rt = *p; 1117 |
1225 if (pcpu_rt) 1226 ip6_hold_safe(NULL, &pcpu_rt, false); | 1118 if (pcpu_rt && ip6_hold_safe(NULL, &pcpu_rt, false)) 1119 rt6_dst_from_metrics_check(pcpu_rt); |
1227 1228 return pcpu_rt; 1229} 1230 | 1120 1121 return pcpu_rt; 1122} 1123 |
1231static struct rt6_info *rt6_make_pcpu_route(struct net *net, 1232 struct fib6_info *rt) | 1124static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt) |
1233{ 1234 struct rt6_info *pcpu_rt, *prev, **p; 1235 1236 pcpu_rt = ip6_rt_pcpu_alloc(rt); 1237 if (!pcpu_rt) { | 1125{ 1126 struct rt6_info *pcpu_rt, *prev, **p; 1127 1128 pcpu_rt = ip6_rt_pcpu_alloc(rt); 1129 if (!pcpu_rt) { |
1130 struct net *net = dev_net(rt->dst.dev); 1131 |
|
1238 dst_hold(&net->ipv6.ip6_null_entry->dst); 1239 return net->ipv6.ip6_null_entry; 1240 } 1241 1242 dst_hold(&pcpu_rt->dst); 1243 p = this_cpu_ptr(rt->rt6i_pcpu); 1244 prev = cmpxchg(p, NULL, pcpu_rt); 1245 BUG_ON(prev); 1246 | 1132 dst_hold(&net->ipv6.ip6_null_entry->dst); 1133 return net->ipv6.ip6_null_entry; 1134 } 1135 1136 dst_hold(&pcpu_rt->dst); 1137 p = this_cpu_ptr(rt->rt6i_pcpu); 1138 prev = cmpxchg(p, NULL, pcpu_rt); 1139 BUG_ON(prev); 1140 |
1141 rt6_dst_from_metrics_check(pcpu_rt); |
|
1247 return pcpu_rt; 1248} 1249 1250/* exception hash table implementation 1251 */ 1252static DEFINE_SPINLOCK(rt6_exception_lock); 1253 1254/* Remove rt6_ex from hash table and free the memory 1255 * Caller must hold rt6_exception_lock 1256 */ 1257static void rt6_remove_exception(struct rt6_exception_bucket *bucket, 1258 struct rt6_exception *rt6_ex) 1259{ 1260 struct net *net; 1261 1262 if (!bucket || !rt6_ex) 1263 return; 1264 1265 net = dev_net(rt6_ex->rt6i->dst.dev); | 1142 return pcpu_rt; 1143} 1144 1145/* exception hash table implementation 1146 */ 1147static DEFINE_SPINLOCK(rt6_exception_lock); 1148 1149/* Remove rt6_ex from hash table and free the memory 1150 * Caller must hold rt6_exception_lock 1151 */ 1152static void rt6_remove_exception(struct rt6_exception_bucket *bucket, 1153 struct rt6_exception *rt6_ex) 1154{ 1155 struct net *net; 1156 1157 if (!bucket || !rt6_ex) 1158 return; 1159 1160 net = dev_net(rt6_ex->rt6i->dst.dev); |
1161 rt6_ex->rt6i->rt6i_node = NULL; |
|
1266 hlist_del_rcu(&rt6_ex->hlist); | 1162 hlist_del_rcu(&rt6_ex->hlist); |
1267 dst_release(&rt6_ex->rt6i->dst); | 1163 rt6_release(rt6_ex->rt6i); |
1268 kfree_rcu(rt6_ex, rcu); 1269 WARN_ON_ONCE(!bucket->depth); 1270 bucket->depth--; 1271 net->ipv6.rt6_stats->fib_rt_cache--; 1272} 1273 1274/* Remove oldest rt6_ex in bucket and free the memory 1275 * Caller must hold rt6_exception_lock --- 91 unchanged lines hidden (view full) --- 1367 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr); 1368#endif 1369 if (matched) 1370 return rt6_ex; 1371 } 1372 return NULL; 1373} 1374 | 1164 kfree_rcu(rt6_ex, rcu); 1165 WARN_ON_ONCE(!bucket->depth); 1166 bucket->depth--; 1167 net->ipv6.rt6_stats->fib_rt_cache--; 1168} 1169 1170/* Remove oldest rt6_ex in bucket and free the memory 1171 * Caller must hold rt6_exception_lock --- 91 unchanged lines hidden (view full) --- 1263 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr); 1264#endif 1265 if (matched) 1266 return rt6_ex; 1267 } 1268 return NULL; 1269} 1270 |
1375static unsigned int fib6_mtu(const struct fib6_info *rt) 1376{ 1377 unsigned int mtu; 1378 1379 if (rt->fib6_pmtu) { 1380 mtu = rt->fib6_pmtu; 1381 } else { 1382 struct net_device *dev = fib6_info_nh_dev(rt); 1383 struct inet6_dev *idev; 1384 1385 rcu_read_lock(); 1386 idev = __in6_dev_get(dev); 1387 mtu = idev->cnf.mtu6; 1388 rcu_read_unlock(); 1389 } 1390 1391 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); 1392 1393 return mtu - lwtunnel_headroom(rt->fib6_nh.nh_lwtstate, mtu); 1394} 1395 | |
1396static int rt6_insert_exception(struct rt6_info *nrt, | 1271static int rt6_insert_exception(struct rt6_info *nrt, |
1397 struct fib6_info *ort) | 1272 struct rt6_info *ort) |
1398{ | 1273{ |
1399 struct net *net = dev_net(nrt->dst.dev); | 1274 struct net *net = dev_net(ort->dst.dev); |
1400 struct rt6_exception_bucket *bucket; 1401 struct in6_addr *src_key = NULL; 1402 struct rt6_exception *rt6_ex; 1403 int err = 0; 1404 | 1275 struct rt6_exception_bucket *bucket; 1276 struct in6_addr *src_key = NULL; 1277 struct rt6_exception *rt6_ex; 1278 int err = 0; 1279 |
1280 /* ort can't be a cache or pcpu route */ 1281 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)) 1282 ort = ort->from; 1283 WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)); 1284 |
1405 spin_lock_bh(&rt6_exception_lock); 1406 1407 if (ort->exception_bucket_flushed) { 1408 err = -EINVAL; 1409 goto out; 1410 } 1411 1412 bucket = rcu_dereference_protected(ort->rt6i_exception_bucket, --- 10 unchanged lines hidden (view full) --- 1423 1424#ifdef CONFIG_IPV6_SUBTREES 1425 /* rt6i_src.plen != 0 indicates ort is in subtree 1426 * and exception table is indexed by a hash of 1427 * both rt6i_dst and rt6i_src. 1428 * Otherwise, the exception table is indexed by 1429 * a hash of only rt6i_dst. 1430 */ | 1285 spin_lock_bh(&rt6_exception_lock); 1286 1287 if (ort->exception_bucket_flushed) { 1288 err = -EINVAL; 1289 goto out; 1290 } 1291 1292 bucket = rcu_dereference_protected(ort->rt6i_exception_bucket, --- 10 unchanged lines hidden (view full) --- 1303 1304#ifdef CONFIG_IPV6_SUBTREES 1305 /* rt6i_src.plen != 0 indicates ort is in subtree 1306 * and exception table is indexed by a hash of 1307 * both rt6i_dst and rt6i_src. 1308 * Otherwise, the exception table is indexed by 1309 * a hash of only rt6i_dst. 1310 */ |
1431 if (ort->fib6_src.plen) | 1311 if (ort->rt6i_src.plen) |
1432 src_key = &nrt->rt6i_src.addr; 1433#endif 1434 1435 /* Update rt6i_prefsrc as it could be changed 1436 * in rt6_remove_prefsrc() 1437 */ | 1312 src_key = &nrt->rt6i_src.addr; 1313#endif 1314 1315 /* Update rt6i_prefsrc as it could be changed 1316 * in rt6_remove_prefsrc() 1317 */ |
1438 nrt->rt6i_prefsrc = ort->fib6_prefsrc; | 1318 nrt->rt6i_prefsrc = ort->rt6i_prefsrc; |
1439 /* rt6_mtu_change() might lower mtu on ort. 1440 * Only insert this exception route if its mtu 1441 * is less than ort's mtu value. 1442 */ | 1319 /* rt6_mtu_change() might lower mtu on ort. 1320 * Only insert this exception route if its mtu 1321 * is less than ort's mtu value. 1322 */ |
1443 if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(ort)) { | 1323 if (nrt->rt6i_pmtu >= dst_mtu(&ort->dst)) { |
1444 err = -EINVAL; 1445 goto out; 1446 } 1447 1448 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr, 1449 src_key); 1450 if (rt6_ex) 1451 rt6_remove_exception(bucket, rt6_ex); 1452 1453 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC); 1454 if (!rt6_ex) { 1455 err = -ENOMEM; 1456 goto out; 1457 } 1458 rt6_ex->rt6i = nrt; 1459 rt6_ex->stamp = jiffies; | 1324 err = -EINVAL; 1325 goto out; 1326 } 1327 1328 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr, 1329 src_key); 1330 if (rt6_ex) 1331 rt6_remove_exception(bucket, rt6_ex); 1332 1333 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC); 1334 if (!rt6_ex) { 1335 err = -ENOMEM; 1336 goto out; 1337 } 1338 rt6_ex->rt6i = nrt; 1339 rt6_ex->stamp = jiffies; |
1340 atomic_inc(&nrt->rt6i_ref); 1341 nrt->rt6i_node = ort->rt6i_node; |
1460 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain); 1461 bucket->depth++; 1462 net->ipv6.rt6_stats->fib_rt_cache++; 1463 1464 if (bucket->depth > FIB6_MAX_DEPTH) 1465 rt6_exception_remove_oldest(bucket); 1466 1467out: 1468 spin_unlock_bh(&rt6_exception_lock); 1469 1470 /* Update fn->fn_sernum to invalidate all cached dst */ 1471 if (!err) { | 1342 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain); 1343 bucket->depth++; 1344 net->ipv6.rt6_stats->fib_rt_cache++; 1345 1346 if (bucket->depth > FIB6_MAX_DEPTH) 1347 rt6_exception_remove_oldest(bucket); 1348 1349out: 1350 spin_unlock_bh(&rt6_exception_lock); 1351 1352 /* Update fn->fn_sernum to invalidate all cached dst */ 1353 if (!err) { |
1472 spin_lock_bh(&ort->fib6_table->tb6_lock); 1473 fib6_update_sernum(net, ort); 1474 spin_unlock_bh(&ort->fib6_table->tb6_lock); | 1354 spin_lock_bh(&ort->rt6i_table->tb6_lock); 1355 fib6_update_sernum(ort); 1356 spin_unlock_bh(&ort->rt6i_table->tb6_lock); |
1475 fib6_force_start_gc(net); 1476 } 1477 1478 return err; 1479} 1480 | 1357 fib6_force_start_gc(net); 1358 } 1359 1360 return err; 1361} 1362 |
1481void rt6_flush_exceptions(struct fib6_info *rt) | 1363void rt6_flush_exceptions(struct rt6_info *rt) |
1482{ 1483 struct rt6_exception_bucket *bucket; 1484 struct rt6_exception *rt6_ex; 1485 struct hlist_node *tmp; 1486 int i; 1487 1488 spin_lock_bh(&rt6_exception_lock); 1489 /* Prevent rt6_insert_exception() to recreate the bucket list */ --- 13 unchanged lines hidden (view full) --- 1503 1504out: 1505 spin_unlock_bh(&rt6_exception_lock); 1506} 1507 1508/* Find cached rt in the hash table inside passed in rt 1509 * Caller has to hold rcu_read_lock() 1510 */ | 1364{ 1365 struct rt6_exception_bucket *bucket; 1366 struct rt6_exception *rt6_ex; 1367 struct hlist_node *tmp; 1368 int i; 1369 1370 spin_lock_bh(&rt6_exception_lock); 1371 /* Prevent rt6_insert_exception() to recreate the bucket list */ --- 13 unchanged lines hidden (view full) --- 1385 1386out: 1387 spin_unlock_bh(&rt6_exception_lock); 1388} 1389 1390/* Find cached rt in the hash table inside passed in rt 1391 * Caller has to hold rcu_read_lock() 1392 */ |
1511static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt, | 1393static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt, |
1512 struct in6_addr *daddr, 1513 struct in6_addr *saddr) 1514{ 1515 struct rt6_exception_bucket *bucket; 1516 struct in6_addr *src_key = NULL; 1517 struct rt6_exception *rt6_ex; 1518 struct rt6_info *res = NULL; 1519 1520 bucket = rcu_dereference(rt->rt6i_exception_bucket); 1521 1522#ifdef CONFIG_IPV6_SUBTREES 1523 /* rt6i_src.plen != 0 indicates rt is in subtree 1524 * and exception table is indexed by a hash of 1525 * both rt6i_dst and rt6i_src. 1526 * Otherwise, the exception table is indexed by 1527 * a hash of only rt6i_dst. 1528 */ | 1394 struct in6_addr *daddr, 1395 struct in6_addr *saddr) 1396{ 1397 struct rt6_exception_bucket *bucket; 1398 struct in6_addr *src_key = NULL; 1399 struct rt6_exception *rt6_ex; 1400 struct rt6_info *res = NULL; 1401 1402 bucket = rcu_dereference(rt->rt6i_exception_bucket); 1403 1404#ifdef CONFIG_IPV6_SUBTREES 1405 /* rt6i_src.plen != 0 indicates rt is in subtree 1406 * and exception table is indexed by a hash of 1407 * both rt6i_dst and rt6i_src. 1408 * Otherwise, the exception table is indexed by 1409 * a hash of only rt6i_dst. 1410 */ |
1529 if (rt->fib6_src.plen) | 1411 if (rt->rt6i_src.plen) |
1530 src_key = saddr; 1531#endif 1532 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key); 1533 1534 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i)) 1535 res = rt6_ex->rt6i; 1536 1537 return res; 1538} 1539 1540/* Remove the passed in cached rt from the hash table that contains it */ | 1412 src_key = saddr; 1413#endif 1414 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key); 1415 1416 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i)) 1417 res = rt6_ex->rt6i; 1418 1419 return res; 1420} 1421 1422/* Remove the passed in cached rt from the hash table that contains it */ |
1541static int rt6_remove_exception_rt(struct rt6_info *rt) | 1423int rt6_remove_exception_rt(struct rt6_info *rt) |
1542{ 1543 struct rt6_exception_bucket *bucket; | 1424{ 1425 struct rt6_exception_bucket *bucket; |
1544 struct fib6_info *from = rt->from; | 1426 struct rt6_info *from = rt->from; |
1545 struct in6_addr *src_key = NULL; 1546 struct rt6_exception *rt6_ex; 1547 int err; 1548 1549 if (!from || 1550 !(rt->rt6i_flags & RTF_CACHE)) 1551 return -EINVAL; 1552 --- 5 unchanged lines hidden (view full) --- 1558 lockdep_is_held(&rt6_exception_lock)); 1559#ifdef CONFIG_IPV6_SUBTREES 1560 /* rt6i_src.plen != 0 indicates 'from' is in subtree 1561 * and exception table is indexed by a hash of 1562 * both rt6i_dst and rt6i_src. 1563 * Otherwise, the exception table is indexed by 1564 * a hash of only rt6i_dst. 1565 */ | 1427 struct in6_addr *src_key = NULL; 1428 struct rt6_exception *rt6_ex; 1429 int err; 1430 1431 if (!from || 1432 !(rt->rt6i_flags & RTF_CACHE)) 1433 return -EINVAL; 1434 --- 5 unchanged lines hidden (view full) --- 1440 lockdep_is_held(&rt6_exception_lock)); 1441#ifdef CONFIG_IPV6_SUBTREES 1442 /* rt6i_src.plen != 0 indicates 'from' is in subtree 1443 * and exception table is indexed by a hash of 1444 * both rt6i_dst and rt6i_src. 1445 * Otherwise, the exception table is indexed by 1446 * a hash of only rt6i_dst. 1447 */ |
1566 if (from->fib6_src.plen) | 1448 if (from->rt6i_src.plen) |
1567 src_key = &rt->rt6i_src.addr; 1568#endif 1569 rt6_ex = __rt6_find_exception_spinlock(&bucket, 1570 &rt->rt6i_dst.addr, 1571 src_key); 1572 if (rt6_ex) { 1573 rt6_remove_exception(bucket, rt6_ex); 1574 err = 0; --- 6 unchanged lines hidden (view full) --- 1581} 1582 1583/* Find rt6_ex which contains the passed in rt cache and 1584 * refresh its stamp 1585 */ 1586static void rt6_update_exception_stamp_rt(struct rt6_info *rt) 1587{ 1588 struct rt6_exception_bucket *bucket; | 1449 src_key = &rt->rt6i_src.addr; 1450#endif 1451 rt6_ex = __rt6_find_exception_spinlock(&bucket, 1452 &rt->rt6i_dst.addr, 1453 src_key); 1454 if (rt6_ex) { 1455 rt6_remove_exception(bucket, rt6_ex); 1456 err = 0; --- 6 unchanged lines hidden (view full) --- 1463} 1464 1465/* Find rt6_ex which contains the passed in rt cache and 1466 * refresh its stamp 1467 */ 1468static void rt6_update_exception_stamp_rt(struct rt6_info *rt) 1469{ 1470 struct rt6_exception_bucket *bucket; |
1589 struct fib6_info *from = rt->from; | 1471 struct rt6_info *from = rt->from; |
1590 struct in6_addr *src_key = NULL; 1591 struct rt6_exception *rt6_ex; 1592 1593 if (!from || 1594 !(rt->rt6i_flags & RTF_CACHE)) 1595 return; 1596 1597 rcu_read_lock(); 1598 bucket = rcu_dereference(from->rt6i_exception_bucket); 1599 1600#ifdef CONFIG_IPV6_SUBTREES 1601 /* rt6i_src.plen != 0 indicates 'from' is in subtree 1602 * and exception table is indexed by a hash of 1603 * both rt6i_dst and rt6i_src. 1604 * Otherwise, the exception table is indexed by 1605 * a hash of only rt6i_dst. 1606 */ | 1472 struct in6_addr *src_key = NULL; 1473 struct rt6_exception *rt6_ex; 1474 1475 if (!from || 1476 !(rt->rt6i_flags & RTF_CACHE)) 1477 return; 1478 1479 rcu_read_lock(); 1480 bucket = rcu_dereference(from->rt6i_exception_bucket); 1481 1482#ifdef CONFIG_IPV6_SUBTREES 1483 /* rt6i_src.plen != 0 indicates 'from' is in subtree 1484 * and exception table is indexed by a hash of 1485 * both rt6i_dst and rt6i_src. 1486 * Otherwise, the exception table is indexed by 1487 * a hash of only rt6i_dst. 1488 */ |
1607 if (from->fib6_src.plen) | 1489 if (from->rt6i_src.plen) |
1608 src_key = &rt->rt6i_src.addr; 1609#endif 1610 rt6_ex = __rt6_find_exception_rcu(&bucket, 1611 &rt->rt6i_dst.addr, 1612 src_key); 1613 if (rt6_ex) 1614 rt6_ex->stamp = jiffies; 1615 1616 rcu_read_unlock(); 1617} 1618 | 1490 src_key = &rt->rt6i_src.addr; 1491#endif 1492 rt6_ex = __rt6_find_exception_rcu(&bucket, 1493 &rt->rt6i_dst.addr, 1494 src_key); 1495 if (rt6_ex) 1496 rt6_ex->stamp = jiffies; 1497 1498 rcu_read_unlock(); 1499} 1500 |
1619static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt) | 1501static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt) |
1620{ 1621 struct rt6_exception_bucket *bucket; 1622 struct rt6_exception *rt6_ex; 1623 int i; 1624 1625 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1626 lockdep_is_held(&rt6_exception_lock)); 1627 --- 25 unchanged lines hidden (view full) --- 1653 1654 if (dst_mtu(&rt->dst) == idev->cnf.mtu6) 1655 return true; 1656 1657 return false; 1658} 1659 1660static void rt6_exceptions_update_pmtu(struct inet6_dev *idev, | 1502{ 1503 struct rt6_exception_bucket *bucket; 1504 struct rt6_exception *rt6_ex; 1505 int i; 1506 1507 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1508 lockdep_is_held(&rt6_exception_lock)); 1509 --- 25 unchanged lines hidden (view full) --- 1535 1536 if (dst_mtu(&rt->dst) == idev->cnf.mtu6) 1537 return true; 1538 1539 return false; 1540} 1541 1542static void rt6_exceptions_update_pmtu(struct inet6_dev *idev, |
1661 struct fib6_info *rt, int mtu) | 1543 struct rt6_info *rt, int mtu) |
1662{ 1663 struct rt6_exception_bucket *bucket; 1664 struct rt6_exception *rt6_ex; 1665 int i; 1666 1667 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1668 lockdep_is_held(&rt6_exception_lock)); 1669 1670 if (!bucket) 1671 return; 1672 1673 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 1674 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { 1675 struct rt6_info *entry = rt6_ex->rt6i; 1676 1677 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected | 1544{ 1545 struct rt6_exception_bucket *bucket; 1546 struct rt6_exception *rt6_ex; 1547 int i; 1548 1549 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1550 lockdep_is_held(&rt6_exception_lock)); 1551 1552 if (!bucket) 1553 return; 1554 1555 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 1556 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { 1557 struct rt6_info *entry = rt6_ex->rt6i; 1558 1559 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected |
1678 * route), the metrics of its rt->from have already | 1560 * route), the metrics of its rt->dst.from have already |
1679 * been updated. 1680 */ | 1561 * been updated. 1562 */ |
1681 if (dst_metric_raw(&entry->dst, RTAX_MTU) && | 1563 if (entry->rt6i_pmtu && |
1682 rt6_mtu_change_route_allowed(idev, entry, mtu)) | 1564 rt6_mtu_change_route_allowed(idev, entry, mtu)) |
1683 dst_metric_set(&entry->dst, RTAX_MTU, mtu); | 1565 entry->rt6i_pmtu = mtu; |
1684 } 1685 bucket++; 1686 } 1687} 1688 1689#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE) 1690 | 1566 } 1567 bucket++; 1568 } 1569} 1570 1571#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE) 1572 |
1691static void rt6_exceptions_clean_tohost(struct fib6_info *rt, | 1573static void rt6_exceptions_clean_tohost(struct rt6_info *rt, |
1692 struct in6_addr *gateway) 1693{ 1694 struct rt6_exception_bucket *bucket; 1695 struct rt6_exception *rt6_ex; 1696 struct hlist_node *tmp; 1697 int i; 1698 1699 if (!rcu_access_pointer(rt->rt6i_exception_bucket)) --- 62 unchanged lines hidden (view full) --- 1762 rt6_remove_exception(bucket, rt6_ex); 1763 return; 1764 } 1765 } 1766 1767 gc_args->more++; 1768} 1769 | 1574 struct in6_addr *gateway) 1575{ 1576 struct rt6_exception_bucket *bucket; 1577 struct rt6_exception *rt6_ex; 1578 struct hlist_node *tmp; 1579 int i; 1580 1581 if (!rcu_access_pointer(rt->rt6i_exception_bucket)) --- 62 unchanged lines hidden (view full) --- 1644 rt6_remove_exception(bucket, rt6_ex); 1645 return; 1646 } 1647 } 1648 1649 gc_args->more++; 1650} 1651 |
1770void rt6_age_exceptions(struct fib6_info *rt, | 1652void rt6_age_exceptions(struct rt6_info *rt, |
1771 struct fib6_gc_args *gc_args, 1772 unsigned long now) 1773{ 1774 struct rt6_exception_bucket *bucket; 1775 struct rt6_exception *rt6_ex; 1776 struct hlist_node *tmp; 1777 int i; 1778 --- 19 unchanged lines hidden (view full) --- 1798 rcu_read_unlock_bh(); 1799} 1800 1801struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, 1802 int oif, struct flowi6 *fl6, 1803 const struct sk_buff *skb, int flags) 1804{ 1805 struct fib6_node *fn, *saved_fn; | 1653 struct fib6_gc_args *gc_args, 1654 unsigned long now) 1655{ 1656 struct rt6_exception_bucket *bucket; 1657 struct rt6_exception *rt6_ex; 1658 struct hlist_node *tmp; 1659 int i; 1660 --- 19 unchanged lines hidden (view full) --- 1680 rcu_read_unlock_bh(); 1681} 1682 1683struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, 1684 int oif, struct flowi6 *fl6, 1685 const struct sk_buff *skb, int flags) 1686{ 1687 struct fib6_node *fn, *saved_fn; |
1806 struct fib6_info *f6i; 1807 struct rt6_info *rt; | 1688 struct rt6_info *rt, *rt_cache; |
1808 int strict = 0; 1809 1810 strict |= flags & RT6_LOOKUP_F_IFACE; 1811 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE; 1812 if (net->ipv6.devconf_all->forwarding == 0) 1813 strict |= RT6_LOOKUP_F_REACHABLE; 1814 1815 rcu_read_lock(); 1816 1817 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 1818 saved_fn = fn; 1819 1820 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) 1821 oif = 0; 1822 1823redo_rt6_select: | 1689 int strict = 0; 1690 1691 strict |= flags & RT6_LOOKUP_F_IFACE; 1692 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE; 1693 if (net->ipv6.devconf_all->forwarding == 0) 1694 strict |= RT6_LOOKUP_F_REACHABLE; 1695 1696 rcu_read_lock(); 1697 1698 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 1699 saved_fn = fn; 1700 1701 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) 1702 oif = 0; 1703 1704redo_rt6_select: |
1824 f6i = rt6_select(net, fn, oif, strict); 1825 if (f6i->fib6_nsiblings) 1826 f6i = rt6_multipath_select(net, f6i, fl6, oif, skb, strict); 1827 if (f6i == net->ipv6.fib6_null_entry) { | 1705 rt = rt6_select(net, fn, oif, strict); 1706 if (rt->rt6i_nsiblings) 1707 rt = rt6_multipath_select(net, rt, fl6, oif, skb, strict); 1708 if (rt == net->ipv6.ip6_null_entry) { |
1828 fn = fib6_backtrack(fn, &fl6->saddr); 1829 if (fn) 1830 goto redo_rt6_select; 1831 else if (strict & RT6_LOOKUP_F_REACHABLE) { 1832 /* also consider unreachable route */ 1833 strict &= ~RT6_LOOKUP_F_REACHABLE; 1834 fn = saved_fn; 1835 goto redo_rt6_select; 1836 } 1837 } 1838 | 1709 fn = fib6_backtrack(fn, &fl6->saddr); 1710 if (fn) 1711 goto redo_rt6_select; 1712 else if (strict & RT6_LOOKUP_F_REACHABLE) { 1713 /* also consider unreachable route */ 1714 strict &= ~RT6_LOOKUP_F_REACHABLE; 1715 fn = saved_fn; 1716 goto redo_rt6_select; 1717 } 1718 } 1719 |
1839 if (f6i == net->ipv6.fib6_null_entry) { 1840 rt = net->ipv6.ip6_null_entry; | 1720 /*Search through exception table */ 1721 rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr); 1722 if (rt_cache) 1723 rt = rt_cache; 1724 1725 if (rt == net->ipv6.ip6_null_entry) { |
1841 rcu_read_unlock(); 1842 dst_hold(&rt->dst); 1843 trace_fib6_table_lookup(net, rt, table, fl6); 1844 return rt; | 1726 rcu_read_unlock(); 1727 dst_hold(&rt->dst); 1728 trace_fib6_table_lookup(net, rt, table, fl6); 1729 return rt; |
1845 } 1846 1847 /*Search through exception table */ 1848 rt = rt6_find_cached_rt(f6i, &fl6->daddr, &fl6->saddr); 1849 if (rt) { 1850 if (ip6_hold_safe(net, &rt, true)) | 1730 } else if (rt->rt6i_flags & RTF_CACHE) { 1731 if (ip6_hold_safe(net, &rt, true)) { |
1851 dst_use_noref(&rt->dst, jiffies); | 1732 dst_use_noref(&rt->dst, jiffies); |
1852 | 1733 rt6_dst_from_metrics_check(rt); 1734 } |
1853 rcu_read_unlock(); 1854 trace_fib6_table_lookup(net, rt, table, fl6); 1855 return rt; 1856 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) && | 1735 rcu_read_unlock(); 1736 trace_fib6_table_lookup(net, rt, table, fl6); 1737 return rt; 1738 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) && |
1857 !(f6i->fib6_flags & RTF_GATEWAY))) { | 1739 !(rt->rt6i_flags & RTF_GATEWAY))) { |
1858 /* Create a RTF_CACHE clone which will not be 1859 * owned by the fib6 tree. It is for the special case where 1860 * the daddr in the skb during the neighbor look-up is different 1861 * from the fl6->daddr used to look-up route here. 1862 */ | 1740 /* Create a RTF_CACHE clone which will not be 1741 * owned by the fib6 tree. It is for the special case where 1742 * the daddr in the skb during the neighbor look-up is different 1743 * from the fl6->daddr used to look-up route here. 1744 */ |
1745 |
1863 struct rt6_info *uncached_rt; 1864 | 1746 struct rt6_info *uncached_rt; 1747 |
1865 uncached_rt = ip6_rt_cache_alloc(f6i, &fl6->daddr, NULL); 1866 | 1748 if (ip6_hold_safe(net, &rt, true)) { 1749 dst_use_noref(&rt->dst, jiffies); 1750 } else { 1751 rcu_read_unlock(); 1752 uncached_rt = rt; 1753 goto uncached_rt_out; 1754 } |
1867 rcu_read_unlock(); 1868 | 1755 rcu_read_unlock(); 1756 |
1757 uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL); 1758 dst_release(&rt->dst); 1759 |
1869 if (uncached_rt) { 1870 /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc() 1871 * No need for another dst_hold() 1872 */ 1873 rt6_uncached_list_add(uncached_rt); 1874 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache); 1875 } else { 1876 uncached_rt = net->ipv6.ip6_null_entry; 1877 dst_hold(&uncached_rt->dst); 1878 } 1879 | 1760 if (uncached_rt) { 1761 /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc() 1762 * No need for another dst_hold() 1763 */ 1764 rt6_uncached_list_add(uncached_rt); 1765 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache); 1766 } else { 1767 uncached_rt = net->ipv6.ip6_null_entry; 1768 dst_hold(&uncached_rt->dst); 1769 } 1770 |
1771uncached_rt_out: |
1880 trace_fib6_table_lookup(net, uncached_rt, table, fl6); 1881 return uncached_rt; 1882 1883 } else { 1884 /* Get a percpu copy */ 1885 1886 struct rt6_info *pcpu_rt; 1887 | 1772 trace_fib6_table_lookup(net, uncached_rt, table, fl6); 1773 return uncached_rt; 1774 1775 } else { 1776 /* Get a percpu copy */ 1777 1778 struct rt6_info *pcpu_rt; 1779 |
1780 dst_use_noref(&rt->dst, jiffies); |
1888 local_bh_disable(); | 1781 local_bh_disable(); |
1889 pcpu_rt = rt6_get_pcpu_route(f6i); | 1782 pcpu_rt = rt6_get_pcpu_route(rt); |
1890 | 1783 |
1891 if (!pcpu_rt) 1892 pcpu_rt = rt6_make_pcpu_route(net, f6i); 1893 | 1784 if (!pcpu_rt) { 1785 /* atomic_inc_not_zero() is needed when using rcu */ 1786 if (atomic_inc_not_zero(&rt->rt6i_ref)) { 1787 /* No dst_hold() on rt is needed because grabbing 1788 * rt->rt6i_ref makes sure rt can't be released. 1789 */ 1790 pcpu_rt = rt6_make_pcpu_route(rt); 1791 rt6_release(rt); 1792 } else { 1793 /* rt is already removed from tree */ 1794 pcpu_rt = net->ipv6.ip6_null_entry; 1795 dst_hold(&pcpu_rt->dst); 1796 } 1797 } |
1894 local_bh_enable(); 1895 rcu_read_unlock(); 1896 trace_fib6_table_lookup(net, pcpu_rt, table, fl6); 1897 return pcpu_rt; 1898 } 1899} 1900EXPORT_SYMBOL_GPL(ip6_pol_route); 1901 --- 204 unchanged lines hidden (view full) --- 2106 new->input = dst_discard; 2107 new->output = dst_discard_out; 2108 2109 dst_copy_metrics(new, &ort->dst); 2110 2111 rt->rt6i_idev = in6_dev_get(loopback_dev); 2112 rt->rt6i_gateway = ort->rt6i_gateway; 2113 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU; | 1798 local_bh_enable(); 1799 rcu_read_unlock(); 1800 trace_fib6_table_lookup(net, pcpu_rt, table, fl6); 1801 return pcpu_rt; 1802 } 1803} 1804EXPORT_SYMBOL_GPL(ip6_pol_route); 1805 --- 204 unchanged lines hidden (view full) --- 2010 new->input = dst_discard; 2011 new->output = dst_discard_out; 2012 2013 dst_copy_metrics(new, &ort->dst); 2014 2015 rt->rt6i_idev = in6_dev_get(loopback_dev); 2016 rt->rt6i_gateway = ort->rt6i_gateway; 2017 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU; |
2018 rt->rt6i_metric = 0; |
2114 2115 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); 2116#ifdef CONFIG_IPV6_SUBTREES 2117 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key)); 2118#endif 2119 } 2120 2121 dst_release(dst_orig); 2122 return new ? new : ERR_PTR(-ENOMEM); 2123} 2124 2125/* 2126 * Destination cache support functions 2127 */ 2128 | 2019 2020 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); 2021#ifdef CONFIG_IPV6_SUBTREES 2022 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key)); 2023#endif 2024 } 2025 2026 dst_release(dst_orig); 2027 return new ? new : ERR_PTR(-ENOMEM); 2028} 2029 2030/* 2031 * Destination cache support functions 2032 */ 2033 |
2129static bool fib6_check(struct fib6_info *f6i, u32 cookie) | 2034static void rt6_dst_from_metrics_check(struct rt6_info *rt) |
2130{ | 2035{ |
2131 u32 rt_cookie = 0; 2132 2133 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie) 2134 return false; 2135 2136 if (fib6_check_expired(f6i)) 2137 return false; 2138 2139 return true; | 2036 if (rt->from && 2037 dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(&rt->from->dst)) 2038 dst_init_metrics(&rt->dst, dst_metrics_ptr(&rt->from->dst), true); |
2140} 2141 | 2039} 2040 |
2142static struct dst_entry *rt6_check(struct rt6_info *rt, 2143 struct fib6_info *from, 2144 u32 cookie) | 2041static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie) |
2145{ 2146 u32 rt_cookie = 0; 2147 | 2042{ 2043 u32 rt_cookie = 0; 2044 |
2148 if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) || 2149 rt_cookie != cookie) | 2045 if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie) |
2150 return NULL; 2151 2152 if (rt6_check_expired(rt)) 2153 return NULL; 2154 2155 return &rt->dst; 2156} 2157 | 2046 return NULL; 2047 2048 if (rt6_check_expired(rt)) 2049 return NULL; 2050 2051 return &rt->dst; 2052} 2053 |
2158static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, 2159 struct fib6_info *from, 2160 u32 cookie) | 2054static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie) |
2161{ 2162 if (!__rt6_check_expired(rt) && 2163 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && | 2055{ 2056 if (!__rt6_check_expired(rt) && 2057 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && |
2164 fib6_check(from, cookie)) | 2058 rt6_check(rt->from, cookie)) |
2165 return &rt->dst; 2166 else 2167 return NULL; 2168} 2169 2170static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) 2171{ | 2059 return &rt->dst; 2060 else 2061 return NULL; 2062} 2063 2064static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) 2065{ |
2172 struct dst_entry *dst_ret; 2173 struct fib6_info *from; | |
2174 struct rt6_info *rt; 2175 | 2066 struct rt6_info *rt; 2067 |
2176 rt = container_of(dst, struct rt6_info, dst); | 2068 rt = (struct rt6_info *) dst; |
2177 | 2069 |
2178 rcu_read_lock(); 2179 | |
2180 /* All IPV6 dsts are created with ->obsolete set to the value 2181 * DST_OBSOLETE_FORCE_CHK which forces validation calls down 2182 * into this function always. 2183 */ 2184 | 2070 /* All IPV6 dsts are created with ->obsolete set to the value 2071 * DST_OBSOLETE_FORCE_CHK which forces validation calls down 2072 * into this function always. 2073 */ 2074 |
2185 from = rcu_dereference(rt->from); | 2075 rt6_dst_from_metrics_check(rt); |
2186 | 2076 |
2187 if (from && (rt->rt6i_flags & RTF_PCPU || 2188 unlikely(!list_empty(&rt->rt6i_uncached)))) 2189 dst_ret = rt6_dst_from_check(rt, from, cookie); | 2077 if (rt->rt6i_flags & RTF_PCPU || 2078 (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from)) 2079 return rt6_dst_from_check(rt, cookie); |
2190 else | 2080 else |
2191 dst_ret = rt6_check(rt, from, cookie); 2192 2193 rcu_read_unlock(); 2194 2195 return dst_ret; | 2081 return rt6_check(rt, cookie); |
2196} 2197 2198static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) 2199{ 2200 struct rt6_info *rt = (struct rt6_info *) dst; 2201 2202 if (rt) { 2203 if (rt->rt6i_flags & RTF_CACHE) { 2204 if (rt6_check_expired(rt)) { | 2082} 2083 2084static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) 2085{ 2086 struct rt6_info *rt = (struct rt6_info *) dst; 2087 2088 if (rt) { 2089 if (rt->rt6i_flags & RTF_CACHE) { 2090 if (rt6_check_expired(rt)) { |
2205 rt6_remove_exception_rt(rt); | 2091 ip6_del_rt(rt); |
2206 dst = NULL; 2207 } 2208 } else { 2209 dst_release(dst); 2210 dst = NULL; 2211 } 2212 } 2213 return dst; --- 4 unchanged lines hidden (view full) --- 2218 struct rt6_info *rt; 2219 2220 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); 2221 2222 rt = (struct rt6_info *) skb_dst(skb); 2223 if (rt) { 2224 if (rt->rt6i_flags & RTF_CACHE) { 2225 if (dst_hold_safe(&rt->dst)) | 2092 dst = NULL; 2093 } 2094 } else { 2095 dst_release(dst); 2096 dst = NULL; 2097 } 2098 } 2099 return dst; --- 4 unchanged lines hidden (view full) --- 2104 struct rt6_info *rt; 2105 2106 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); 2107 2108 rt = (struct rt6_info *) skb_dst(skb); 2109 if (rt) { 2110 if (rt->rt6i_flags & RTF_CACHE) { 2111 if (dst_hold_safe(&rt->dst)) |
2226 rt6_remove_exception_rt(rt); | 2112 ip6_del_rt(rt); |
2227 } else { | 2113 } else { |
2228 struct fib6_info *from; | |
2229 struct fib6_node *fn; 2230 2231 rcu_read_lock(); | 2114 struct fib6_node *fn; 2115 2116 rcu_read_lock(); |
2232 from = rcu_dereference(rt->from); 2233 if (from) { 2234 fn = rcu_dereference(from->fib6_node); 2235 if (fn && (rt->rt6i_flags & RTF_DEFAULT)) 2236 fn->fn_sernum = -1; 2237 } | 2117 fn = rcu_dereference(rt->rt6i_node); 2118 if (fn && (rt->rt6i_flags & RTF_DEFAULT)) 2119 fn->fn_sernum = -1; |
2238 rcu_read_unlock(); 2239 } 2240 } 2241} 2242 | 2120 rcu_read_unlock(); 2121 } 2122 } 2123} 2124 |
2243static void rt6_update_expires(struct rt6_info *rt0, int timeout) 2244{ 2245 if (!(rt0->rt6i_flags & RTF_EXPIRES)) { 2246 struct fib6_info *from; 2247 2248 rcu_read_lock(); 2249 from = rcu_dereference(rt0->from); 2250 if (from) 2251 rt0->dst.expires = from->expires; 2252 rcu_read_unlock(); 2253 } 2254 2255 dst_set_expires(&rt0->dst, timeout); 2256 rt0->rt6i_flags |= RTF_EXPIRES; 2257} 2258 | |
2259static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu) 2260{ 2261 struct net *net = dev_net(rt->dst.dev); 2262 | 2125static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu) 2126{ 2127 struct net *net = dev_net(rt->dst.dev); 2128 |
2263 dst_metric_set(&rt->dst, RTAX_MTU, mtu); | |
2264 rt->rt6i_flags |= RTF_MODIFIED; | 2129 rt->rt6i_flags |= RTF_MODIFIED; |
2130 rt->rt6i_pmtu = mtu; |
2265 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); 2266} 2267 2268static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) 2269{ | 2131 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); 2132} 2133 2134static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) 2135{ |
2270 bool from_set; 2271 2272 rcu_read_lock(); 2273 from_set = !!rcu_dereference(rt->from); 2274 rcu_read_unlock(); 2275 | |
2276 return !(rt->rt6i_flags & RTF_CACHE) && | 2136 return !(rt->rt6i_flags & RTF_CACHE) && |
2277 (rt->rt6i_flags & RTF_PCPU || from_set); | 2137 (rt->rt6i_flags & RTF_PCPU || 2138 rcu_access_pointer(rt->rt6i_node)); |
2278} 2279 2280static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, 2281 const struct ipv6hdr *iph, u32 mtu) 2282{ 2283 const struct in6_addr *daddr, *saddr; 2284 struct rt6_info *rt6 = (struct rt6_info *)dst; 2285 --- 19 unchanged lines hidden (view full) --- 2305 return; 2306 2307 if (!rt6_cache_allowed_for_pmtu(rt6)) { 2308 rt6_do_update_pmtu(rt6, mtu); 2309 /* update rt6_ex->stamp for cache */ 2310 if (rt6->rt6i_flags & RTF_CACHE) 2311 rt6_update_exception_stamp_rt(rt6); 2312 } else if (daddr) { | 2139} 2140 2141static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, 2142 const struct ipv6hdr *iph, u32 mtu) 2143{ 2144 const struct in6_addr *daddr, *saddr; 2145 struct rt6_info *rt6 = (struct rt6_info *)dst; 2146 --- 19 unchanged lines hidden (view full) --- 2166 return; 2167 2168 if (!rt6_cache_allowed_for_pmtu(rt6)) { 2169 rt6_do_update_pmtu(rt6, mtu); 2170 /* update rt6_ex->stamp for cache */ 2171 if (rt6->rt6i_flags & RTF_CACHE) 2172 rt6_update_exception_stamp_rt(rt6); 2173 } else if (daddr) { |
2313 struct fib6_info *from; | |
2314 struct rt6_info *nrt6; 2315 | 2174 struct rt6_info *nrt6; 2175 |
2316 rcu_read_lock(); 2317 from = rcu_dereference(rt6->from); 2318 nrt6 = ip6_rt_cache_alloc(from, daddr, saddr); | 2176 nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr); |
2319 if (nrt6) { 2320 rt6_do_update_pmtu(nrt6, mtu); | 2177 if (nrt6) { 2178 rt6_do_update_pmtu(nrt6, mtu); |
2321 if (rt6_insert_exception(nrt6, from)) | 2179 if (rt6_insert_exception(nrt6, rt6)) |
2322 dst_release_immediate(&nrt6->dst); 2323 } | 2180 dst_release_immediate(&nrt6->dst); 2181 } |
2324 rcu_read_unlock(); | |
2325 } 2326} 2327 2328static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 2329 struct sk_buff *skb, u32 mtu) 2330{ 2331 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu); 2332} --- 64 unchanged lines hidden (view full) --- 2397 2398static struct rt6_info *__ip6_route_redirect(struct net *net, 2399 struct fib6_table *table, 2400 struct flowi6 *fl6, 2401 const struct sk_buff *skb, 2402 int flags) 2403{ 2404 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6; | 2182 } 2183} 2184 2185static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 2186 struct sk_buff *skb, u32 mtu) 2187{ 2188 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu); 2189} --- 64 unchanged lines hidden (view full) --- 2254 2255static struct rt6_info *__ip6_route_redirect(struct net *net, 2256 struct fib6_table *table, 2257 struct flowi6 *fl6, 2258 const struct sk_buff *skb, 2259 int flags) 2260{ 2261 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6; |
2405 struct rt6_info *ret = NULL, *rt_cache; 2406 struct fib6_info *rt; | 2262 struct rt6_info *rt, *rt_cache; |
2407 struct fib6_node *fn; 2408 2409 /* Get the "current" route for this destination and 2410 * check if the redirect has come from appropriate router. 2411 * 2412 * RFC 4861 specifies that redirects should only be 2413 * accepted if they come from the nexthop to the target. 2414 * Due to the way the routes are chosen, this notion 2415 * is a bit fuzzy and one might need to check all possible 2416 * routes. 2417 */ 2418 2419 rcu_read_lock(); 2420 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 2421restart: 2422 for_each_fib6_node_rt_rcu(fn) { | 2263 struct fib6_node *fn; 2264 2265 /* Get the "current" route for this destination and 2266 * check if the redirect has come from appropriate router. 2267 * 2268 * RFC 4861 specifies that redirects should only be 2269 * accepted if they come from the nexthop to the target. 2270 * Due to the way the routes are chosen, this notion 2271 * is a bit fuzzy and one might need to check all possible 2272 * routes. 2273 */ 2274 2275 rcu_read_lock(); 2276 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); 2277restart: 2278 for_each_fib6_node_rt_rcu(fn) { |
2423 if (rt->fib6_nh.nh_flags & RTNH_F_DEAD) | 2279 if (rt->rt6i_nh_flags & RTNH_F_DEAD) |
2424 continue; | 2280 continue; |
2425 if (fib6_check_expired(rt)) | 2281 if (rt6_check_expired(rt)) |
2426 continue; | 2282 continue; |
2427 if (rt->fib6_flags & RTF_REJECT) | 2283 if (rt->dst.error) |
2428 break; | 2284 break; |
2429 if (!(rt->fib6_flags & RTF_GATEWAY)) | 2285 if (!(rt->rt6i_flags & RTF_GATEWAY)) |
2430 continue; | 2286 continue; |
2431 if (fl6->flowi6_oif != rt->fib6_nh.nh_dev->ifindex) | 2287 if (fl6->flowi6_oif != rt->dst.dev->ifindex) |
2432 continue; 2433 /* rt_cache's gateway might be different from its 'parent' 2434 * in the case of an ip redirect. 2435 * So we keep searching in the exception table if the gateway 2436 * is different. 2437 */ | 2288 continue; 2289 /* rt_cache's gateway might be different from its 'parent' 2290 * in the case of an ip redirect. 2291 * So we keep searching in the exception table if the gateway 2292 * is different. 2293 */ |
2438 if (!ipv6_addr_equal(&rdfl->gateway, &rt->fib6_nh.nh_gw)) { | 2294 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) { |
2439 rt_cache = rt6_find_cached_rt(rt, 2440 &fl6->daddr, 2441 &fl6->saddr); 2442 if (rt_cache && 2443 ipv6_addr_equal(&rdfl->gateway, 2444 &rt_cache->rt6i_gateway)) { | 2295 rt_cache = rt6_find_cached_rt(rt, 2296 &fl6->daddr, 2297 &fl6->saddr); 2298 if (rt_cache && 2299 ipv6_addr_equal(&rdfl->gateway, 2300 &rt_cache->rt6i_gateway)) { |
2445 ret = rt_cache; | 2301 rt = rt_cache; |
2446 break; 2447 } 2448 continue; 2449 } 2450 break; 2451 } 2452 2453 if (!rt) | 2302 break; 2303 } 2304 continue; 2305 } 2306 break; 2307 } 2308 2309 if (!rt) |
2454 rt = net->ipv6.fib6_null_entry; 2455 else if (rt->fib6_flags & RTF_REJECT) { 2456 ret = net->ipv6.ip6_null_entry; | 2310 rt = net->ipv6.ip6_null_entry; 2311 else if (rt->dst.error) { 2312 rt = net->ipv6.ip6_null_entry; |
2457 goto out; 2458 } 2459 | 2313 goto out; 2314 } 2315 |
2460 if (rt == net->ipv6.fib6_null_entry) { | 2316 if (rt == net->ipv6.ip6_null_entry) { |
2461 fn = fib6_backtrack(fn, &fl6->saddr); 2462 if (fn) 2463 goto restart; 2464 } 2465 2466out: | 2317 fn = fib6_backtrack(fn, &fl6->saddr); 2318 if (fn) 2319 goto restart; 2320 } 2321 2322out: |
2467 if (ret) 2468 dst_hold(&ret->dst); 2469 else 2470 ret = ip6_create_rt_rcu(rt); | 2323 ip6_hold_safe(net, &rt, true); |
2471 2472 rcu_read_unlock(); 2473 | 2324 2325 rcu_read_unlock(); 2326 |
2474 trace_fib6_table_lookup(net, ret, table, fl6); 2475 return ret; | 2327 trace_fib6_table_lookup(net, rt, table, fl6); 2328 return rt; |
2476}; 2477 2478static struct dst_entry *ip6_route_redirect(struct net *net, 2479 const struct flowi6 *fl6, 2480 const struct sk_buff *skb, 2481 const struct in6_addr *gateway) 2482{ 2483 int flags = RT6_LOOKUP_F_HAS_SADDR; --- 75 unchanged lines hidden (view full) --- 2559 */ 2560 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr)) 2561 mtu = IPV6_MAXPLEN; 2562 return mtu; 2563} 2564 2565static unsigned int ip6_mtu(const struct dst_entry *dst) 2566{ | 2329}; 2330 2331static struct dst_entry *ip6_route_redirect(struct net *net, 2332 const struct flowi6 *fl6, 2333 const struct sk_buff *skb, 2334 const struct in6_addr *gateway) 2335{ 2336 int flags = RT6_LOOKUP_F_HAS_SADDR; --- 75 unchanged lines hidden (view full) --- 2412 */ 2413 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr)) 2414 mtu = IPV6_MAXPLEN; 2415 return mtu; 2416} 2417 2418static unsigned int ip6_mtu(const struct dst_entry *dst) 2419{ |
2420 const struct rt6_info *rt = (const struct rt6_info *)dst; 2421 unsigned int mtu = rt->rt6i_pmtu; |
2567 struct inet6_dev *idev; | 2422 struct inet6_dev *idev; |
2568 unsigned int mtu; | |
2569 | 2423 |
2424 if (mtu) 2425 goto out; 2426 |
2570 mtu = dst_metric_raw(dst, RTAX_MTU); 2571 if (mtu) 2572 goto out; 2573 2574 mtu = IPV6_MIN_MTU; 2575 2576 rcu_read_lock(); 2577 idev = __in6_dev_get(dst->dev); --- 66 unchanged lines hidden (view full) --- 2644 entries = dst_entries_get_slow(ops); 2645 if (entries < ops->gc_thresh) 2646 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1; 2647out: 2648 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity; 2649 return entries > rt_max_size; 2650} 2651 | 2427 mtu = dst_metric_raw(dst, RTAX_MTU); 2428 if (mtu) 2429 goto out; 2430 2431 mtu = IPV6_MIN_MTU; 2432 2433 rcu_read_lock(); 2434 idev = __in6_dev_get(dst->dev); --- 66 unchanged lines hidden (view full) --- 2501 entries = dst_entries_get_slow(ops); 2502 if (entries < ops->gc_thresh) 2503 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1; 2504out: 2505 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity; 2506 return entries > rt_max_size; 2507} 2508 |
2652static int ip6_convert_metrics(struct net *net, struct fib6_info *rt, 2653 struct fib6_config *cfg) | 2509static int ip6_convert_metrics(struct mx6_config *mxc, 2510 const struct fib6_config *cfg) |
2654{ | 2511{ |
2655 struct dst_metrics *p; | 2512 struct net *net = cfg->fc_nlinfo.nl_net; 2513 bool ecn_ca = false; 2514 struct nlattr *nla; 2515 int remaining; 2516 u32 *mp; |
2656 2657 if (!cfg->fc_mx) 2658 return 0; 2659 | 2517 2518 if (!cfg->fc_mx) 2519 return 0; 2520 |
2660 p = kzalloc(sizeof(*rt->fib6_metrics), GFP_KERNEL); 2661 if (unlikely(!p)) | 2521 mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); 2522 if (unlikely(!mp)) |
2662 return -ENOMEM; 2663 | 2523 return -ENOMEM; 2524 |
2664 refcount_set(&p->refcnt, 1); 2665 rt->fib6_metrics = p; | 2525 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { 2526 int type = nla_type(nla); 2527 u32 val; |
2666 | 2528 |
2667 return ip_metrics_convert(net, cfg->fc_mx, cfg->fc_mx_len, p->metrics); | 2529 if (!type) 2530 continue; 2531 if (unlikely(type > RTAX_MAX)) 2532 goto err; 2533 2534 if (type == RTAX_CC_ALGO) { 2535 char tmp[TCP_CA_NAME_MAX]; 2536 2537 nla_strlcpy(tmp, nla, sizeof(tmp)); 2538 val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca); 2539 if (val == TCP_CA_UNSPEC) 2540 goto err; 2541 } else { 2542 val = nla_get_u32(nla); 2543 } 2544 if (type == RTAX_HOPLIMIT && val > 255) 2545 val = 255; 2546 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) 2547 goto err; 2548 2549 mp[type - 1] = val; 2550 __set_bit(type - 1, mxc->mx_valid); 2551 } 2552 2553 if (ecn_ca) { 2554 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid); 2555 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; 2556 } 2557 2558 mxc->mx = mp; 2559 return 0; 2560 err: 2561 kfree(mp); 2562 return -EINVAL; |
2668} 2669 2670static struct rt6_info *ip6_nh_lookup_table(struct net *net, 2671 struct fib6_config *cfg, 2672 const struct in6_addr *gw_addr, 2673 u32 tbid, int flags) 2674{ 2675 struct flowi6 fl6 = { --- 169 unchanged lines hidden (view full) --- 2845 goto out; 2846 } 2847 2848 err = 0; 2849out: 2850 return err; 2851} 2852 | 2563} 2564 2565static struct rt6_info *ip6_nh_lookup_table(struct net *net, 2566 struct fib6_config *cfg, 2567 const struct in6_addr *gw_addr, 2568 u32 tbid, int flags) 2569{ 2570 struct flowi6 fl6 = { --- 169 unchanged lines hidden (view full) --- 2740 goto out; 2741 } 2742 2743 err = 0; 2744out: 2745 return err; 2746} 2747 |
2853static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, 2854 gfp_t gfp_flags, | 2748static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, |
2855 struct netlink_ext_ack *extack) 2856{ 2857 struct net *net = cfg->fc_nlinfo.nl_net; | 2749 struct netlink_ext_ack *extack) 2750{ 2751 struct net *net = cfg->fc_nlinfo.nl_net; |
2858 struct fib6_info *rt = NULL; | 2752 struct rt6_info *rt = NULL; |
2859 struct net_device *dev = NULL; 2860 struct inet6_dev *idev = NULL; 2861 struct fib6_table *table; 2862 int addr_type; 2863 int err = -EINVAL; 2864 2865 /* RTF_PCPU is an internal flag; can not be set by userspace */ 2866 if (cfg->fc_flags & RTF_PCPU) { 2867 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU"); 2868 goto out; 2869 } 2870 2871 /* RTF_CACHE is an internal flag; can not be set by userspace */ 2872 if (cfg->fc_flags & RTF_CACHE) { 2873 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE"); 2874 goto out; 2875 } 2876 | 2753 struct net_device *dev = NULL; 2754 struct inet6_dev *idev = NULL; 2755 struct fib6_table *table; 2756 int addr_type; 2757 int err = -EINVAL; 2758 2759 /* RTF_PCPU is an internal flag; can not be set by userspace */ 2760 if (cfg->fc_flags & RTF_PCPU) { 2761 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU"); 2762 goto out; 2763 } 2764 2765 /* RTF_CACHE is an internal flag; can not be set by userspace */ 2766 if (cfg->fc_flags & RTF_CACHE) { 2767 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE"); 2768 goto out; 2769 } 2770 |
2877 if (cfg->fc_type > RTN_MAX) { 2878 NL_SET_ERR_MSG(extack, "Invalid route type"); 2879 goto out; 2880 } 2881 | |
2882 if (cfg->fc_dst_len > 128) { 2883 NL_SET_ERR_MSG(extack, "Invalid prefix length"); 2884 goto out; 2885 } 2886 if (cfg->fc_src_len > 128) { 2887 NL_SET_ERR_MSG(extack, "Invalid source address length"); 2888 goto out; 2889 } --- 42 unchanged lines hidden (view full) --- 2932 } 2933 } else { 2934 table = fib6_new_table(net, cfg->fc_table); 2935 } 2936 2937 if (!table) 2938 goto out; 2939 | 2771 if (cfg->fc_dst_len > 128) { 2772 NL_SET_ERR_MSG(extack, "Invalid prefix length"); 2773 goto out; 2774 } 2775 if (cfg->fc_src_len > 128) { 2776 NL_SET_ERR_MSG(extack, "Invalid source address length"); 2777 goto out; 2778 } --- 42 unchanged lines hidden (view full) --- 2821 } 2822 } else { 2823 table = fib6_new_table(net, cfg->fc_table); 2824 } 2825 2826 if (!table) 2827 goto out; 2828 |
2940 err = -ENOMEM; 2941 rt = fib6_info_alloc(gfp_flags); 2942 if (!rt) 2943 goto out; | 2829 rt = ip6_dst_alloc(net, NULL, 2830 (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT); |
2944 | 2831 |
2945 if (cfg->fc_flags & RTF_ADDRCONF) 2946 rt->dst_nocount = true; 2947 2948 err = ip6_convert_metrics(net, rt, cfg); 2949 if (err < 0) | 2832 if (!rt) { 2833 err = -ENOMEM; |
2950 goto out; | 2834 goto out; |
2835 } |
2951 2952 if (cfg->fc_flags & RTF_EXPIRES) | 2836 2837 if (cfg->fc_flags & RTF_EXPIRES) |
2953 fib6_set_expires(rt, jiffies + | 2838 rt6_set_expires(rt, jiffies + |
2954 clock_t_to_jiffies(cfg->fc_expires)); 2955 else | 2839 clock_t_to_jiffies(cfg->fc_expires)); 2840 else |
2956 fib6_clean_expires(rt); | 2841 rt6_clean_expires(rt); |
2957 2958 if (cfg->fc_protocol == RTPROT_UNSPEC) 2959 cfg->fc_protocol = RTPROT_BOOT; | 2842 2843 if (cfg->fc_protocol == RTPROT_UNSPEC) 2844 cfg->fc_protocol = RTPROT_BOOT; |
2960 rt->fib6_protocol = cfg->fc_protocol; | 2845 rt->rt6i_protocol = cfg->fc_protocol; |
2961 2962 addr_type = ipv6_addr_type(&cfg->fc_dst); 2963 | 2846 2847 addr_type = ipv6_addr_type(&cfg->fc_dst); 2848 |
2849 if (addr_type & IPV6_ADDR_MULTICAST) 2850 rt->dst.input = ip6_mc_input; 2851 else if (cfg->fc_flags & RTF_LOCAL) 2852 rt->dst.input = ip6_input; 2853 else 2854 rt->dst.input = ip6_forward; 2855 2856 rt->dst.output = ip6_output; 2857 |
2964 if (cfg->fc_encap) { 2965 struct lwtunnel_state *lwtstate; 2966 2967 err = lwtunnel_build_state(cfg->fc_encap_type, 2968 cfg->fc_encap, AF_INET6, cfg, 2969 &lwtstate, extack); 2970 if (err) 2971 goto out; | 2858 if (cfg->fc_encap) { 2859 struct lwtunnel_state *lwtstate; 2860 2861 err = lwtunnel_build_state(cfg->fc_encap_type, 2862 cfg->fc_encap, AF_INET6, cfg, 2863 &lwtstate, extack); 2864 if (err) 2865 goto out; |
2972 rt->fib6_nh.nh_lwtstate = lwtstate_get(lwtstate); | 2866 rt->dst.lwtstate = lwtstate_get(lwtstate); 2867 lwtunnel_set_redirect(&rt->dst); |
2973 } 2974 | 2868 } 2869 |
2975 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); 2976 rt->fib6_dst.plen = cfg->fc_dst_len; 2977 if (rt->fib6_dst.plen == 128) 2978 rt->dst_host = true; | 2870 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); 2871 rt->rt6i_dst.plen = cfg->fc_dst_len; 2872 if (rt->rt6i_dst.plen == 128) 2873 rt->dst.flags |= DST_HOST; |
2979 2980#ifdef CONFIG_IPV6_SUBTREES | 2874 2875#ifdef CONFIG_IPV6_SUBTREES |
2981 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len); 2982 rt->fib6_src.plen = cfg->fc_src_len; | 2876 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); 2877 rt->rt6i_src.plen = cfg->fc_src_len; |
2983#endif 2984 | 2878#endif 2879 |
2985 rt->fib6_metric = cfg->fc_metric; 2986 rt->fib6_nh.nh_weight = 1; | 2880 rt->rt6i_metric = cfg->fc_metric; 2881 rt->rt6i_nh_weight = 1; |
2987 | 2882 |
2988 rt->fib6_type = cfg->fc_type; 2989 | |
2990 /* We cannot add true routes via loopback here, 2991 they would result in kernel looping; promote them to reject routes 2992 */ 2993 if ((cfg->fc_flags & RTF_REJECT) || 2994 (dev && (dev->flags & IFF_LOOPBACK) && 2995 !(addr_type & IPV6_ADDR_LOOPBACK) && 2996 !(cfg->fc_flags & RTF_LOCAL))) { 2997 /* hold loopback dev/idev if we haven't done so. */ --- 5 unchanged lines hidden (view full) --- 3003 dev = net->loopback_dev; 3004 dev_hold(dev); 3005 idev = in6_dev_get(dev); 3006 if (!idev) { 3007 err = -ENODEV; 3008 goto out; 3009 } 3010 } | 2883 /* We cannot add true routes via loopback here, 2884 they would result in kernel looping; promote them to reject routes 2885 */ 2886 if ((cfg->fc_flags & RTF_REJECT) || 2887 (dev && (dev->flags & IFF_LOOPBACK) && 2888 !(addr_type & IPV6_ADDR_LOOPBACK) && 2889 !(cfg->fc_flags & RTF_LOCAL))) { 2890 /* hold loopback dev/idev if we haven't done so. */ --- 5 unchanged lines hidden (view full) --- 2896 dev = net->loopback_dev; 2897 dev_hold(dev); 2898 idev = in6_dev_get(dev); 2899 if (!idev) { 2900 err = -ENODEV; 2901 goto out; 2902 } 2903 } |
3011 rt->fib6_flags = RTF_REJECT|RTF_NONEXTHOP; | 2904 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; 2905 switch (cfg->fc_type) { 2906 case RTN_BLACKHOLE: 2907 rt->dst.error = -EINVAL; 2908 rt->dst.output = dst_discard_out; 2909 rt->dst.input = dst_discard; 2910 break; 2911 case RTN_PROHIBIT: 2912 rt->dst.error = -EACCES; 2913 rt->dst.output = ip6_pkt_prohibit_out; 2914 rt->dst.input = ip6_pkt_prohibit; 2915 break; 2916 case RTN_THROW: 2917 case RTN_UNREACHABLE: 2918 default: 2919 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN 2920 : (cfg->fc_type == RTN_UNREACHABLE) 2921 ? -EHOSTUNREACH : -ENETUNREACH; 2922 rt->dst.output = ip6_pkt_discard_out; 2923 rt->dst.input = ip6_pkt_discard; 2924 break; 2925 } |
3012 goto install_route; 3013 } 3014 3015 if (cfg->fc_flags & RTF_GATEWAY) { 3016 err = ip6_validate_gw(net, cfg, &dev, &idev, extack); 3017 if (err) 3018 goto out; 3019 | 2926 goto install_route; 2927 } 2928 2929 if (cfg->fc_flags & RTF_GATEWAY) { 2930 err = ip6_validate_gw(net, cfg, &dev, &idev, extack); 2931 if (err) 2932 goto out; 2933 |
3020 rt->fib6_nh.nh_gw = cfg->fc_gateway; | 2934 rt->rt6i_gateway = cfg->fc_gateway; |
3021 } 3022 3023 err = -ENODEV; 3024 if (!dev) 3025 goto out; 3026 3027 if (idev->cnf.disable_ipv6) { 3028 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device"); --- 8 unchanged lines hidden (view full) --- 3037 } 3038 3039 if (!ipv6_addr_any(&cfg->fc_prefsrc)) { 3040 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) { 3041 NL_SET_ERR_MSG(extack, "Invalid source address"); 3042 err = -EINVAL; 3043 goto out; 3044 } | 2935 } 2936 2937 err = -ENODEV; 2938 if (!dev) 2939 goto out; 2940 2941 if (idev->cnf.disable_ipv6) { 2942 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device"); --- 8 unchanged lines hidden (view full) --- 2951 } 2952 2953 if (!ipv6_addr_any(&cfg->fc_prefsrc)) { 2954 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) { 2955 NL_SET_ERR_MSG(extack, "Invalid source address"); 2956 err = -EINVAL; 2957 goto out; 2958 } |
3045 rt->fib6_prefsrc.addr = cfg->fc_prefsrc; 3046 rt->fib6_prefsrc.plen = 128; | 2959 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc; 2960 rt->rt6i_prefsrc.plen = 128; |
3047 } else | 2961 } else |
3048 rt->fib6_prefsrc.plen = 0; | 2962 rt->rt6i_prefsrc.plen = 0; |
3049 | 2963 |
3050 rt->fib6_flags = cfg->fc_flags; | 2964 rt->rt6i_flags = cfg->fc_flags; |
3051 3052install_route: | 2965 2966install_route: |
3053 if (!(rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) && | 2967 if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) && |
3054 !netif_carrier_ok(dev)) | 2968 !netif_carrier_ok(dev)) |
3055 rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN; 3056 rt->fib6_nh.nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK); 3057 rt->fib6_nh.nh_dev = dev; 3058 rt->fib6_table = table; | 2969 rt->rt6i_nh_flags |= RTNH_F_LINKDOWN; 2970 rt->rt6i_nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK); 2971 rt->dst.dev = dev; 2972 rt->rt6i_idev = idev; 2973 rt->rt6i_table = table; |
3059 3060 cfg->fc_nlinfo.nl_net = dev_net(dev); 3061 | 2974 2975 cfg->fc_nlinfo.nl_net = dev_net(dev); 2976 |
3062 if (idev) 3063 in6_dev_put(idev); 3064 | |
3065 return rt; 3066out: 3067 if (dev) 3068 dev_put(dev); 3069 if (idev) 3070 in6_dev_put(idev); | 2977 return rt; 2978out: 2979 if (dev) 2980 dev_put(dev); 2981 if (idev) 2982 in6_dev_put(idev); |
2983 if (rt) 2984 dst_release_immediate(&rt->dst); |
3071 | 2985 |
3072 fib6_info_release(rt); | |
3073 return ERR_PTR(err); 3074} 3075 | 2986 return ERR_PTR(err); 2987} 2988 |
3076int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, | 2989int ip6_route_add(struct fib6_config *cfg, |
3077 struct netlink_ext_ack *extack) 3078{ | 2990 struct netlink_ext_ack *extack) 2991{ |
3079 struct fib6_info *rt; | 2992 struct mx6_config mxc = { .mx = NULL, }; 2993 struct rt6_info *rt; |
3080 int err; 3081 | 2994 int err; 2995 |
3082 rt = ip6_route_info_create(cfg, gfp_flags, extack); 3083 if (IS_ERR(rt)) 3084 return PTR_ERR(rt); | 2996 rt = ip6_route_info_create(cfg, extack); 2997 if (IS_ERR(rt)) { 2998 err = PTR_ERR(rt); 2999 rt = NULL; 3000 goto out; 3001 } |
3085 | 3002 |
3086 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack); 3087 fib6_info_release(rt); | 3003 err = ip6_convert_metrics(&mxc, cfg); 3004 if (err) 3005 goto out; |
3088 | 3006 |
3007 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack); 3008 3009 kfree(mxc.mx); 3010 |
3089 return err; | 3011 return err; |
3012out: 3013 if (rt) 3014 dst_release_immediate(&rt->dst); 3015 3016 return err; |
3090} 3091 | 3017} 3018 |
3092static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info) | 3019static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) |
3093{ | 3020{ |
3094 struct net *net = info->nl_net; 3095 struct fib6_table *table; | |
3096 int err; | 3021 int err; |
3022 struct fib6_table *table; 3023 struct net *net = dev_net(rt->dst.dev); |
3097 | 3024 |
3098 if (rt == net->ipv6.fib6_null_entry) { | 3025 if (rt == net->ipv6.ip6_null_entry) { |
3099 err = -ENOENT; 3100 goto out; 3101 } 3102 | 3026 err = -ENOENT; 3027 goto out; 3028 } 3029 |
3103 table = rt->fib6_table; | 3030 table = rt->rt6i_table; |
3104 spin_lock_bh(&table->tb6_lock); 3105 err = fib6_del(rt, info); 3106 spin_unlock_bh(&table->tb6_lock); 3107 3108out: | 3031 spin_lock_bh(&table->tb6_lock); 3032 err = fib6_del(rt, info); 3033 spin_unlock_bh(&table->tb6_lock); 3034 3035out: |
3109 fib6_info_release(rt); | 3036 ip6_rt_put(rt); |
3110 return err; 3111} 3112 | 3037 return err; 3038} 3039 |
3113int ip6_del_rt(struct net *net, struct fib6_info *rt) | 3040int ip6_del_rt(struct rt6_info *rt) |
3114{ | 3041{ |
3115 struct nl_info info = { .nl_net = net }; 3116 | 3042 struct nl_info info = { 3043 .nl_net = dev_net(rt->dst.dev), 3044 }; |
3117 return __ip6_del_rt(rt, &info); 3118} 3119 | 3045 return __ip6_del_rt(rt, &info); 3046} 3047 |
3120static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg) | 3048static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) |
3121{ 3122 struct nl_info *info = &cfg->fc_nlinfo; 3123 struct net *net = info->nl_net; 3124 struct sk_buff *skb = NULL; 3125 struct fib6_table *table; 3126 int err = -ENOENT; 3127 | 3049{ 3050 struct nl_info *info = &cfg->fc_nlinfo; 3051 struct net *net = info->nl_net; 3052 struct sk_buff *skb = NULL; 3053 struct fib6_table *table; 3054 int err = -ENOENT; 3055 |
3128 if (rt == net->ipv6.fib6_null_entry) | 3056 if (rt == net->ipv6.ip6_null_entry) |
3129 goto out_put; | 3057 goto out_put; |
3130 table = rt->fib6_table; | 3058 table = rt->rt6i_table; |
3131 spin_lock_bh(&table->tb6_lock); 3132 | 3059 spin_lock_bh(&table->tb6_lock); 3060 |
3133 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) { 3134 struct fib6_info *sibling, *next_sibling; | 3061 if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) { 3062 struct rt6_info *sibling, *next_sibling; |
3135 3136 /* prefer to send a single notification with all hops */ 3137 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 3138 if (skb) { 3139 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 3140 | 3063 3064 /* prefer to send a single notification with all hops */ 3065 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 3066 if (skb) { 3067 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 3068 |
3141 if (rt6_fill_node(net, skb, rt, NULL, | 3069 if (rt6_fill_node(net, skb, rt, |
3142 NULL, NULL, 0, RTM_DELROUTE, 3143 info->portid, seq, 0) < 0) { 3144 kfree_skb(skb); 3145 skb = NULL; 3146 } else 3147 info->skip_notify = 1; 3148 } 3149 3150 list_for_each_entry_safe(sibling, next_sibling, | 3070 NULL, NULL, 0, RTM_DELROUTE, 3071 info->portid, seq, 0) < 0) { 3072 kfree_skb(skb); 3073 skb = NULL; 3074 } else 3075 info->skip_notify = 1; 3076 } 3077 3078 list_for_each_entry_safe(sibling, next_sibling, |
3151 &rt->fib6_siblings, 3152 fib6_siblings) { | 3079 &rt->rt6i_siblings, 3080 rt6i_siblings) { |
3153 err = fib6_del(sibling, info); 3154 if (err) 3155 goto out_unlock; 3156 } 3157 } 3158 3159 err = fib6_del(rt, info); 3160out_unlock: 3161 spin_unlock_bh(&table->tb6_lock); 3162out_put: | 3081 err = fib6_del(sibling, info); 3082 if (err) 3083 goto out_unlock; 3084 } 3085 } 3086 3087 err = fib6_del(rt, info); 3088out_unlock: 3089 spin_unlock_bh(&table->tb6_lock); 3090out_put: |
3163 fib6_info_release(rt); | 3091 ip6_rt_put(rt); |
3164 3165 if (skb) { 3166 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 3167 info->nlh, gfp_any()); 3168 } 3169 return err; 3170} 3171 | 3092 3093 if (skb) { 3094 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 3095 info->nlh, gfp_any()); 3096 } 3097 return err; 3098} 3099 |
3172static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg) 3173{ 3174 int rc = -ESRCH; 3175 3176 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex) 3177 goto out; 3178 3179 if (cfg->fc_flags & RTF_GATEWAY && 3180 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) 3181 goto out; 3182 if (dst_hold_safe(&rt->dst)) 3183 rc = rt6_remove_exception_rt(rt); 3184out: 3185 return rc; 3186} 3187 | |
3188static int ip6_route_del(struct fib6_config *cfg, 3189 struct netlink_ext_ack *extack) 3190{ | 3100static int ip6_route_del(struct fib6_config *cfg, 3101 struct netlink_ext_ack *extack) 3102{ |
3191 struct rt6_info *rt_cache; | 3103 struct rt6_info *rt, *rt_cache; |
3192 struct fib6_table *table; | 3104 struct fib6_table *table; |
3193 struct fib6_info *rt; | |
3194 struct fib6_node *fn; 3195 int err = -ESRCH; 3196 3197 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table); 3198 if (!table) { 3199 NL_SET_ERR_MSG(extack, "FIB table does not exist"); 3200 return err; 3201 } 3202 3203 rcu_read_lock(); 3204 3205 fn = fib6_locate(&table->tb6_root, 3206 &cfg->fc_dst, cfg->fc_dst_len, 3207 &cfg->fc_src, cfg->fc_src_len, 3208 !(cfg->fc_flags & RTF_CACHE)); 3209 3210 if (fn) { 3211 for_each_fib6_node_rt_rcu(fn) { 3212 if (cfg->fc_flags & RTF_CACHE) { | 3105 struct fib6_node *fn; 3106 int err = -ESRCH; 3107 3108 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table); 3109 if (!table) { 3110 NL_SET_ERR_MSG(extack, "FIB table does not exist"); 3111 return err; 3112 } 3113 3114 rcu_read_lock(); 3115 3116 fn = fib6_locate(&table->tb6_root, 3117 &cfg->fc_dst, cfg->fc_dst_len, 3118 &cfg->fc_src, cfg->fc_src_len, 3119 !(cfg->fc_flags & RTF_CACHE)); 3120 3121 if (fn) { 3122 for_each_fib6_node_rt_rcu(fn) { 3123 if (cfg->fc_flags & RTF_CACHE) { |
3213 int rc; 3214 | |
3215 rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst, 3216 &cfg->fc_src); | 3124 rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst, 3125 &cfg->fc_src); |
3217 if (rt_cache) { 3218 rc = ip6_del_cached_rt(rt_cache, cfg); 3219 if (rc != -ESRCH) 3220 return rc; 3221 } 3222 continue; | 3126 if (!rt_cache) 3127 continue; 3128 rt = rt_cache; |
3223 } 3224 if (cfg->fc_ifindex && | 3129 } 3130 if (cfg->fc_ifindex && |
3225 (!rt->fib6_nh.nh_dev || 3226 rt->fib6_nh.nh_dev->ifindex != cfg->fc_ifindex)) | 3131 (!rt->dst.dev || 3132 rt->dst.dev->ifindex != cfg->fc_ifindex)) |
3227 continue; 3228 if (cfg->fc_flags & RTF_GATEWAY && | 3133 continue; 3134 if (cfg->fc_flags & RTF_GATEWAY && |
3229 !ipv6_addr_equal(&cfg->fc_gateway, &rt->fib6_nh.nh_gw)) | 3135 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) |
3230 continue; | 3136 continue; |
3231 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric) | 3137 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) |
3232 continue; | 3138 continue; |
3233 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol) | 3139 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol) |
3234 continue; | 3140 continue; |
3235 fib6_info_hold(rt); | 3141 if (!dst_hold_safe(&rt->dst)) 3142 break; |
3236 rcu_read_unlock(); 3237 3238 /* if gateway was specified only delete the one hop */ 3239 if (cfg->fc_flags & RTF_GATEWAY) 3240 return __ip6_del_rt(rt, &cfg->fc_nlinfo); 3241 3242 return __ip6_del_rt_siblings(rt, cfg); 3243 } --- 5 unchanged lines hidden (view full) --- 3249 3250static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) 3251{ 3252 struct netevent_redirect netevent; 3253 struct rt6_info *rt, *nrt = NULL; 3254 struct ndisc_options ndopts; 3255 struct inet6_dev *in6_dev; 3256 struct neighbour *neigh; | 3143 rcu_read_unlock(); 3144 3145 /* if gateway was specified only delete the one hop */ 3146 if (cfg->fc_flags & RTF_GATEWAY) 3147 return __ip6_del_rt(rt, &cfg->fc_nlinfo); 3148 3149 return __ip6_del_rt_siblings(rt, cfg); 3150 } --- 5 unchanged lines hidden (view full) --- 3156 3157static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) 3158{ 3159 struct netevent_redirect netevent; 3160 struct rt6_info *rt, *nrt = NULL; 3161 struct ndisc_options ndopts; 3162 struct inet6_dev *in6_dev; 3163 struct neighbour *neigh; |
3257 struct fib6_info *from; | |
3258 struct rd_msg *msg; 3259 int optlen, on_link; 3260 u8 *lladdr; 3261 3262 optlen = skb_tail_pointer(skb) - skb_transport_header(skb); 3263 optlen -= sizeof(*msg); 3264 3265 if (optlen < 0) { --- 65 unchanged lines hidden (view full) --- 3331 3332 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE, 3333 NEIGH_UPDATE_F_WEAK_OVERRIDE| 3334 NEIGH_UPDATE_F_OVERRIDE| 3335 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER| 3336 NEIGH_UPDATE_F_ISROUTER)), 3337 NDISC_REDIRECT, &ndopts); 3338 | 3164 struct rd_msg *msg; 3165 int optlen, on_link; 3166 u8 *lladdr; 3167 3168 optlen = skb_tail_pointer(skb) - skb_transport_header(skb); 3169 optlen -= sizeof(*msg); 3170 3171 if (optlen < 0) { --- 65 unchanged lines hidden (view full) --- 3237 3238 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE, 3239 NEIGH_UPDATE_F_WEAK_OVERRIDE| 3240 NEIGH_UPDATE_F_OVERRIDE| 3241 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER| 3242 NEIGH_UPDATE_F_ISROUTER)), 3243 NDISC_REDIRECT, &ndopts); 3244 |
3339 rcu_read_lock(); 3340 from = rcu_dereference(rt->from); 3341 nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL); 3342 rcu_read_unlock(); | 3245 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL); |
3343 if (!nrt) 3344 goto out; 3345 3346 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE; 3347 if (on_link) 3348 nrt->rt6i_flags &= ~RTF_GATEWAY; 3349 | 3246 if (!nrt) 3247 goto out; 3248 3249 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE; 3250 if (on_link) 3251 nrt->rt6i_flags &= ~RTF_GATEWAY; 3252 |
3253 nrt->rt6i_protocol = RTPROT_REDIRECT; |
|
3350 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 3351 3352 /* No need to remove rt from the exception table if rt is 3353 * a cached route because rt6_insert_exception() will 3354 * takes care of it 3355 */ | 3254 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 3255 3256 /* No need to remove rt from the exception table if rt is 3257 * a cached route because rt6_insert_exception() will 3258 * takes care of it 3259 */ |
3356 if (rt6_insert_exception(nrt, rt->from)) { | 3260 if (rt6_insert_exception(nrt, rt)) { |
3357 dst_release_immediate(&nrt->dst); 3358 goto out; 3359 } 3360 3361 netevent.old = &rt->dst; 3362 netevent.new = &nrt->dst; 3363 netevent.daddr = &msg->dest; 3364 netevent.neigh = neigh; 3365 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 3366 3367out: 3368 neigh_release(neigh); 3369} 3370 | 3261 dst_release_immediate(&nrt->dst); 3262 goto out; 3263 } 3264 3265 netevent.old = &rt->dst; 3266 netevent.new = &nrt->dst; 3267 netevent.daddr = &msg->dest; 3268 netevent.neigh = neigh; 3269 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 3270 3271out: 3272 neigh_release(neigh); 3273} 3274 |
3275/* 3276 * Misc support functions 3277 */ 3278 3279static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) 3280{ 3281 BUG_ON(from->from); 3282 3283 rt->rt6i_flags &= ~RTF_EXPIRES; 3284 dst_hold(&from->dst); 3285 rt->from = from; 3286 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true); 3287} 3288 3289static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort) 3290{ 3291 rt->dst.input = ort->dst.input; 3292 rt->dst.output = ort->dst.output; 3293 rt->rt6i_dst = ort->rt6i_dst; 3294 rt->dst.error = ort->dst.error; 3295 rt->rt6i_idev = ort->rt6i_idev; 3296 if (rt->rt6i_idev) 3297 in6_dev_hold(rt->rt6i_idev); 3298 rt->dst.lastuse = jiffies; 3299 rt->rt6i_gateway = ort->rt6i_gateway; 3300 rt->rt6i_flags = ort->rt6i_flags; 3301 rt6_set_from(rt, ort); 3302 rt->rt6i_metric = ort->rt6i_metric; 3303#ifdef CONFIG_IPV6_SUBTREES 3304 rt->rt6i_src = ort->rt6i_src; 3305#endif 3306 rt->rt6i_prefsrc = ort->rt6i_prefsrc; 3307 rt->rt6i_table = ort->rt6i_table; 3308 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate); 3309} 3310 |
|
3371#ifdef CONFIG_IPV6_ROUTE_INFO | 3311#ifdef CONFIG_IPV6_ROUTE_INFO |
3372static struct fib6_info *rt6_get_route_info(struct net *net, | 3312static struct rt6_info *rt6_get_route_info(struct net *net, |
3373 const struct in6_addr *prefix, int prefixlen, 3374 const struct in6_addr *gwaddr, 3375 struct net_device *dev) 3376{ 3377 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO; 3378 int ifindex = dev->ifindex; 3379 struct fib6_node *fn; | 3313 const struct in6_addr *prefix, int prefixlen, 3314 const struct in6_addr *gwaddr, 3315 struct net_device *dev) 3316{ 3317 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO; 3318 int ifindex = dev->ifindex; 3319 struct fib6_node *fn; |
3380 struct fib6_info *rt = NULL; | 3320 struct rt6_info *rt = NULL; |
3381 struct fib6_table *table; 3382 3383 table = fib6_get_table(net, tb_id); 3384 if (!table) 3385 return NULL; 3386 3387 rcu_read_lock(); 3388 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true); 3389 if (!fn) 3390 goto out; 3391 3392 for_each_fib6_node_rt_rcu(fn) { | 3321 struct fib6_table *table; 3322 3323 table = fib6_get_table(net, tb_id); 3324 if (!table) 3325 return NULL; 3326 3327 rcu_read_lock(); 3328 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true); 3329 if (!fn) 3330 goto out; 3331 3332 for_each_fib6_node_rt_rcu(fn) { |
3393 if (rt->fib6_nh.nh_dev->ifindex != ifindex) | 3333 if (rt->dst.dev->ifindex != ifindex) |
3394 continue; | 3334 continue; |
3395 if ((rt->fib6_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) | 3335 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) |
3396 continue; | 3336 continue; |
3397 if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr)) | 3337 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr)) |
3398 continue; | 3338 continue; |
3399 fib6_info_hold(rt); | 3339 ip6_hold_safe(NULL, &rt, false); |
3400 break; 3401 } 3402out: 3403 rcu_read_unlock(); 3404 return rt; 3405} 3406 | 3340 break; 3341 } 3342out: 3343 rcu_read_unlock(); 3344 return rt; 3345} 3346 |
3407static struct fib6_info *rt6_add_route_info(struct net *net, | 3347static struct rt6_info *rt6_add_route_info(struct net *net, |
3408 const struct in6_addr *prefix, int prefixlen, 3409 const struct in6_addr *gwaddr, 3410 struct net_device *dev, 3411 unsigned int pref) 3412{ 3413 struct fib6_config cfg = { 3414 .fc_metric = IP6_RT_PRIO_USER, 3415 .fc_ifindex = dev->ifindex, 3416 .fc_dst_len = prefixlen, 3417 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 3418 RTF_UP | RTF_PREF(pref), 3419 .fc_protocol = RTPROT_RA, | 3348 const struct in6_addr *prefix, int prefixlen, 3349 const struct in6_addr *gwaddr, 3350 struct net_device *dev, 3351 unsigned int pref) 3352{ 3353 struct fib6_config cfg = { 3354 .fc_metric = IP6_RT_PRIO_USER, 3355 .fc_ifindex = dev->ifindex, 3356 .fc_dst_len = prefixlen, 3357 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 3358 RTF_UP | RTF_PREF(pref), 3359 .fc_protocol = RTPROT_RA, |
3420 .fc_type = RTN_UNICAST, | |
3421 .fc_nlinfo.portid = 0, 3422 .fc_nlinfo.nlh = NULL, 3423 .fc_nlinfo.nl_net = net, 3424 }; 3425 3426 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO, 3427 cfg.fc_dst = *prefix; 3428 cfg.fc_gateway = *gwaddr; 3429 3430 /* We should treat it as a default route if prefix length is 0. */ 3431 if (!prefixlen) 3432 cfg.fc_flags |= RTF_DEFAULT; 3433 | 3360 .fc_nlinfo.portid = 0, 3361 .fc_nlinfo.nlh = NULL, 3362 .fc_nlinfo.nl_net = net, 3363 }; 3364 3365 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO, 3366 cfg.fc_dst = *prefix; 3367 cfg.fc_gateway = *gwaddr; 3368 3369 /* We should treat it as a default route if prefix length is 0. */ 3370 if (!prefixlen) 3371 cfg.fc_flags |= RTF_DEFAULT; 3372 |
3434 ip6_route_add(&cfg, GFP_ATOMIC, NULL); | 3373 ip6_route_add(&cfg, NULL); |
3435 3436 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); 3437} 3438#endif 3439 | 3374 3375 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); 3376} 3377#endif 3378 |
3440struct fib6_info *rt6_get_dflt_router(struct net *net, 3441 const struct in6_addr *addr, 3442 struct net_device *dev) | 3379struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev) |
3443{ 3444 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT; | 3380{ 3381 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT; |
3445 struct fib6_info *rt; | 3382 struct rt6_info *rt; |
3446 struct fib6_table *table; 3447 | 3383 struct fib6_table *table; 3384 |
3448 table = fib6_get_table(net, tb_id); | 3385 table = fib6_get_table(dev_net(dev), tb_id); |
3449 if (!table) 3450 return NULL; 3451 3452 rcu_read_lock(); 3453 for_each_fib6_node_rt_rcu(&table->tb6_root) { | 3386 if (!table) 3387 return NULL; 3388 3389 rcu_read_lock(); 3390 for_each_fib6_node_rt_rcu(&table->tb6_root) { |
3454 if (dev == rt->fib6_nh.nh_dev && 3455 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && 3456 ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr)) | 3391 if (dev == rt->dst.dev && 3392 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && 3393 ipv6_addr_equal(&rt->rt6i_gateway, addr)) |
3457 break; 3458 } 3459 if (rt) | 3394 break; 3395 } 3396 if (rt) |
3460 fib6_info_hold(rt); | 3397 ip6_hold_safe(NULL, &rt, false); |
3461 rcu_read_unlock(); 3462 return rt; 3463} 3464 | 3398 rcu_read_unlock(); 3399 return rt; 3400} 3401 |
3465struct fib6_info *rt6_add_dflt_router(struct net *net, 3466 const struct in6_addr *gwaddr, | 3402struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr, |
3467 struct net_device *dev, 3468 unsigned int pref) 3469{ 3470 struct fib6_config cfg = { 3471 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT, 3472 .fc_metric = IP6_RT_PRIO_USER, 3473 .fc_ifindex = dev->ifindex, 3474 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 3475 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 3476 .fc_protocol = RTPROT_RA, | 3403 struct net_device *dev, 3404 unsigned int pref) 3405{ 3406 struct fib6_config cfg = { 3407 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT, 3408 .fc_metric = IP6_RT_PRIO_USER, 3409 .fc_ifindex = dev->ifindex, 3410 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 3411 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 3412 .fc_protocol = RTPROT_RA, |
3477 .fc_type = RTN_UNICAST, | |
3478 .fc_nlinfo.portid = 0, 3479 .fc_nlinfo.nlh = NULL, | 3413 .fc_nlinfo.portid = 0, 3414 .fc_nlinfo.nlh = NULL, |
3480 .fc_nlinfo.nl_net = net, | 3415 .fc_nlinfo.nl_net = dev_net(dev), |
3481 }; 3482 3483 cfg.fc_gateway = *gwaddr; 3484 | 3416 }; 3417 3418 cfg.fc_gateway = *gwaddr; 3419 |
3485 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) { | 3420 if (!ip6_route_add(&cfg, NULL)) { |
3486 struct fib6_table *table; 3487 3488 table = fib6_get_table(dev_net(dev), cfg.fc_table); 3489 if (table) 3490 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER; 3491 } 3492 | 3421 struct fib6_table *table; 3422 3423 table = fib6_get_table(dev_net(dev), cfg.fc_table); 3424 if (table) 3425 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER; 3426 } 3427 |
3493 return rt6_get_dflt_router(net, gwaddr, dev); | 3428 return rt6_get_dflt_router(gwaddr, dev); |
3494} 3495 | 3429} 3430 |
3496static void __rt6_purge_dflt_routers(struct net *net, 3497 struct fib6_table *table) | 3431static void __rt6_purge_dflt_routers(struct fib6_table *table) |
3498{ | 3432{ |
3499 struct fib6_info *rt; | 3433 struct rt6_info *rt; |
3500 3501restart: 3502 rcu_read_lock(); 3503 for_each_fib6_node_rt_rcu(&table->tb6_root) { | 3434 3435restart: 3436 rcu_read_lock(); 3437 for_each_fib6_node_rt_rcu(&table->tb6_root) { |
3504 struct net_device *dev = fib6_info_nh_dev(rt); 3505 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL; 3506 3507 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) && 3508 (!idev || idev->cnf.accept_ra != 2)) { 3509 fib6_info_hold(rt); 3510 rcu_read_unlock(); 3511 ip6_del_rt(net, rt); | 3438 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) && 3439 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) { 3440 if (dst_hold_safe(&rt->dst)) { 3441 rcu_read_unlock(); 3442 ip6_del_rt(rt); 3443 } else { 3444 rcu_read_unlock(); 3445 } |
3512 goto restart; 3513 } 3514 } 3515 rcu_read_unlock(); 3516 3517 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER; 3518} 3519 --- 4 unchanged lines hidden (view full) --- 3524 unsigned int h; 3525 3526 rcu_read_lock(); 3527 3528 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { 3529 head = &net->ipv6.fib_table_hash[h]; 3530 hlist_for_each_entry_rcu(table, head, tb6_hlist) { 3531 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER) | 3446 goto restart; 3447 } 3448 } 3449 rcu_read_unlock(); 3450 3451 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER; 3452} 3453 --- 4 unchanged lines hidden (view full) --- 3458 unsigned int h; 3459 3460 rcu_read_lock(); 3461 3462 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { 3463 head = &net->ipv6.fib_table_hash[h]; 3464 hlist_for_each_entry_rcu(table, head, tb6_hlist) { 3465 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER) |
3532 __rt6_purge_dflt_routers(net, table); | 3466 __rt6_purge_dflt_routers(table); |
3533 } 3534 } 3535 3536 rcu_read_unlock(); 3537} 3538 3539static void rtmsg_to_fib6_config(struct net *net, 3540 struct in6_rtmsg *rtmsg, --- 4 unchanged lines hidden (view full) --- 3545 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ? 3546 : RT6_TABLE_MAIN; 3547 cfg->fc_ifindex = rtmsg->rtmsg_ifindex; 3548 cfg->fc_metric = rtmsg->rtmsg_metric; 3549 cfg->fc_expires = rtmsg->rtmsg_info; 3550 cfg->fc_dst_len = rtmsg->rtmsg_dst_len; 3551 cfg->fc_src_len = rtmsg->rtmsg_src_len; 3552 cfg->fc_flags = rtmsg->rtmsg_flags; | 3467 } 3468 } 3469 3470 rcu_read_unlock(); 3471} 3472 3473static void rtmsg_to_fib6_config(struct net *net, 3474 struct in6_rtmsg *rtmsg, --- 4 unchanged lines hidden (view full) --- 3479 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ? 3480 : RT6_TABLE_MAIN; 3481 cfg->fc_ifindex = rtmsg->rtmsg_ifindex; 3482 cfg->fc_metric = rtmsg->rtmsg_metric; 3483 cfg->fc_expires = rtmsg->rtmsg_info; 3484 cfg->fc_dst_len = rtmsg->rtmsg_dst_len; 3485 cfg->fc_src_len = rtmsg->rtmsg_src_len; 3486 cfg->fc_flags = rtmsg->rtmsg_flags; |
3553 cfg->fc_type = rtmsg->rtmsg_type; | |
3554 3555 cfg->fc_nlinfo.nl_net = net; 3556 3557 cfg->fc_dst = rtmsg->rtmsg_dst; 3558 cfg->fc_src = rtmsg->rtmsg_src; 3559 cfg->fc_gateway = rtmsg->rtmsg_gateway; 3560} 3561 --- 13 unchanged lines hidden (view full) --- 3575 if (err) 3576 return -EFAULT; 3577 3578 rtmsg_to_fib6_config(net, &rtmsg, &cfg); 3579 3580 rtnl_lock(); 3581 switch (cmd) { 3582 case SIOCADDRT: | 3487 3488 cfg->fc_nlinfo.nl_net = net; 3489 3490 cfg->fc_dst = rtmsg->rtmsg_dst; 3491 cfg->fc_src = rtmsg->rtmsg_src; 3492 cfg->fc_gateway = rtmsg->rtmsg_gateway; 3493} 3494 --- 13 unchanged lines hidden (view full) --- 3508 if (err) 3509 return -EFAULT; 3510 3511 rtmsg_to_fib6_config(net, &rtmsg, &cfg); 3512 3513 rtnl_lock(); 3514 switch (cmd) { 3515 case SIOCADDRT: |
3583 err = ip6_route_add(&cfg, GFP_KERNEL, NULL); | 3516 err = ip6_route_add(&cfg, NULL); |
3584 break; 3585 case SIOCDELRT: 3586 err = ip6_route_del(&cfg, NULL); 3587 break; 3588 default: 3589 err = -EINVAL; 3590 } 3591 rtnl_unlock(); --- 11 unchanged lines hidden (view full) --- 3603static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes) 3604{ 3605 int type; 3606 struct dst_entry *dst = skb_dst(skb); 3607 switch (ipstats_mib_noroutes) { 3608 case IPSTATS_MIB_INNOROUTES: 3609 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); 3610 if (type == IPV6_ADDR_ANY) { | 3517 break; 3518 case SIOCDELRT: 3519 err = ip6_route_del(&cfg, NULL); 3520 break; 3521 default: 3522 err = -EINVAL; 3523 } 3524 rtnl_unlock(); --- 11 unchanged lines hidden (view full) --- 3536static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes) 3537{ 3538 int type; 3539 struct dst_entry *dst = skb_dst(skb); 3540 switch (ipstats_mib_noroutes) { 3541 case IPSTATS_MIB_INNOROUTES: 3542 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); 3543 if (type == IPV6_ADDR_ANY) { |
3611 IP6_INC_STATS(dev_net(dst->dev), 3612 __in6_dev_get_safely(skb->dev), | 3544 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), |
3613 IPSTATS_MIB_INADDRERRORS); 3614 break; 3615 } 3616 /* FALLTHROUGH */ 3617 case IPSTATS_MIB_OUTNOROUTES: 3618 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), 3619 ipstats_mib_noroutes); 3620 break; --- 24 unchanged lines hidden (view full) --- 3645 skb->dev = skb_dst(skb)->dev; 3646 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); 3647} 3648 3649/* 3650 * Allocate a dst for local (unicast / anycast) address. 3651 */ 3652 | 3545 IPSTATS_MIB_INADDRERRORS); 3546 break; 3547 } 3548 /* FALLTHROUGH */ 3549 case IPSTATS_MIB_OUTNOROUTES: 3550 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), 3551 ipstats_mib_noroutes); 3552 break; --- 24 unchanged lines hidden (view full) --- 3577 skb->dev = skb_dst(skb)->dev; 3578 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); 3579} 3580 3581/* 3582 * Allocate a dst for local (unicast / anycast) address. 3583 */ 3584 |
3653struct fib6_info *addrconf_f6i_alloc(struct net *net, 3654 struct inet6_dev *idev, 3655 const struct in6_addr *addr, 3656 bool anycast, gfp_t gfp_flags) | 3585struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, 3586 const struct in6_addr *addr, 3587 bool anycast) |
3657{ 3658 u32 tb_id; | 3588{ 3589 u32 tb_id; |
3590 struct net *net = dev_net(idev->dev); |
|
3659 struct net_device *dev = idev->dev; | 3591 struct net_device *dev = idev->dev; |
3660 struct fib6_info *f6i; | 3592 struct rt6_info *rt; |
3661 | 3593 |
3662 f6i = fib6_info_alloc(gfp_flags); 3663 if (!f6i) | 3594 rt = ip6_dst_alloc(net, dev, DST_NOCOUNT); 3595 if (!rt) |
3664 return ERR_PTR(-ENOMEM); 3665 | 3596 return ERR_PTR(-ENOMEM); 3597 |
3666 f6i->dst_nocount = true; 3667 f6i->dst_host = true; 3668 f6i->fib6_protocol = RTPROT_KERNEL; 3669 f6i->fib6_flags = RTF_UP | RTF_NONEXTHOP; 3670 if (anycast) { 3671 f6i->fib6_type = RTN_ANYCAST; 3672 f6i->fib6_flags |= RTF_ANYCAST; 3673 } else { 3674 f6i->fib6_type = RTN_LOCAL; 3675 f6i->fib6_flags |= RTF_LOCAL; 3676 } | 3598 in6_dev_hold(idev); |
3677 | 3599 |
3678 f6i->fib6_nh.nh_gw = *addr; 3679 dev_hold(dev); 3680 f6i->fib6_nh.nh_dev = dev; 3681 f6i->fib6_dst.addr = *addr; 3682 f6i->fib6_dst.plen = 128; | 3600 rt->dst.flags |= DST_HOST; 3601 rt->dst.input = ip6_input; 3602 rt->dst.output = ip6_output; 3603 rt->rt6i_idev = idev; 3604 3605 rt->rt6i_protocol = RTPROT_KERNEL; 3606 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; 3607 if (anycast) 3608 rt->rt6i_flags |= RTF_ANYCAST; 3609 else 3610 rt->rt6i_flags |= RTF_LOCAL; 3611 3612 rt->rt6i_gateway = *addr; 3613 rt->rt6i_dst.addr = *addr; 3614 rt->rt6i_dst.plen = 128; |
3683 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL; | 3615 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL; |
3684 f6i->fib6_table = fib6_get_table(net, tb_id); | 3616 rt->rt6i_table = fib6_get_table(net, tb_id); |
3685 | 3617 |
3686 return f6i; | 3618 return rt; |
3687} 3688 3689/* remove deleted ip from prefsrc entries */ 3690struct arg_dev_net_ip { 3691 struct net_device *dev; 3692 struct net *net; 3693 struct in6_addr *addr; 3694}; 3695 | 3619} 3620 3621/* remove deleted ip from prefsrc entries */ 3622struct arg_dev_net_ip { 3623 struct net_device *dev; 3624 struct net *net; 3625 struct in6_addr *addr; 3626}; 3627 |
3696static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg) | 3628static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg) |
3697{ 3698 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev; 3699 struct net *net = ((struct arg_dev_net_ip *)arg)->net; 3700 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr; 3701 | 3629{ 3630 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev; 3631 struct net *net = ((struct arg_dev_net_ip *)arg)->net; 3632 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr; 3633 |
3702 if (((void *)rt->fib6_nh.nh_dev == dev || !dev) && 3703 rt != net->ipv6.fib6_null_entry && 3704 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) { | 3634 if (((void *)rt->dst.dev == dev || !dev) && 3635 rt != net->ipv6.ip6_null_entry && 3636 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) { |
3705 spin_lock_bh(&rt6_exception_lock); 3706 /* remove prefsrc entry */ | 3637 spin_lock_bh(&rt6_exception_lock); 3638 /* remove prefsrc entry */ |
3707 rt->fib6_prefsrc.plen = 0; | 3639 rt->rt6i_prefsrc.plen = 0; |
3708 /* need to update cache as well */ 3709 rt6_exceptions_remove_prefsrc(rt); 3710 spin_unlock_bh(&rt6_exception_lock); 3711 } 3712 return 0; 3713} 3714 3715void rt6_remove_prefsrc(struct inet6_ifaddr *ifp) --- 5 unchanged lines hidden (view full) --- 3721 .addr = &ifp->addr, 3722 }; 3723 fib6_clean_all(net, fib6_remove_prefsrc, &adni); 3724} 3725 3726#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY) 3727 3728/* Remove routers and update dst entries when gateway turn into host. */ | 3640 /* need to update cache as well */ 3641 rt6_exceptions_remove_prefsrc(rt); 3642 spin_unlock_bh(&rt6_exception_lock); 3643 } 3644 return 0; 3645} 3646 3647void rt6_remove_prefsrc(struct inet6_ifaddr *ifp) --- 5 unchanged lines hidden (view full) --- 3653 .addr = &ifp->addr, 3654 }; 3655 fib6_clean_all(net, fib6_remove_prefsrc, &adni); 3656} 3657 3658#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY) 3659 3660/* Remove routers and update dst entries when gateway turn into host. */ |
3729static int fib6_clean_tohost(struct fib6_info *rt, void *arg) | 3661static int fib6_clean_tohost(struct rt6_info *rt, void *arg) |
3730{ 3731 struct in6_addr *gateway = (struct in6_addr *)arg; 3732 | 3662{ 3663 struct in6_addr *gateway = (struct in6_addr *)arg; 3664 |
3733 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) && 3734 ipv6_addr_equal(gateway, &rt->fib6_nh.nh_gw)) { | 3665 if (((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) && 3666 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) { |
3735 return -1; 3736 } 3737 3738 /* Further clean up cached routes in exception table. 3739 * This is needed because cached route may have a different 3740 * gateway than its 'parent' in the case of an ip redirect. 3741 */ 3742 rt6_exceptions_clean_tohost(rt, gateway); --- 9 unchanged lines hidden (view full) --- 3752struct arg_netdev_event { 3753 const struct net_device *dev; 3754 union { 3755 unsigned int nh_flags; 3756 unsigned long event; 3757 }; 3758}; 3759 | 3667 return -1; 3668 } 3669 3670 /* Further clean up cached routes in exception table. 3671 * This is needed because cached route may have a different 3672 * gateway than its 'parent' in the case of an ip redirect. 3673 */ 3674 rt6_exceptions_clean_tohost(rt, gateway); --- 9 unchanged lines hidden (view full) --- 3684struct arg_netdev_event { 3685 const struct net_device *dev; 3686 union { 3687 unsigned int nh_flags; 3688 unsigned long event; 3689 }; 3690}; 3691 |
3760static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt) | 3692static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt) |
3761{ | 3693{ |
3762 struct fib6_info *iter; | 3694 struct rt6_info *iter; |
3763 struct fib6_node *fn; 3764 | 3695 struct fib6_node *fn; 3696 |
3765 fn = rcu_dereference_protected(rt->fib6_node, 3766 lockdep_is_held(&rt->fib6_table->tb6_lock)); | 3697 fn = rcu_dereference_protected(rt->rt6i_node, 3698 lockdep_is_held(&rt->rt6i_table->tb6_lock)); |
3767 iter = rcu_dereference_protected(fn->leaf, | 3699 iter = rcu_dereference_protected(fn->leaf, |
3768 lockdep_is_held(&rt->fib6_table->tb6_lock)); | 3700 lockdep_is_held(&rt->rt6i_table->tb6_lock)); |
3769 while (iter) { | 3701 while (iter) { |
3770 if (iter->fib6_metric == rt->fib6_metric && | 3702 if (iter->rt6i_metric == rt->rt6i_metric && |
3771 rt6_qualify_for_ecmp(iter)) 3772 return iter; 3773 iter = rcu_dereference_protected(iter->rt6_next, | 3703 rt6_qualify_for_ecmp(iter)) 3704 return iter; 3705 iter = rcu_dereference_protected(iter->rt6_next, |
3774 lockdep_is_held(&rt->fib6_table->tb6_lock)); | 3706 lockdep_is_held(&rt->rt6i_table->tb6_lock)); |
3775 } 3776 3777 return NULL; 3778} 3779 | 3707 } 3708 3709 return NULL; 3710} 3711 |
3780static bool rt6_is_dead(const struct fib6_info *rt) | 3712static bool rt6_is_dead(const struct rt6_info *rt) |
3781{ | 3713{ |
3782 if (rt->fib6_nh.nh_flags & RTNH_F_DEAD || 3783 (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN && 3784 fib6_ignore_linkdown(rt))) | 3714 if (rt->rt6i_nh_flags & RTNH_F_DEAD || 3715 (rt->rt6i_nh_flags & RTNH_F_LINKDOWN && 3716 rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) |
3785 return true; 3786 3787 return false; 3788} 3789 | 3717 return true; 3718 3719 return false; 3720} 3721 |
3790static int rt6_multipath_total_weight(const struct fib6_info *rt) | 3722static int rt6_multipath_total_weight(const struct rt6_info *rt) |
3791{ | 3723{ |
3792 struct fib6_info *iter; | 3724 struct rt6_info *iter; |
3793 int total = 0; 3794 3795 if (!rt6_is_dead(rt)) | 3725 int total = 0; 3726 3727 if (!rt6_is_dead(rt)) |
3796 total += rt->fib6_nh.nh_weight; | 3728 total += rt->rt6i_nh_weight; |
3797 | 3729 |
3798 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) { | 3730 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) { |
3799 if (!rt6_is_dead(iter)) | 3731 if (!rt6_is_dead(iter)) |
3800 total += iter->fib6_nh.nh_weight; | 3732 total += iter->rt6i_nh_weight; |
3801 } 3802 3803 return total; 3804} 3805 | 3733 } 3734 3735 return total; 3736} 3737 |
3806static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total) | 3738static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total) |
3807{ 3808 int upper_bound = -1; 3809 3810 if (!rt6_is_dead(rt)) { | 3739{ 3740 int upper_bound = -1; 3741 3742 if (!rt6_is_dead(rt)) { |
3811 *weight += rt->fib6_nh.nh_weight; | 3743 *weight += rt->rt6i_nh_weight; |
3812 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31, 3813 total) - 1; 3814 } | 3744 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31, 3745 total) - 1; 3746 } |
3815 atomic_set(&rt->fib6_nh.nh_upper_bound, upper_bound); | 3747 atomic_set(&rt->rt6i_nh_upper_bound, upper_bound); |
3816} 3817 | 3748} 3749 |
3818static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total) | 3750static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total) |
3819{ | 3751{ |
3820 struct fib6_info *iter; | 3752 struct rt6_info *iter; |
3821 int weight = 0; 3822 3823 rt6_upper_bound_set(rt, &weight, total); 3824 | 3753 int weight = 0; 3754 3755 rt6_upper_bound_set(rt, &weight, total); 3756 |
3825 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) | 3757 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) |
3826 rt6_upper_bound_set(iter, &weight, total); 3827} 3828 | 3758 rt6_upper_bound_set(iter, &weight, total); 3759} 3760 |
3829void rt6_multipath_rebalance(struct fib6_info *rt) | 3761void rt6_multipath_rebalance(struct rt6_info *rt) |
3830{ | 3762{ |
3831 struct fib6_info *first; | 3763 struct rt6_info *first; |
3832 int total; 3833 3834 /* In case the entire multipath route was marked for flushing, 3835 * then there is no need to rebalance upon the removal of every 3836 * sibling route. 3837 */ | 3764 int total; 3765 3766 /* In case the entire multipath route was marked for flushing, 3767 * then there is no need to rebalance upon the removal of every 3768 * sibling route. 3769 */ |
3838 if (!rt->fib6_nsiblings || rt->should_flush) | 3770 if (!rt->rt6i_nsiblings || rt->should_flush) |
3839 return; 3840 3841 /* During lookup routes are evaluated in order, so we need to 3842 * make sure upper bounds are assigned from the first sibling 3843 * onwards. 3844 */ 3845 first = rt6_multipath_first_sibling(rt); 3846 if (WARN_ON_ONCE(!first)) 3847 return; 3848 3849 total = rt6_multipath_total_weight(first); 3850 rt6_multipath_upper_bound_set(first, total); 3851} 3852 | 3771 return; 3772 3773 /* During lookup routes are evaluated in order, so we need to 3774 * make sure upper bounds are assigned from the first sibling 3775 * onwards. 3776 */ 3777 first = rt6_multipath_first_sibling(rt); 3778 if (WARN_ON_ONCE(!first)) 3779 return; 3780 3781 total = rt6_multipath_total_weight(first); 3782 rt6_multipath_upper_bound_set(first, total); 3783} 3784 |
3853static int fib6_ifup(struct fib6_info *rt, void *p_arg) | 3785static int fib6_ifup(struct rt6_info *rt, void *p_arg) |
3854{ 3855 const struct arg_netdev_event *arg = p_arg; | 3786{ 3787 const struct arg_netdev_event *arg = p_arg; |
3856 struct net *net = dev_net(arg->dev); | 3788 const struct net *net = dev_net(arg->dev); |
3857 | 3789 |
3858 if (rt != net->ipv6.fib6_null_entry && rt->fib6_nh.nh_dev == arg->dev) { 3859 rt->fib6_nh.nh_flags &= ~arg->nh_flags; 3860 fib6_update_sernum_upto_root(net, rt); | 3790 if (rt != net->ipv6.ip6_null_entry && rt->dst.dev == arg->dev) { 3791 rt->rt6i_nh_flags &= ~arg->nh_flags; 3792 fib6_update_sernum_upto_root(dev_net(rt->dst.dev), rt); |
3861 rt6_multipath_rebalance(rt); 3862 } 3863 3864 return 0; 3865} 3866 3867void rt6_sync_up(struct net_device *dev, unsigned int nh_flags) 3868{ --- 5 unchanged lines hidden (view full) --- 3874 }; 3875 3876 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev)) 3877 arg.nh_flags |= RTNH_F_LINKDOWN; 3878 3879 fib6_clean_all(dev_net(dev), fib6_ifup, &arg); 3880} 3881 | 3793 rt6_multipath_rebalance(rt); 3794 } 3795 3796 return 0; 3797} 3798 3799void rt6_sync_up(struct net_device *dev, unsigned int nh_flags) 3800{ --- 5 unchanged lines hidden (view full) --- 3806 }; 3807 3808 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev)) 3809 arg.nh_flags |= RTNH_F_LINKDOWN; 3810 3811 fib6_clean_all(dev_net(dev), fib6_ifup, &arg); 3812} 3813 |
3882static bool rt6_multipath_uses_dev(const struct fib6_info *rt, | 3814static bool rt6_multipath_uses_dev(const struct rt6_info *rt, |
3883 const struct net_device *dev) 3884{ | 3815 const struct net_device *dev) 3816{ |
3885 struct fib6_info *iter; | 3817 struct rt6_info *iter; |
3886 | 3818 |
3887 if (rt->fib6_nh.nh_dev == dev) | 3819 if (rt->dst.dev == dev) |
3888 return true; | 3820 return true; |
3889 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 3890 if (iter->fib6_nh.nh_dev == dev) | 3821 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) 3822 if (iter->dst.dev == dev) |
3891 return true; 3892 3893 return false; 3894} 3895 | 3823 return true; 3824 3825 return false; 3826} 3827 |
3896static void rt6_multipath_flush(struct fib6_info *rt) | 3828static void rt6_multipath_flush(struct rt6_info *rt) |
3897{ | 3829{ |
3898 struct fib6_info *iter; | 3830 struct rt6_info *iter; |
3899 3900 rt->should_flush = 1; | 3831 3832 rt->should_flush = 1; |
3901 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) | 3833 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) |
3902 iter->should_flush = 1; 3903} 3904 | 3834 iter->should_flush = 1; 3835} 3836 |
3905static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt, | 3837static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt, |
3906 const struct net_device *down_dev) 3907{ | 3838 const struct net_device *down_dev) 3839{ |
3908 struct fib6_info *iter; | 3840 struct rt6_info *iter; |
3909 unsigned int dead = 0; 3910 | 3841 unsigned int dead = 0; 3842 |
3911 if (rt->fib6_nh.nh_dev == down_dev || 3912 rt->fib6_nh.nh_flags & RTNH_F_DEAD) | 3843 if (rt->dst.dev == down_dev || rt->rt6i_nh_flags & RTNH_F_DEAD) |
3913 dead++; | 3844 dead++; |
3914 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 3915 if (iter->fib6_nh.nh_dev == down_dev || 3916 iter->fib6_nh.nh_flags & RTNH_F_DEAD) | 3845 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) 3846 if (iter->dst.dev == down_dev || 3847 iter->rt6i_nh_flags & RTNH_F_DEAD) |
3917 dead++; 3918 3919 return dead; 3920} 3921 | 3848 dead++; 3849 3850 return dead; 3851} 3852 |
3922static void rt6_multipath_nh_flags_set(struct fib6_info *rt, | 3853static void rt6_multipath_nh_flags_set(struct rt6_info *rt, |
3923 const struct net_device *dev, 3924 unsigned int nh_flags) 3925{ | 3854 const struct net_device *dev, 3855 unsigned int nh_flags) 3856{ |
3926 struct fib6_info *iter; | 3857 struct rt6_info *iter; |
3927 | 3858 |
3928 if (rt->fib6_nh.nh_dev == dev) 3929 rt->fib6_nh.nh_flags |= nh_flags; 3930 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) 3931 if (iter->fib6_nh.nh_dev == dev) 3932 iter->fib6_nh.nh_flags |= nh_flags; | 3859 if (rt->dst.dev == dev) 3860 rt->rt6i_nh_flags |= nh_flags; 3861 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) 3862 if (iter->dst.dev == dev) 3863 iter->rt6i_nh_flags |= nh_flags; |
3933} 3934 3935/* called with write lock held for table with rt */ | 3864} 3865 3866/* called with write lock held for table with rt */ |
3936static int fib6_ifdown(struct fib6_info *rt, void *p_arg) | 3867static int fib6_ifdown(struct rt6_info *rt, void *p_arg) |
3937{ 3938 const struct arg_netdev_event *arg = p_arg; 3939 const struct net_device *dev = arg->dev; | 3868{ 3869 const struct arg_netdev_event *arg = p_arg; 3870 const struct net_device *dev = arg->dev; |
3940 struct net *net = dev_net(dev); | 3871 const struct net *net = dev_net(dev); |
3941 | 3872 |
3942 if (rt == net->ipv6.fib6_null_entry) | 3873 if (rt == net->ipv6.ip6_null_entry) |
3943 return 0; 3944 3945 switch (arg->event) { 3946 case NETDEV_UNREGISTER: | 3874 return 0; 3875 3876 switch (arg->event) { 3877 case NETDEV_UNREGISTER: |
3947 return rt->fib6_nh.nh_dev == dev ? -1 : 0; | 3878 return rt->dst.dev == dev ? -1 : 0; |
3948 case NETDEV_DOWN: 3949 if (rt->should_flush) 3950 return -1; | 3879 case NETDEV_DOWN: 3880 if (rt->should_flush) 3881 return -1; |
3951 if (!rt->fib6_nsiblings) 3952 return rt->fib6_nh.nh_dev == dev ? -1 : 0; | 3882 if (!rt->rt6i_nsiblings) 3883 return rt->dst.dev == dev ? -1 : 0; |
3953 if (rt6_multipath_uses_dev(rt, dev)) { 3954 unsigned int count; 3955 3956 count = rt6_multipath_dead_count(rt, dev); | 3884 if (rt6_multipath_uses_dev(rt, dev)) { 3885 unsigned int count; 3886 3887 count = rt6_multipath_dead_count(rt, dev); |
3957 if (rt->fib6_nsiblings + 1 == count) { | 3888 if (rt->rt6i_nsiblings + 1 == count) { |
3958 rt6_multipath_flush(rt); 3959 return -1; 3960 } 3961 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD | 3962 RTNH_F_LINKDOWN); | 3889 rt6_multipath_flush(rt); 3890 return -1; 3891 } 3892 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD | 3893 RTNH_F_LINKDOWN); |
3963 fib6_update_sernum(net, rt); | 3894 fib6_update_sernum(rt); |
3964 rt6_multipath_rebalance(rt); 3965 } 3966 return -2; 3967 case NETDEV_CHANGE: | 3895 rt6_multipath_rebalance(rt); 3896 } 3897 return -2; 3898 case NETDEV_CHANGE: |
3968 if (rt->fib6_nh.nh_dev != dev || 3969 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) | 3899 if (rt->dst.dev != dev || 3900 rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) |
3970 break; | 3901 break; |
3971 rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN; | 3902 rt->rt6i_nh_flags |= RTNH_F_LINKDOWN; |
3972 rt6_multipath_rebalance(rt); 3973 break; 3974 } 3975 3976 return 0; 3977} 3978 3979void rt6_sync_down_dev(struct net_device *dev, unsigned long event) --- 15 unchanged lines hidden (view full) --- 3995 neigh_ifdown(&nd_tbl, dev); 3996} 3997 3998struct rt6_mtu_change_arg { 3999 struct net_device *dev; 4000 unsigned int mtu; 4001}; 4002 | 3903 rt6_multipath_rebalance(rt); 3904 break; 3905 } 3906 3907 return 0; 3908} 3909 3910void rt6_sync_down_dev(struct net_device *dev, unsigned long event) --- 15 unchanged lines hidden (view full) --- 3926 neigh_ifdown(&nd_tbl, dev); 3927} 3928 3929struct rt6_mtu_change_arg { 3930 struct net_device *dev; 3931 unsigned int mtu; 3932}; 3933 |
4003static int rt6_mtu_change_route(struct fib6_info *rt, void *p_arg) | 3934static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) |
4004{ 4005 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; 4006 struct inet6_dev *idev; 4007 4008 /* In IPv6 pmtu discovery is not optional, 4009 so that RTAX_MTU lock cannot disable it. 4010 We still use this lock to block changes 4011 caused by addrconf/ndisc. 4012 */ 4013 4014 idev = __in6_dev_get(arg->dev); 4015 if (!idev) 4016 return 0; 4017 4018 /* For administrative MTU increase, there is no way to discover 4019 IPv6 PMTU increase, so PMTU increase should be updated here. 4020 Since RFC 1981 doesn't include administrative MTU increase 4021 update PMTU increase is a MUST. (i.e. jumbo frame) 4022 */ | 3935{ 3936 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; 3937 struct inet6_dev *idev; 3938 3939 /* In IPv6 pmtu discovery is not optional, 3940 so that RTAX_MTU lock cannot disable it. 3941 We still use this lock to block changes 3942 caused by addrconf/ndisc. 3943 */ 3944 3945 idev = __in6_dev_get(arg->dev); 3946 if (!idev) 3947 return 0; 3948 3949 /* For administrative MTU increase, there is no way to discover 3950 IPv6 PMTU increase, so PMTU increase should be updated here. 3951 Since RFC 1981 doesn't include administrative MTU increase 3952 update PMTU increase is a MUST. (i.e. jumbo frame) 3953 */ |
4023 if (rt->fib6_nh.nh_dev == arg->dev && 4024 !fib6_metric_locked(rt, RTAX_MTU)) { 4025 u32 mtu = rt->fib6_pmtu; 4026 4027 if (mtu >= arg->mtu || 4028 (mtu < arg->mtu && mtu == idev->cnf.mtu6)) 4029 fib6_metric_set(rt, RTAX_MTU, arg->mtu); 4030 | 3954 if (rt->dst.dev == arg->dev && 3955 !dst_metric_locked(&rt->dst, RTAX_MTU)) { |
4031 spin_lock_bh(&rt6_exception_lock); | 3956 spin_lock_bh(&rt6_exception_lock); |
3957 if (dst_metric_raw(&rt->dst, RTAX_MTU) && 3958 rt6_mtu_change_route_allowed(idev, rt, arg->mtu)) 3959 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); |
|
4032 rt6_exceptions_update_pmtu(idev, rt, arg->mtu); 4033 spin_unlock_bh(&rt6_exception_lock); 4034 } 4035 return 0; 4036} 4037 4038void rt6_mtu_change(struct net_device *dev, unsigned int mtu) 4039{ 4040 struct rt6_mtu_change_arg arg = { 4041 .dev = dev, 4042 .mtu = mtu, 4043 }; 4044 4045 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg); 4046} 4047 4048static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { 4049 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, | 3960 rt6_exceptions_update_pmtu(idev, rt, arg->mtu); 3961 spin_unlock_bh(&rt6_exception_lock); 3962 } 3963 return 0; 3964} 3965 3966void rt6_mtu_change(struct net_device *dev, unsigned int mtu) 3967{ 3968 struct rt6_mtu_change_arg arg = { 3969 .dev = dev, 3970 .mtu = mtu, 3971 }; 3972 3973 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg); 3974} 3975 3976static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { 3977 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, |
3978 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) }, |
|
4050 [RTA_OIF] = { .type = NLA_U32 }, 4051 [RTA_IIF] = { .type = NLA_U32 }, 4052 [RTA_PRIORITY] = { .type = NLA_U32 }, 4053 [RTA_METRICS] = { .type = NLA_NESTED }, 4054 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, 4055 [RTA_PREF] = { .type = NLA_U8 }, 4056 [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, 4057 [RTA_ENCAP] = { .type = NLA_NESTED }, 4058 [RTA_EXPIRES] = { .type = NLA_U32 }, 4059 [RTA_UID] = { .type = NLA_U32 }, 4060 [RTA_MARK] = { .type = NLA_U32 }, | 3979 [RTA_OIF] = { .type = NLA_U32 }, 3980 [RTA_IIF] = { .type = NLA_U32 }, 3981 [RTA_PRIORITY] = { .type = NLA_U32 }, 3982 [RTA_METRICS] = { .type = NLA_NESTED }, 3983 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, 3984 [RTA_PREF] = { .type = NLA_U8 }, 3985 [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, 3986 [RTA_ENCAP] = { .type = NLA_NESTED }, 3987 [RTA_EXPIRES] = { .type = NLA_U32 }, 3988 [RTA_UID] = { .type = NLA_U32 }, 3989 [RTA_MARK] = { .type = NLA_U32 }, |
3990 [RTA_TABLE] = { .type = NLA_U32 }, |
|
4061}; 4062 4063static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, 4064 struct fib6_config *cfg, 4065 struct netlink_ext_ack *extack) 4066{ 4067 struct rtmsg *rtm; 4068 struct nlattr *tb[RTA_MAX+1]; --- 113 unchanged lines hidden (view full) --- 4182 } 4183 4184 err = 0; 4185errout: 4186 return err; 4187} 4188 4189struct rt6_nh { | 3991}; 3992 3993static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, 3994 struct fib6_config *cfg, 3995 struct netlink_ext_ack *extack) 3996{ 3997 struct rtmsg *rtm; 3998 struct nlattr *tb[RTA_MAX+1]; --- 113 unchanged lines hidden (view full) --- 4112 } 4113 4114 err = 0; 4115errout: 4116 return err; 4117} 4118 4119struct rt6_nh { |
4190 struct fib6_info *fib6_info; | 4120 struct rt6_info *rt6_info; |
4191 struct fib6_config r_cfg; | 4121 struct fib6_config r_cfg; |
4122 struct mx6_config mxc; |
|
4192 struct list_head next; 4193}; 4194 4195static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) 4196{ 4197 struct rt6_nh *nh; 4198 4199 list_for_each_entry(nh, rt6_nh_list, next) { 4200 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n", 4201 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway, 4202 nh->r_cfg.fc_ifindex); 4203 } 4204} 4205 | 4123 struct list_head next; 4124}; 4125 4126static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) 4127{ 4128 struct rt6_nh *nh; 4129 4130 list_for_each_entry(nh, rt6_nh_list, next) { 4131 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n", 4132 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway, 4133 nh->r_cfg.fc_ifindex); 4134 } 4135} 4136 |
4206static int ip6_route_info_append(struct net *net, 4207 struct list_head *rt6_nh_list, 4208 struct fib6_info *rt, 4209 struct fib6_config *r_cfg) | 4137static int ip6_route_info_append(struct list_head *rt6_nh_list, 4138 struct rt6_info *rt, struct fib6_config *r_cfg) |
4210{ 4211 struct rt6_nh *nh; 4212 int err = -EEXIST; 4213 4214 list_for_each_entry(nh, rt6_nh_list, next) { | 4139{ 4140 struct rt6_nh *nh; 4141 int err = -EEXIST; 4142 4143 list_for_each_entry(nh, rt6_nh_list, next) { |
4215 /* check if fib6_info already exists */ 4216 if (rt6_duplicate_nexthop(nh->fib6_info, rt)) | 4144 /* check if rt6_info already exists */ 4145 if (rt6_duplicate_nexthop(nh->rt6_info, rt)) |
4217 return err; 4218 } 4219 4220 nh = kzalloc(sizeof(*nh), GFP_KERNEL); 4221 if (!nh) 4222 return -ENOMEM; | 4146 return err; 4147 } 4148 4149 nh = kzalloc(sizeof(*nh), GFP_KERNEL); 4150 if (!nh) 4151 return -ENOMEM; |
4223 nh->fib6_info = rt; 4224 err = ip6_convert_metrics(net, rt, r_cfg); | 4152 nh->rt6_info = rt; 4153 err = ip6_convert_metrics(&nh->mxc, r_cfg); |
4225 if (err) { 4226 kfree(nh); 4227 return err; 4228 } 4229 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); 4230 list_add_tail(&nh->next, rt6_nh_list); 4231 4232 return 0; 4233} 4234 | 4154 if (err) { 4155 kfree(nh); 4156 return err; 4157 } 4158 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); 4159 list_add_tail(&nh->next, rt6_nh_list); 4160 4161 return 0; 4162} 4163 |
4235static void ip6_route_mpath_notify(struct fib6_info *rt, 4236 struct fib6_info *rt_last, | 4164static void ip6_route_mpath_notify(struct rt6_info *rt, 4165 struct rt6_info *rt_last, |
4237 struct nl_info *info, 4238 __u16 nlflags) 4239{ 4240 /* if this is an APPEND route, then rt points to the first route 4241 * inserted and rt_last points to last route inserted. Userspace 4242 * wants a consistent dump of the route which starts at the first 4243 * nexthop. Since sibling routes are always added at the end of 4244 * the list, find the first sibling of the last route appended 4245 */ | 4166 struct nl_info *info, 4167 __u16 nlflags) 4168{ 4169 /* if this is an APPEND route, then rt points to the first route 4170 * inserted and rt_last points to last route inserted. Userspace 4171 * wants a consistent dump of the route which starts at the first 4172 * nexthop. Since sibling routes are always added at the end of 4173 * the list, find the first sibling of the last route appended 4174 */ |
4246 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) { 4247 rt = list_first_entry(&rt_last->fib6_siblings, 4248 struct fib6_info, 4249 fib6_siblings); | 4175 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) { 4176 rt = list_first_entry(&rt_last->rt6i_siblings, 4177 struct rt6_info, 4178 rt6i_siblings); |
4250 } 4251 4252 if (rt) 4253 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); 4254} 4255 4256static int ip6_route_multipath_add(struct fib6_config *cfg, 4257 struct netlink_ext_ack *extack) 4258{ | 4179 } 4180 4181 if (rt) 4182 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); 4183} 4184 4185static int ip6_route_multipath_add(struct fib6_config *cfg, 4186 struct netlink_ext_ack *extack) 4187{ |
4259 struct fib6_info *rt_notif = NULL, *rt_last = NULL; | 4188 struct rt6_info *rt_notif = NULL, *rt_last = NULL; |
4260 struct nl_info *info = &cfg->fc_nlinfo; 4261 struct fib6_config r_cfg; 4262 struct rtnexthop *rtnh; | 4189 struct nl_info *info = &cfg->fc_nlinfo; 4190 struct fib6_config r_cfg; 4191 struct rtnexthop *rtnh; |
4263 struct fib6_info *rt; | 4192 struct rt6_info *rt; |
4264 struct rt6_nh *err_nh; 4265 struct rt6_nh *nh, *nh_safe; 4266 __u16 nlflags; 4267 int remaining; 4268 int attrlen; 4269 int err = 1; 4270 int nhn = 0; 4271 int replace = (cfg->fc_nlinfo.nlh && 4272 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE)); 4273 LIST_HEAD(rt6_nh_list); 4274 4275 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE; 4276 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND) 4277 nlflags |= NLM_F_APPEND; 4278 4279 remaining = cfg->fc_mp_len; 4280 rtnh = (struct rtnexthop *)cfg->fc_mp; 4281 4282 /* Parse a Multipath Entry and build a list (rt6_nh_list) of | 4193 struct rt6_nh *err_nh; 4194 struct rt6_nh *nh, *nh_safe; 4195 __u16 nlflags; 4196 int remaining; 4197 int attrlen; 4198 int err = 1; 4199 int nhn = 0; 4200 int replace = (cfg->fc_nlinfo.nlh && 4201 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE)); 4202 LIST_HEAD(rt6_nh_list); 4203 4204 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE; 4205 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND) 4206 nlflags |= NLM_F_APPEND; 4207 4208 remaining = cfg->fc_mp_len; 4209 rtnh = (struct rtnexthop *)cfg->fc_mp; 4210 4211 /* Parse a Multipath Entry and build a list (rt6_nh_list) of |
4283 * fib6_info structs per nexthop | 4212 * rt6_info structs per nexthop |
4284 */ 4285 while (rtnh_ok(rtnh, remaining)) { 4286 memcpy(&r_cfg, cfg, sizeof(*cfg)); 4287 if (rtnh->rtnh_ifindex) 4288 r_cfg.fc_ifindex = rtnh->rtnh_ifindex; 4289 4290 attrlen = rtnh_attrlen(rtnh); 4291 if (attrlen > 0) { --- 6 unchanged lines hidden (view full) --- 4298 } 4299 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); 4300 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); 4301 if (nla) 4302 r_cfg.fc_encap_type = nla_get_u16(nla); 4303 } 4304 4305 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK); | 4213 */ 4214 while (rtnh_ok(rtnh, remaining)) { 4215 memcpy(&r_cfg, cfg, sizeof(*cfg)); 4216 if (rtnh->rtnh_ifindex) 4217 r_cfg.fc_ifindex = rtnh->rtnh_ifindex; 4218 4219 attrlen = rtnh_attrlen(rtnh); 4220 if (attrlen > 0) { --- 6 unchanged lines hidden (view full) --- 4227 } 4228 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); 4229 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); 4230 if (nla) 4231 r_cfg.fc_encap_type = nla_get_u16(nla); 4232 } 4233 4234 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK); |
4306 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack); | 4235 rt = ip6_route_info_create(&r_cfg, extack); |
4307 if (IS_ERR(rt)) { 4308 err = PTR_ERR(rt); 4309 rt = NULL; 4310 goto cleanup; 4311 } 4312 | 4236 if (IS_ERR(rt)) { 4237 err = PTR_ERR(rt); 4238 rt = NULL; 4239 goto cleanup; 4240 } 4241 |
4313 rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1; | 4242 rt->rt6i_nh_weight = rtnh->rtnh_hops + 1; |
4314 | 4243 |
4315 err = ip6_route_info_append(info->nl_net, &rt6_nh_list, 4316 rt, &r_cfg); | 4244 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg); |
4317 if (err) { | 4245 if (err) { |
4318 fib6_info_release(rt); | 4246 dst_release_immediate(&rt->dst); |
4319 goto cleanup; 4320 } 4321 4322 rtnh = rtnh_next(rtnh, &remaining); 4323 } 4324 4325 /* for add and replace send one notification with all nexthops. 4326 * Skip the notification in fib6_add_rt2node and send one with 4327 * the full route when done 4328 */ 4329 info->skip_notify = 1; 4330 4331 err_nh = NULL; 4332 list_for_each_entry(nh, &rt6_nh_list, next) { | 4247 goto cleanup; 4248 } 4249 4250 rtnh = rtnh_next(rtnh, &remaining); 4251 } 4252 4253 /* for add and replace send one notification with all nexthops. 4254 * Skip the notification in fib6_add_rt2node and send one with 4255 * the full route when done 4256 */ 4257 info->skip_notify = 1; 4258 4259 err_nh = NULL; 4260 list_for_each_entry(nh, &rt6_nh_list, next) { |
4333 rt_last = nh->fib6_info; 4334 err = __ip6_ins_rt(nh->fib6_info, info, extack); 4335 fib6_info_release(nh->fib6_info); 4336 | 4261 rt_last = nh->rt6_info; 4262 err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack); |
4337 /* save reference to first route for notification */ 4338 if (!rt_notif && !err) | 4263 /* save reference to first route for notification */ 4264 if (!rt_notif && !err) |
4339 rt_notif = nh->fib6_info; | 4265 rt_notif = nh->rt6_info; |
4340 | 4266 |
4341 /* nh->fib6_info is used or freed at this point, reset to NULL*/ 4342 nh->fib6_info = NULL; | 4267 /* nh->rt6_info is used or freed at this point, reset to NULL*/ 4268 nh->rt6_info = NULL; |
4343 if (err) { 4344 if (replace && nhn) 4345 ip6_print_replace_route_err(&rt6_nh_list); 4346 err_nh = nh; 4347 goto add_errout; 4348 } 4349 4350 /* Because each route is added like a single route we remove --- 24 unchanged lines hidden (view full) --- 4375 list_for_each_entry(nh, &rt6_nh_list, next) { 4376 if (err_nh == nh) 4377 break; 4378 ip6_route_del(&nh->r_cfg, extack); 4379 } 4380 4381cleanup: 4382 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { | 4269 if (err) { 4270 if (replace && nhn) 4271 ip6_print_replace_route_err(&rt6_nh_list); 4272 err_nh = nh; 4273 goto add_errout; 4274 } 4275 4276 /* Because each route is added like a single route we remove --- 24 unchanged lines hidden (view full) --- 4301 list_for_each_entry(nh, &rt6_nh_list, next) { 4302 if (err_nh == nh) 4303 break; 4304 ip6_route_del(&nh->r_cfg, extack); 4305 } 4306 4307cleanup: 4308 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { |
4383 if (nh->fib6_info) 4384 fib6_info_release(nh->fib6_info); | 4309 if (nh->rt6_info) 4310 dst_release_immediate(&nh->rt6_info->dst); 4311 kfree(nh->mxc.mx); |
4385 list_del(&nh->next); 4386 kfree(nh); 4387 } 4388 4389 return err; 4390} 4391 4392static int ip6_route_multipath_del(struct fib6_config *cfg, --- 60 unchanged lines hidden (view full) --- 4453 4454 err = rtm_to_fib6_config(skb, nlh, &cfg, extack); 4455 if (err < 0) 4456 return err; 4457 4458 if (cfg.fc_mp) 4459 return ip6_route_multipath_add(&cfg, extack); 4460 else | 4312 list_del(&nh->next); 4313 kfree(nh); 4314 } 4315 4316 return err; 4317} 4318 4319static int ip6_route_multipath_del(struct fib6_config *cfg, --- 60 unchanged lines hidden (view full) --- 4380 4381 err = rtm_to_fib6_config(skb, nlh, &cfg, extack); 4382 if (err < 0) 4383 return err; 4384 4385 if (cfg.fc_mp) 4386 return ip6_route_multipath_add(&cfg, extack); 4387 else |
4461 return ip6_route_add(&cfg, GFP_KERNEL, extack); | 4388 return ip6_route_add(&cfg, extack); |
4462} 4463 | 4389} 4390 |
4464static size_t rt6_nlmsg_size(struct fib6_info *rt) | 4391static size_t rt6_nlmsg_size(struct rt6_info *rt) |
4465{ 4466 int nexthop_len = 0; 4467 | 4392{ 4393 int nexthop_len = 0; 4394 |
4468 if (rt->fib6_nsiblings) { | 4395 if (rt->rt6i_nsiblings) { |
4469 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ 4470 + NLA_ALIGN(sizeof(struct rtnexthop)) 4471 + nla_total_size(16) /* RTA_GATEWAY */ | 4396 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ 4397 + NLA_ALIGN(sizeof(struct rtnexthop)) 4398 + nla_total_size(16) /* RTA_GATEWAY */ |
4472 + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate); | 4399 + lwtunnel_get_encap_size(rt->dst.lwtstate); |
4473 | 4400 |
4474 nexthop_len *= rt->fib6_nsiblings; | 4401 nexthop_len *= rt->rt6i_nsiblings; |
4475 } 4476 4477 return NLMSG_ALIGN(sizeof(struct rtmsg)) 4478 + nla_total_size(16) /* RTA_SRC */ 4479 + nla_total_size(16) /* RTA_DST */ 4480 + nla_total_size(16) /* RTA_GATEWAY */ 4481 + nla_total_size(16) /* RTA_PREFSRC */ 4482 + nla_total_size(4) /* RTA_TABLE */ 4483 + nla_total_size(4) /* RTA_IIF */ 4484 + nla_total_size(4) /* RTA_OIF */ 4485 + nla_total_size(4) /* RTA_PRIORITY */ 4486 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */ 4487 + nla_total_size(sizeof(struct rta_cacheinfo)) 4488 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ 4489 + nla_total_size(1) /* RTA_PREF */ | 4402 } 4403 4404 return NLMSG_ALIGN(sizeof(struct rtmsg)) 4405 + nla_total_size(16) /* RTA_SRC */ 4406 + nla_total_size(16) /* RTA_DST */ 4407 + nla_total_size(16) /* RTA_GATEWAY */ 4408 + nla_total_size(16) /* RTA_PREFSRC */ 4409 + nla_total_size(4) /* RTA_TABLE */ 4410 + nla_total_size(4) /* RTA_IIF */ 4411 + nla_total_size(4) /* RTA_OIF */ 4412 + nla_total_size(4) /* RTA_PRIORITY */ 4413 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */ 4414 + nla_total_size(sizeof(struct rta_cacheinfo)) 4415 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ 4416 + nla_total_size(1) /* RTA_PREF */ |
4490 + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate) | 4417 + lwtunnel_get_encap_size(rt->dst.lwtstate) |
4491 + nexthop_len; 4492} 4493 | 4418 + nexthop_len; 4419} 4420 |
4494static int rt6_nexthop_info(struct sk_buff *skb, struct fib6_info *rt, | 4421static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, |
4495 unsigned int *flags, bool skip_oif) 4496{ | 4422 unsigned int *flags, bool skip_oif) 4423{ |
4497 if (rt->fib6_nh.nh_flags & RTNH_F_DEAD) | 4424 if (rt->rt6i_nh_flags & RTNH_F_DEAD) |
4498 *flags |= RTNH_F_DEAD; 4499 | 4425 *flags |= RTNH_F_DEAD; 4426 |
4500 if (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN) { | 4427 if (rt->rt6i_nh_flags & RTNH_F_LINKDOWN) { |
4501 *flags |= RTNH_F_LINKDOWN; | 4428 *flags |= RTNH_F_LINKDOWN; |
4502 4503 rcu_read_lock(); 4504 if (fib6_ignore_linkdown(rt)) | 4429 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) |
4505 *flags |= RTNH_F_DEAD; | 4430 *flags |= RTNH_F_DEAD; |
4506 rcu_read_unlock(); | |
4507 } 4508 | 4431 } 4432 |
4509 if (rt->fib6_flags & RTF_GATEWAY) { 4510 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->fib6_nh.nh_gw) < 0) | 4433 if (rt->rt6i_flags & RTF_GATEWAY) { 4434 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0) |
4511 goto nla_put_failure; 4512 } 4513 | 4435 goto nla_put_failure; 4436 } 4437 |
4514 *flags |= (rt->fib6_nh.nh_flags & RTNH_F_ONLINK); 4515 if (rt->fib6_nh.nh_flags & RTNH_F_OFFLOAD) | 4438 *flags |= (rt->rt6i_nh_flags & RTNH_F_ONLINK); 4439 if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD) |
4516 *flags |= RTNH_F_OFFLOAD; 4517 4518 /* not needed for multipath encoding b/c it has a rtnexthop struct */ | 4440 *flags |= RTNH_F_OFFLOAD; 4441 4442 /* not needed for multipath encoding b/c it has a rtnexthop struct */ |
4519 if (!skip_oif && rt->fib6_nh.nh_dev && 4520 nla_put_u32(skb, RTA_OIF, rt->fib6_nh.nh_dev->ifindex)) | 4443 if (!skip_oif && rt->dst.dev && 4444 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) |
4521 goto nla_put_failure; 4522 | 4445 goto nla_put_failure; 4446 |
4523 if (rt->fib6_nh.nh_lwtstate && 4524 lwtunnel_fill_encap(skb, rt->fib6_nh.nh_lwtstate) < 0) | 4447 if (rt->dst.lwtstate && 4448 lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0) |
4525 goto nla_put_failure; 4526 4527 return 0; 4528 4529nla_put_failure: 4530 return -EMSGSIZE; 4531} 4532 4533/* add multipath next hop */ | 4449 goto nla_put_failure; 4450 4451 return 0; 4452 4453nla_put_failure: 4454 return -EMSGSIZE; 4455} 4456 4457/* add multipath next hop */ |
4534static int rt6_add_nexthop(struct sk_buff *skb, struct fib6_info *rt) | 4458static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) |
4535{ | 4459{ |
4536 const struct net_device *dev = rt->fib6_nh.nh_dev; | |
4537 struct rtnexthop *rtnh; 4538 unsigned int flags = 0; 4539 4540 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); 4541 if (!rtnh) 4542 goto nla_put_failure; 4543 | 4460 struct rtnexthop *rtnh; 4461 unsigned int flags = 0; 4462 4463 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); 4464 if (!rtnh) 4465 goto nla_put_failure; 4466 |
4544 rtnh->rtnh_hops = rt->fib6_nh.nh_weight - 1; 4545 rtnh->rtnh_ifindex = dev ? dev->ifindex : 0; | 4467 rtnh->rtnh_hops = rt->rt6i_nh_weight - 1; 4468 rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0; |
4546 4547 if (rt6_nexthop_info(skb, rt, &flags, true) < 0) 4548 goto nla_put_failure; 4549 4550 rtnh->rtnh_flags = flags; 4551 4552 /* length of rtnetlink header + attributes */ 4553 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh; 4554 4555 return 0; 4556 4557nla_put_failure: 4558 return -EMSGSIZE; 4559} 4560 | 4469 4470 if (rt6_nexthop_info(skb, rt, &flags, true) < 0) 4471 goto nla_put_failure; 4472 4473 rtnh->rtnh_flags = flags; 4474 4475 /* length of rtnetlink header + attributes */ 4476 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh; 4477 4478 return 0; 4479 4480nla_put_failure: 4481 return -EMSGSIZE; 4482} 4483 |
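In both versions, rt6_add_nexthop() packs one struct rtnexthop per hop into RTA_MULTIPATH: the outgoing interface goes into rtnh_ifindex (hence the skip_oif argument to rt6_nexthop_info()), rtnh_hops holds the hop weight minus one, and rtnh_len is fixed up afterwards to cover the header plus any nested attributes such as RTA_GATEWAY. A hedged userspace sketch of walking that encoding; walk_multipath is a hypothetical helper, not part of route.c:

        #include <stdio.h>
        #include <linux/rtnetlink.h>

        /* 'mp' is the RTA_MULTIPATH attribute of an RTM_NEWROUTE message */
        static void walk_multipath(struct rtattr *mp)
        {
                struct rtnexthop *rtnh = RTA_DATA(mp);
                int len = RTA_PAYLOAD(mp);

                while (RTNH_OK(rtnh, len)) {
                        printf("dev %d weight %d flags %#x\n",
                               rtnh->rtnh_ifindex, rtnh->rtnh_hops + 1,
                               rtnh->rtnh_flags);

                        /* nested per-hop attributes (RTA_GATEWAY, encap, ...) sit
                         * between the rtnexthop header and rtnh_len; RTNH_DATA()
                         * points at them.
                         */
                        len -= NLMSG_ALIGN(rtnh->rtnh_len);
                        rtnh = RTNH_NEXT(rtnh);
                }
        }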
4561static int rt6_fill_node(struct net *net, struct sk_buff *skb, 4562 struct fib6_info *rt, struct dst_entry *dst, 4563 struct in6_addr *dest, struct in6_addr *src, | 4484static int rt6_fill_node(struct net *net, 4485 struct sk_buff *skb, struct rt6_info *rt, 4486 struct in6_addr *dst, struct in6_addr *src, |
4564 int iif, int type, u32 portid, u32 seq, 4565 unsigned int flags) 4566{ | 4487 int iif, int type, u32 portid, u32 seq, 4488 unsigned int flags) 4489{ |
4490 u32 metrics[RTAX_MAX]; |
|
4567 struct rtmsg *rtm; 4568 struct nlmsghdr *nlh; | 4491 struct rtmsg *rtm; 4492 struct nlmsghdr *nlh; |
4569 long expires = 0; 4570 u32 *pmetrics; | 4493 long expires; |
4571 u32 table; 4572 4573 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); 4574 if (!nlh) 4575 return -EMSGSIZE; 4576 4577 rtm = nlmsg_data(nlh); 4578 rtm->rtm_family = AF_INET6; | 4494 u32 table; 4495 4496 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); 4497 if (!nlh) 4498 return -EMSGSIZE; 4499 4500 rtm = nlmsg_data(nlh); 4501 rtm->rtm_family = AF_INET6; |
4579 rtm->rtm_dst_len = rt->fib6_dst.plen; 4580 rtm->rtm_src_len = rt->fib6_src.plen; | 4502 rtm->rtm_dst_len = rt->rt6i_dst.plen; 4503 rtm->rtm_src_len = rt->rt6i_src.plen; |
4581 rtm->rtm_tos = 0; | 4504 rtm->rtm_tos = 0; |
4582 if (rt->fib6_table) 4583 table = rt->fib6_table->tb6_id; | 4505 if (rt->rt6i_table) 4506 table = rt->rt6i_table->tb6_id; |
4584 else 4585 table = RT6_TABLE_UNSPEC; 4586 rtm->rtm_table = table; 4587 if (nla_put_u32(skb, RTA_TABLE, table)) 4588 goto nla_put_failure; | 4507 else 4508 table = RT6_TABLE_UNSPEC; 4509 rtm->rtm_table = table; 4510 if (nla_put_u32(skb, RTA_TABLE, table)) 4511 goto nla_put_failure; |
4589 4590 rtm->rtm_type = rt->fib6_type; | 4512 if (rt->rt6i_flags & RTF_REJECT) { 4513 switch (rt->dst.error) { 4514 case -EINVAL: 4515 rtm->rtm_type = RTN_BLACKHOLE; 4516 break; 4517 case -EACCES: 4518 rtm->rtm_type = RTN_PROHIBIT; 4519 break; 4520 case -EAGAIN: 4521 rtm->rtm_type = RTN_THROW; 4522 break; 4523 default: 4524 rtm->rtm_type = RTN_UNREACHABLE; 4525 break; 4526 } 4527 } 4528 else if (rt->rt6i_flags & RTF_LOCAL) 4529 rtm->rtm_type = RTN_LOCAL; 4530 else if (rt->rt6i_flags & RTF_ANYCAST) 4531 rtm->rtm_type = RTN_ANYCAST; 4532 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) 4533 rtm->rtm_type = RTN_LOCAL; 4534 else 4535 rtm->rtm_type = RTN_UNICAST; |
4591 rtm->rtm_flags = 0; 4592 rtm->rtm_scope = RT_SCOPE_UNIVERSE; | 4536 rtm->rtm_flags = 0; 4537 rtm->rtm_scope = RT_SCOPE_UNIVERSE; |
4593 rtm->rtm_protocol = rt->fib6_protocol; | 4538 rtm->rtm_protocol = rt->rt6i_protocol; |
4594 | 4539 |
4595 if (rt->fib6_flags & RTF_CACHE) | 4540 if (rt->rt6i_flags & RTF_CACHE) |
4596 rtm->rtm_flags |= RTM_F_CLONED; 4597 | 4541 rtm->rtm_flags |= RTM_F_CLONED; 4542 |
4598 if (dest) { 4599 if (nla_put_in6_addr(skb, RTA_DST, dest)) | 4543 if (dst) { 4544 if (nla_put_in6_addr(skb, RTA_DST, dst)) |
4600 goto nla_put_failure; 4601 rtm->rtm_dst_len = 128; 4602 } else if (rtm->rtm_dst_len) | 4545 goto nla_put_failure; 4546 rtm->rtm_dst_len = 128; 4547 } else if (rtm->rtm_dst_len) |
4603 if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr)) | 4548 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr)) |
4604 goto nla_put_failure; 4605#ifdef CONFIG_IPV6_SUBTREES 4606 if (src) { 4607 if (nla_put_in6_addr(skb, RTA_SRC, src)) 4608 goto nla_put_failure; 4609 rtm->rtm_src_len = 128; 4610 } else if (rtm->rtm_src_len && | 4549 goto nla_put_failure; 4550#ifdef CONFIG_IPV6_SUBTREES 4551 if (src) { 4552 if (nla_put_in6_addr(skb, RTA_SRC, src)) 4553 goto nla_put_failure; 4554 rtm->rtm_src_len = 128; 4555 } else if (rtm->rtm_src_len && |
4611 nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr)) | 4556 nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr)) |
4612 goto nla_put_failure; 4613#endif 4614 if (iif) { 4615#ifdef CONFIG_IPV6_MROUTE | 4557 goto nla_put_failure; 4558#endif 4559 if (iif) { 4560#ifdef CONFIG_IPV6_MROUTE |
4616 if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) { | 4561 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { |
4617 int err = ip6mr_get_route(net, skb, rtm, portid); 4618 4619 if (err == 0) 4620 return 0; 4621 if (err < 0) 4622 goto nla_put_failure; 4623 } else 4624#endif 4625 if (nla_put_u32(skb, RTA_IIF, iif)) 4626 goto nla_put_failure; | 4562 int err = ip6mr_get_route(net, skb, rtm, portid); 4563 4564 if (err == 0) 4565 return 0; 4566 if (err < 0) 4567 goto nla_put_failure; 4568 } else 4569#endif 4570 if (nla_put_u32(skb, RTA_IIF, iif)) 4571 goto nla_put_failure; |
4627 } else if (dest) { | 4572 } else if (dst) { |
4628 struct in6_addr saddr_buf; | 4573 struct in6_addr saddr_buf; |
4629 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 && | 4574 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 && |
4630 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 4631 goto nla_put_failure; 4632 } 4633 | 4575 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 4576 goto nla_put_failure; 4577 } 4578 |
4634 if (rt->fib6_prefsrc.plen) { | 4579 if (rt->rt6i_prefsrc.plen) { |
4635 struct in6_addr saddr_buf; | 4580 struct in6_addr saddr_buf; |
4636 saddr_buf = rt->fib6_prefsrc.addr; | 4581 saddr_buf = rt->rt6i_prefsrc.addr; |
4637 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 4638 goto nla_put_failure; 4639 } 4640 | 4582 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 4583 goto nla_put_failure; 4584 } 4585 |
4641 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics; 4642 if (rtnetlink_put_metrics(skb, pmetrics) < 0) | 4586 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); 4587 if (rt->rt6i_pmtu) 4588 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu; 4589 if (rtnetlink_put_metrics(skb, metrics) < 0) |
4643 goto nla_put_failure; 4644 | 4590 goto nla_put_failure; 4591 |
4645 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric)) | 4592 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric)) |
4646 goto nla_put_failure; 4647 4648 /* For multipath routes, walk the siblings list and add 4649 * each as a nexthop within RTA_MULTIPATH. 4650 */ | 4593 goto nla_put_failure; 4594 4595 /* For multipath routes, walk the siblings list and add 4596 * each as a nexthop within RTA_MULTIPATH. 4597 */ |
4651 if (rt->fib6_nsiblings) { 4652 struct fib6_info *sibling, *next_sibling; | 4598 if (rt->rt6i_nsiblings) { 4599 struct rt6_info *sibling, *next_sibling; |
4653 struct nlattr *mp; 4654 4655 mp = nla_nest_start(skb, RTA_MULTIPATH); 4656 if (!mp) 4657 goto nla_put_failure; 4658 4659 if (rt6_add_nexthop(skb, rt) < 0) 4660 goto nla_put_failure; 4661 4662 list_for_each_entry_safe(sibling, next_sibling, | 4600 struct nlattr *mp; 4601 4602 mp = nla_nest_start(skb, RTA_MULTIPATH); 4603 if (!mp) 4604 goto nla_put_failure; 4605 4606 if (rt6_add_nexthop(skb, rt) < 0) 4607 goto nla_put_failure; 4608 4609 list_for_each_entry_safe(sibling, next_sibling, |
4663 &rt->fib6_siblings, fib6_siblings) { | 4610 &rt->rt6i_siblings, rt6i_siblings) { |
4664 if (rt6_add_nexthop(skb, sibling) < 0) 4665 goto nla_put_failure; 4666 } 4667 4668 nla_nest_end(skb, mp); 4669 } else { 4670 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0) 4671 goto nla_put_failure; 4672 } 4673 | 4611 if (rt6_add_nexthop(skb, sibling) < 0) 4612 goto nla_put_failure; 4613 } 4614 4615 nla_nest_end(skb, mp); 4616 } else { 4617 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0) 4618 goto nla_put_failure; 4619 } 4620 |
4674 if (rt->fib6_flags & RTF_EXPIRES) { 4675 expires = dst ? dst->expires : rt->expires; 4676 expires -= jiffies; 4677 } | 4621 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0; |
4678 | 4622 |
4679 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0) | 4623 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0) |
4680 goto nla_put_failure; 4681 | 4624 goto nla_put_failure; 4625 |
4682 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags))) | 4626 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) |
4683 goto nla_put_failure; 4684 4685 4686 nlmsg_end(skb, nlh); 4687 return 0; 4688 4689nla_put_failure: 4690 nlmsg_cancel(skb, nlh); 4691 return -EMSGSIZE; 4692} 4693 | 4627 goto nla_put_failure; 4628 4629 4630 nlmsg_end(skb, nlh); 4631 return 0; 4632 4633nla_put_failure: 4634 nlmsg_cancel(skb, nlh); 4635 return -EMSGSIZE; 4636} 4637 |
4694int rt6_dump_route(struct fib6_info *rt, void *p_arg) | 4638int rt6_dump_route(struct rt6_info *rt, void *p_arg) |
4695{ 4696 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg; 4697 struct net *net = arg->net; 4698 | 4639{ 4640 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg; 4641 struct net *net = arg->net; 4642 |
4699 if (rt == net->ipv6.fib6_null_entry) | 4643 if (rt == net->ipv6.ip6_null_entry) |
4700 return 0; 4701 4702 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) { 4703 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh); 4704 4705 /* user wants prefix routes only */ 4706 if (rtm->rtm_flags & RTM_F_PREFIX && | 4644 return 0; 4645 4646 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) { 4647 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh); 4648 4649 /* user wants prefix routes only */ 4650 if (rtm->rtm_flags & RTM_F_PREFIX && |
4707 !(rt->fib6_flags & RTF_PREFIX_RT)) { | 4651 !(rt->rt6i_flags & RTF_PREFIX_RT)) { |
4708 /* success since this is not a prefix route */ 4709 return 1; 4710 } 4711 } 4712 | 4652 /* success since this is not a prefix route */ 4653 return 1; 4654 } 4655 } 4656 |
4713 return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0, 4714 RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid, 4715 arg->cb->nlh->nlmsg_seq, NLM_F_MULTI); | 4657 return rt6_fill_node(net, 4658 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, 4659 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq, 4660 NLM_F_MULTI); |
4716} 4717 4718static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4719 struct netlink_ext_ack *extack) 4720{ 4721 struct net *net = sock_net(in_skb->sk); 4722 struct nlattr *tb[RTA_MAX+1]; 4723 int err, iif = 0, oif = 0; | 4661} 4662 4663static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4664 struct netlink_ext_ack *extack) 4665{ 4666 struct net *net = sock_net(in_skb->sk); 4667 struct nlattr *tb[RTA_MAX+1]; 4668 int err, iif = 0, oif = 0; |
4724 struct fib6_info *from; | |
4725 struct dst_entry *dst; 4726 struct rt6_info *rt; 4727 struct sk_buff *skb; 4728 struct rtmsg *rtm; 4729 struct flowi6 fl6; 4730 bool fibmatch; 4731 4732 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, --- 72 unchanged lines hidden (view full) --- 4805 } 4806 4807 if (rt == net->ipv6.ip6_null_entry) { 4808 err = rt->dst.error; 4809 ip6_rt_put(rt); 4810 goto errout; 4811 } 4812 | 4669 struct dst_entry *dst; 4670 struct rt6_info *rt; 4671 struct sk_buff *skb; 4672 struct rtmsg *rtm; 4673 struct flowi6 fl6; 4674 bool fibmatch; 4675 4676 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, --- 72 unchanged lines hidden (view full) --- 4749 } 4750 4751 if (rt == net->ipv6.ip6_null_entry) { 4752 err = rt->dst.error; 4753 ip6_rt_put(rt); 4754 goto errout; 4755 } 4756 |
4757 if (fibmatch && rt->from) { 4758 struct rt6_info *ort = rt->from; 4759 4760 dst_hold(&ort->dst); 4761 ip6_rt_put(rt); 4762 rt = ort; 4763 } 4764 |
|
4813 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 4814 if (!skb) { 4815 ip6_rt_put(rt); 4816 err = -ENOBUFS; 4817 goto errout; 4818 } 4819 4820 skb_dst_set(skb, &rt->dst); | 4765 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 4766 if (!skb) { 4767 ip6_rt_put(rt); 4768 err = -ENOBUFS; 4769 goto errout; 4770 } 4771 4772 skb_dst_set(skb, &rt->dst); |
4821 4822 rcu_read_lock(); 4823 from = rcu_dereference(rt->from); 4824 | |
4825 if (fibmatch) | 4773 if (fibmatch) |
4826 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif, | 4774 err = rt6_fill_node(net, skb, rt, NULL, NULL, iif, |
4827 RTM_NEWROUTE, NETLINK_CB(in_skb).portid, 4828 nlh->nlmsg_seq, 0); 4829 else | 4775 RTM_NEWROUTE, NETLINK_CB(in_skb).portid, 4776 nlh->nlmsg_seq, 0); 4777 else |
4830 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr, 4831 &fl6.saddr, iif, RTM_NEWROUTE, 4832 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 4833 0); 4834 rcu_read_unlock(); 4835 | 4778 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, 4779 RTM_NEWROUTE, NETLINK_CB(in_skb).portid, 4780 nlh->nlmsg_seq, 0); |
4836 if (err < 0) { 4837 kfree_skb(skb); 4838 goto errout; 4839 } 4840 4841 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4842errout: 4843 return err; 4844} 4845 | 4781 if (err < 0) { 4782 kfree_skb(skb); 4783 goto errout; 4784 } 4785 4786 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 4787errout: 4788 return err; 4789} 4790 |
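inet6_rtm_getroute() above services RTM_GETROUTE requests and answers with a single RTM_NEWROUTE message built by rt6_fill_node(); the fibmatch branch corresponds to requests carrying RTM_F_FIB_MATCH in rtm_flags (parsed in the lines hidden above) and reports the matching FIB entry rather than the dst produced by the lookup. A minimal userspace sketch of such a request, with hypothetical naming (query_ipv6_route) and no reply parsing:

        #include <string.h>
        #include <unistd.h>
        #include <sys/socket.h>
        #include <arpa/inet.h>
        #include <linux/rtnetlink.h>

        static int query_ipv6_route(const char *dst6)
        {
                struct {
                        struct nlmsghdr nlh;
                        struct rtmsg rtm;
                        char buf[RTA_SPACE(16)];
                } req;
                struct rtattr *rta;
                int fd;

                fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
                if (fd < 0)
                        return -1;

                memset(&req, 0, sizeof(req));
                req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
                req.nlh.nlmsg_type = RTM_GETROUTE;
                req.nlh.nlmsg_flags = NLM_F_REQUEST;
                req.rtm.rtm_family = AF_INET6;
                req.rtm.rtm_dst_len = 128;

                /* RTA_DST carries the address to resolve; optional RTA_IIF/RTA_OIF
                 * attributes steer the lookup much like fl6 does in the handler.
                 */
                rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
                rta->rta_type = RTA_DST;
                rta->rta_len = RTA_LENGTH(16);
                if (inet_pton(AF_INET6, dst6, RTA_DATA(rta)) != 1) {
                        close(fd);
                        return -1;
                }
                req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

                if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
                        close(fd);
                        return -1;
                }
                /* recv() the RTM_NEWROUTE reply built by rt6_fill_node() here */
                close(fd);
                return 0;
        }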
4846void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, | 4791void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, |
4847 unsigned int nlm_flags) 4848{ 4849 struct sk_buff *skb; 4850 struct net *net = info->nl_net; 4851 u32 seq; 4852 int err; 4853 4854 err = -ENOBUFS; 4855 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 4856 4857 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 4858 if (!skb) 4859 goto errout; 4860 | 4792 unsigned int nlm_flags) 4793{ 4794 struct sk_buff *skb; 4795 struct net *net = info->nl_net; 4796 u32 seq; 4797 int err; 4798 4799 err = -ENOBUFS; 4800 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 4801 4802 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 4803 if (!skb) 4804 goto errout; 4805 |
4861 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0, 4862 event, info->portid, seq, nlm_flags); | 4806 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, 4807 event, info->portid, seq, nlm_flags); |
4863 if (err < 0) { 4864 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 4865 WARN_ON(err == -EMSGSIZE); 4866 kfree_skb(skb); 4867 goto errout; 4868 } 4869 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 4870 info->nlh, gfp_any()); --- 8 unchanged lines hidden (view full) --- 4879{ 4880 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4881 struct net *net = dev_net(dev); 4882 4883 if (!(dev->flags & IFF_LOOPBACK)) 4884 return NOTIFY_OK; 4885 4886 if (event == NETDEV_REGISTER) { | 4808 if (err < 0) { 4809 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 4810 WARN_ON(err == -EMSGSIZE); 4811 kfree_skb(skb); 4812 goto errout; 4813 } 4814 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 4815 info->nlh, gfp_any()); --- 8 unchanged lines hidden (view full) --- 4824{ 4825 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4826 struct net *net = dev_net(dev); 4827 4828 if (!(dev->flags & IFF_LOOPBACK)) 4829 return NOTIFY_OK; 4830 4831 if (event == NETDEV_REGISTER) { |
4887 net->ipv6.fib6_null_entry->fib6_nh.nh_dev = dev; | |
4888 net->ipv6.ip6_null_entry->dst.dev = dev; 4889 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); 4890#ifdef CONFIG_IPV6_MULTIPLE_TABLES 4891 net->ipv6.ip6_prohibit_entry->dst.dev = dev; 4892 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); 4893 net->ipv6.ip6_blk_hole_entry->dst.dev = dev; 4894 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); 4895#endif --- 180 unchanged lines hidden (view full) --- 5076 int ret = -ENOMEM; 5077 5078 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template, 5079 sizeof(net->ipv6.ip6_dst_ops)); 5080 5081 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0) 5082 goto out_ip6_dst_ops; 5083 | 4832 net->ipv6.ip6_null_entry->dst.dev = dev; 4833 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); 4834#ifdef CONFIG_IPV6_MULTIPLE_TABLES 4835 net->ipv6.ip6_prohibit_entry->dst.dev = dev; 4836 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); 4837 net->ipv6.ip6_blk_hole_entry->dst.dev = dev; 4838 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); 4839#endif --- 180 unchanged lines hidden (view full) --- 5020 int ret = -ENOMEM; 5021 5022 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template, 5023 sizeof(net->ipv6.ip6_dst_ops)); 5024 5025 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0) 5026 goto out_ip6_dst_ops; 5027 |
5084 net->ipv6.fib6_null_entry = kmemdup(&fib6_null_entry_template, 5085 sizeof(*net->ipv6.fib6_null_entry), 5086 GFP_KERNEL); 5087 if (!net->ipv6.fib6_null_entry) 5088 goto out_ip6_dst_entries; 5089 | |
5090 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template, 5091 sizeof(*net->ipv6.ip6_null_entry), 5092 GFP_KERNEL); 5093 if (!net->ipv6.ip6_null_entry) | 5028 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template, 5029 sizeof(*net->ipv6.ip6_null_entry), 5030 GFP_KERNEL); 5031 if (!net->ipv6.ip6_null_entry) |
5094 goto out_fib6_null_entry; | 5032 goto out_ip6_dst_entries; |
5095 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops; 5096 dst_init_metrics(&net->ipv6.ip6_null_entry->dst, 5097 ip6_template_metrics, true); 5098 5099#ifdef CONFIG_IPV6_MULTIPLE_TABLES 5100 net->ipv6.fib6_has_custom_rules = false; 5101 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, 5102 sizeof(*net->ipv6.ip6_prohibit_entry), --- 30 unchanged lines hidden (view full) --- 5133 return ret; 5134 5135#ifdef CONFIG_IPV6_MULTIPLE_TABLES 5136out_ip6_prohibit_entry: 5137 kfree(net->ipv6.ip6_prohibit_entry); 5138out_ip6_null_entry: 5139 kfree(net->ipv6.ip6_null_entry); 5140#endif | 5033 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops; 5034 dst_init_metrics(&net->ipv6.ip6_null_entry->dst, 5035 ip6_template_metrics, true); 5036 5037#ifdef CONFIG_IPV6_MULTIPLE_TABLES 5038 net->ipv6.fib6_has_custom_rules = false; 5039 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, 5040 sizeof(*net->ipv6.ip6_prohibit_entry), --- 30 unchanged lines hidden (view full) --- 5071 return ret; 5072 5073#ifdef CONFIG_IPV6_MULTIPLE_TABLES 5074out_ip6_prohibit_entry: 5075 kfree(net->ipv6.ip6_prohibit_entry); 5076out_ip6_null_entry: 5077 kfree(net->ipv6.ip6_null_entry); 5078#endif |
5141out_fib6_null_entry: 5142 kfree(net->ipv6.fib6_null_entry); | |
5143out_ip6_dst_entries: 5144 dst_entries_destroy(&net->ipv6.ip6_dst_ops); 5145out_ip6_dst_ops: 5146 goto out; 5147} 5148 5149static void __net_exit ip6_route_net_exit(struct net *net) 5150{ | 5079out_ip6_dst_entries: 5080 dst_entries_destroy(&net->ipv6.ip6_dst_ops); 5081out_ip6_dst_ops: 5082 goto out; 5083} 5084 5085static void __net_exit ip6_route_net_exit(struct net *net) 5086{ |
5151 kfree(net->ipv6.fib6_null_entry); | |
5152 kfree(net->ipv6.ip6_null_entry); 5153#ifdef CONFIG_IPV6_MULTIPLE_TABLES 5154 kfree(net->ipv6.ip6_prohibit_entry); 5155 kfree(net->ipv6.ip6_blk_hole_entry); 5156#endif 5157 dst_entries_destroy(&net->ipv6.ip6_dst_ops); 5158} 5159 --- 54 unchanged lines hidden (view full) --- 5214 .priority = ADDRCONF_NOTIFY_PRIORITY - 10, 5215}; 5216 5217void __init ip6_route_init_special_entries(void) 5218{ 5219 /* Registering of the loopback is done before this portion of code, 5220 * the loopback reference in rt6_info will not be taken, do it 5221 * manually for init_net */ | 5087 kfree(net->ipv6.ip6_null_entry); 5088#ifdef CONFIG_IPV6_MULTIPLE_TABLES 5089 kfree(net->ipv6.ip6_prohibit_entry); 5090 kfree(net->ipv6.ip6_blk_hole_entry); 5091#endif 5092 dst_entries_destroy(&net->ipv6.ip6_dst_ops); 5093} 5094 --- 54 unchanged lines hidden (view full) --- 5149 .priority = ADDRCONF_NOTIFY_PRIORITY - 10, 5150}; 5151 5152void __init ip6_route_init_special_entries(void) 5153{ 5154 /* Registering of the loopback is done before this portion of code, 5155 * the loopback reference in rt6_info will not be taken, do it 5156 * manually for init_net */ |
5222 init_net.ipv6.fib6_null_entry->fib6_nh.nh_dev = init_net.loopback_dev; | |
5223 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev; 5224 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 5225 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 5226 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev; 5227 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 5228 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; 5229 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 5230 #endif --- 106 unchanged lines hidden --- | 5157 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev; 5158 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 5159 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 5160 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev; 5161 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 5162 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; 5163 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 5164 #endif --- 106 unchanged lines hidden --- |