// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node, can be empty list */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + root: unused
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority. If two results have the same prio, the youngest
 * one wins.
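 *
 * Example (editor's illustration, hypothetical policies): given
 * 10.0.0.0/8 -> 192.168.1.0/24, any -> 192.168.1.0/24 and any -> any,
 * a lookup for 10.1.2.3 -> 192.168.1.7 collects the saddr:daddr list
 * of the 10.0.0.0/8 node below the 192.168.1.0/24 daddr node, that
 * daddr node's any:daddr list, and the top-level any:any list; the
 * candidate with the lowest priority value is then chosen.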
 */

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static __read_mostly seqcount_t xfrm_policy_hash_generation;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) &
		  sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must have been released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from all lists.
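 * (Editor's note: xfrm_policy_kill drops the hold-queue and timer
 * references and then the list reference; the final xfrm_pol_put
 * invokes xfrm_policy_destroy once the refcount reaches zero.)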
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_init(&bin->count);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (sizeof(long) == 4 && prefixlen == 0)
			return ntohl(a->a4) - ntohl(b->a4);
		return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
		       (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			u32 mask = ~0u << (32 - pbi);

			delta = (ntohl(a->a6[pdw]) & mask) -
				(ntohl(b->a6[pdw]) & mask);
		}
		break;
	default:
		break;
	}

	return delta;
}

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (!policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
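	 *
	 * (Editor's note: each re-inserted child may itself collide with
	 * an existing node in n->root; xfrm_policy_inexact_node_reinsert
	 * then moves the policy lists and restarts, so the merge is
	 * effectively recursive.)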
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
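				 *
				 * (Editor's illustration, hypothetical
				 * prefixes: inserting 10.0.0.0/8 while
				 * 10.1.0.0/16 and 10.2.0.0/16 exist first
				 * turns 10.1.0.0/16 into the cached /8
				 * node, then merges 10.2.0.0/16 into it
				 * here.)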
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

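	/* both saddr and daddr are fixed: hook the saddr subtree below
	 * the daddr node found above (editor's note)
	 */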
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
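	 *
	 * (Editor's note: bins and tree nodes are allocated up front in
	 * this loop, so the re-insert pass further down cannot fail after
	 * the old hash tables have been emptied.)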
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		dir = xfrm_policy_id2dir(policy->index);
		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass.
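 *
 * (Editor's note: the low three bits of an index encode the policy
 * direction, so idx_generator advances in steps of 8 and the loop
 * below retries until it finds an index not yet present in the
 * byidx hash table.)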
 */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};

static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index :
		xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
			struct xfrm_sec_ctx *ctx)
{
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    xfrm_policy_mark_match(mark, pol) &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security))
			return pol;
	}

	return NULL;
}

struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		      u8 type, int dir, struct xfrm_selector *sel,
		      struct xfrm_sec_ctx *ctx, int delete, int *err)
{
	struct xfrm_pol_inexact_bin *bin = NULL;
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	if (!chain) {
		struct xfrm_pol_inexact_candidates cand;
		int i;

		bin = xfrm_policy_inexact_lookup(net, type,
						 sel->family, dir, if_id);
		if (!bin) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
							 &sel->saddr,
							 &sel->daddr)) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		pol = NULL;
		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
			struct xfrm_policy *tmp;

			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
						      if_id, type, dir,
						      sel, ctx);
			if (!tmp)
				continue;

			if (!pol || tmp->pos < pol->pos)
				pol = tmp;
		}
	} else {
		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
					      sel, ctx);
	}

	if (pol) {
		xfrm_pol_hold(pol);
		if (delete) {
			*err = security_xfrm_policy_delete(pol->security);
			if (*err) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return pol;
			}
			__xfrm_policy_unlink(pol, dir);
		}
		ret = pol;
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	if (bin && delete)
		xfrm_policy_inexact_prune_bin(bin);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *
xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		 u8 type, int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		dir = xfrm_policy_id2dir(pol->index);
		if (pol->walk.dead ||
		    dir >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else a negative errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir, u32 if_id)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    pol->if_id != if_id ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);
	return ret;
}

static struct xfrm_pol_inexact_node *
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
				seqcount_t *count,
				const xfrm_address_t *addr, u16 family)
{
	const struct rb_node *parent;
	int seq;

again:
	seq = read_seqcount_begin(count);

	parent = rcu_dereference_raw(r->rb_node);
	while (parent) {
		struct xfrm_pol_inexact_node *node;
		int delta;

		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen, family);
		if (delta < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			continue;
		} else if (delta > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			continue;
		}

		return node;
	}

	if (read_seqcount_retry(count, seq))
		goto again;

	return NULL;
}

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr)
{
	struct xfrm_pol_inexact_node *n;
	u16 family;

	if (!b)
		return false;

	family = b->k.family;
	memset(cand, 0, sizeof(*cand));
	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;

	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
					    family);
	if (n) {
		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
						    family);
		if (n)
			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
	}

	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
					    family);
	if (n)
		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;

	return true;
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
			       u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_key k = {
		.family = family,
		.type = type,
		.dir = dir,
		.if_id = if_id,
	};

	write_pnet(&k.net, net);

	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
				 xfrm_pol_inexact_params);
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
			   u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_bin *bin;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	rcu_read_lock();
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	rcu_read_unlock();

	return bin;
}

static struct xfrm_policy *
__xfrm_policy_eval_candidates(struct hlist_head *chain,
			      struct xfrm_policy *prefer,
			      const struct flowi *fl,
			      u8 type, u16 family, int dir, u32 if_id)
{
	u32 priority = prefer ? prefer->priority : ~0u;
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry_rcu(pol, chain, bydst) {
		int err;

		if (pol->priority > priority)
			break;

		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err != -ESRCH)
				return ERR_PTR(err);

			continue;
		}

		if (prefer) {
			/* matches. Is it older than *prefer? */
			if (pol->priority == priority &&
			    prefer->pos < pol->pos)
				return prefer;
		}

		return pol;
	}

	return NULL;
}

static struct xfrm_policy *
xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
			    struct xfrm_policy *prefer,
			    const struct flowi *fl,
			    u8 type, u16 family, int dir, u32 if_id)
{
	struct xfrm_policy *tmp;
	int i;

	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
		tmp = __xfrm_policy_eval_candidates(cand->res[i],
						    prefer,
						    fl, type, family, dir,
						    if_id);
		if (!tmp)
			continue;

		if (IS_ERR(tmp))
			return tmp;
		prefer = tmp;
	}

	return prefer;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir,
						     u32 if_id)
{
	struct xfrm_pol_inexact_candidates cand;
	const xfrm_address_t *daddr, *saddr;
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	unsigned int sequence;
	int err;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	rcu_read_lock();
retry:
	do {
		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
		chain = policy_hash_direct(net, daddr, saddr, family, dir);
	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));

	ret = NULL;
	hlist_for_each_entry_rcu(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			break;
		}
	}
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
							 daddr))
		goto skip_inexact;

	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
					  family, dir, if_id);
	if (pol) {
		ret = pol;
		if (IS_ERR(pol))
			goto fail;
	}

skip_inexact:
	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
		goto retry;

	if (ret && !xfrm_pol_hold_rcu(ret))
		goto retry;
fail:
	rcu_read_unlock();

	return ret;
} 2131 2132 static struct xfrm_policy *xfrm_policy_lookup(struct net *net, 2133 const struct flowi *fl, 2134 u16 family, u8 dir, u32 if_id) 2135 { 2136 #ifdef CONFIG_XFRM_SUB_POLICY 2137 struct xfrm_policy *pol; 2138 2139 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, 2140 dir, if_id); 2141 if (pol != NULL) 2142 return pol; 2143 #endif 2144 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, 2145 dir, if_id); 2146 } 2147 2148 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, 2149 const struct flowi *fl, 2150 u16 family, u32 if_id) 2151 { 2152 struct xfrm_policy *pol; 2153 2154 rcu_read_lock(); 2155 again: 2156 pol = rcu_dereference(sk->sk_policy[dir]); 2157 if (pol != NULL) { 2158 bool match; 2159 int err = 0; 2160 2161 if (pol->family != family) { 2162 pol = NULL; 2163 goto out; 2164 } 2165 2166 match = xfrm_selector_match(&pol->selector, fl, family); 2167 if (match) { 2168 if ((sk->sk_mark & pol->mark.m) != pol->mark.v || 2169 pol->if_id != if_id) { 2170 pol = NULL; 2171 goto out; 2172 } 2173 err = security_xfrm_policy_lookup(pol->security, 2174 fl->flowi_secid, 2175 dir); 2176 if (!err) { 2177 if (!xfrm_pol_hold_rcu(pol)) 2178 goto again; 2179 } else if (err == -ESRCH) { 2180 pol = NULL; 2181 } else { 2182 pol = ERR_PTR(err); 2183 } 2184 } else 2185 pol = NULL; 2186 } 2187 out: 2188 rcu_read_unlock(); 2189 return pol; 2190 } 2191 2192 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) 2193 { 2194 struct net *net = xp_net(pol); 2195 2196 list_add(&pol->walk.all, &net->xfrm.policy_all); 2197 net->xfrm.policy_count[dir]++; 2198 xfrm_pol_hold(pol); 2199 } 2200 2201 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 2202 int dir) 2203 { 2204 struct net *net = xp_net(pol); 2205 2206 if (list_empty(&pol->walk.all)) 2207 return NULL; 2208 2209 /* Socket policies are not hashed. 
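 * (Editorial note: they are reachable only through sk->sk_policy[] and the
 * walk.all list, so the bydst/byidx unlink below is skipped for them via
 * the hlist_unhashed() test.)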
*/ 2210 if (!hlist_unhashed(&pol->bydst)) { 2211 hlist_del_rcu(&pol->bydst); 2212 hlist_del_init(&pol->bydst_inexact_list); 2213 hlist_del(&pol->byidx); 2214 } 2215 2216 list_del_init(&pol->walk.all); 2217 net->xfrm.policy_count[dir]--; 2218 2219 return pol; 2220 } 2221 2222 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir) 2223 { 2224 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir); 2225 } 2226 2227 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir) 2228 { 2229 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir); 2230 } 2231 2232 int xfrm_policy_delete(struct xfrm_policy *pol, int dir) 2233 { 2234 struct net *net = xp_net(pol); 2235 2236 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 2237 pol = __xfrm_policy_unlink(pol, dir); 2238 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2239 if (pol) { 2240 xfrm_policy_kill(pol); 2241 return 0; 2242 } 2243 return -ENOENT; 2244 } 2245 EXPORT_SYMBOL(xfrm_policy_delete); 2246 2247 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) 2248 { 2249 struct net *net = sock_net(sk); 2250 struct xfrm_policy *old_pol; 2251 2252 #ifdef CONFIG_XFRM_SUB_POLICY 2253 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN) 2254 return -EINVAL; 2255 #endif 2256 2257 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 2258 old_pol = rcu_dereference_protected(sk->sk_policy[dir], 2259 lockdep_is_held(&net->xfrm.xfrm_policy_lock)); 2260 if (pol) { 2261 pol->curlft.add_time = ktime_get_real_seconds(); 2262 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); 2263 xfrm_sk_policy_link(pol, dir); 2264 } 2265 rcu_assign_pointer(sk->sk_policy[dir], pol); 2266 if (old_pol) { 2267 if (pol) 2268 xfrm_policy_requeue(old_pol, pol); 2269 2270 /* Unlinking always succeeds. This is the only function 2271 * allowed to delete or replace socket policy.
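 * (Editorial note: the caller holds net->xfrm.xfrm_policy_lock here, so no
 * other writer can race with the rcu_assign_pointer() swap above or with
 * this unlink.)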
2272 */ 2273 xfrm_sk_policy_unlink(old_pol, dir); 2274 } 2275 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2276 2277 if (old_pol) { 2278 xfrm_policy_kill(old_pol); 2279 } 2280 return 0; 2281 } 2282 2283 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) 2284 { 2285 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); 2286 struct net *net = xp_net(old); 2287 2288 if (newp) { 2289 newp->selector = old->selector; 2290 if (security_xfrm_policy_clone(old->security, 2291 &newp->security)) { 2292 kfree(newp); 2293 return NULL; /* ENOMEM */ 2294 } 2295 newp->lft = old->lft; 2296 newp->curlft = old->curlft; 2297 newp->mark = old->mark; 2298 newp->if_id = old->if_id; 2299 newp->action = old->action; 2300 newp->flags = old->flags; 2301 newp->xfrm_nr = old->xfrm_nr; 2302 newp->index = old->index; 2303 newp->type = old->type; 2304 newp->family = old->family; 2305 memcpy(newp->xfrm_vec, old->xfrm_vec, 2306 newp->xfrm_nr*sizeof(struct xfrm_tmpl)); 2307 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 2308 xfrm_sk_policy_link(newp, dir); 2309 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2310 xfrm_pol_put(newp); 2311 } 2312 return newp; 2313 } 2314 2315 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) 2316 { 2317 const struct xfrm_policy *p; 2318 struct xfrm_policy *np; 2319 int i, ret = 0; 2320 2321 rcu_read_lock(); 2322 for (i = 0; i < 2; i++) { 2323 p = rcu_dereference(osk->sk_policy[i]); 2324 if (p) { 2325 np = clone_policy(p, i); 2326 if (unlikely(!np)) { 2327 ret = -ENOMEM; 2328 break; 2329 } 2330 rcu_assign_pointer(sk->sk_policy[i], np); 2331 } 2332 } 2333 rcu_read_unlock(); 2334 return ret; 2335 } 2336 2337 static int 2338 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local, 2339 xfrm_address_t *remote, unsigned short family, u32 mark) 2340 { 2341 int err; 2342 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2343 2344 if (unlikely(afinfo == NULL)) 2345 return -EINVAL; 2346 err = afinfo->get_saddr(net, oif, local, remote, mark); 2347 rcu_read_unlock(); 2348 return err; 2349 } 2350 2351 /* Resolve list of templates for the flow, given policy. */ 2352 2353 static int 2354 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, 2355 struct xfrm_state **xfrm, unsigned short family) 2356 { 2357 struct net *net = xp_net(policy); 2358 int nx; 2359 int i, error; 2360 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 2361 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 2362 xfrm_address_t tmp; 2363 2364 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) { 2365 struct xfrm_state *x; 2366 xfrm_address_t *remote = daddr; 2367 xfrm_address_t *local = saddr; 2368 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 2369 2370 if (tmpl->mode == XFRM_MODE_TUNNEL || 2371 tmpl->mode == XFRM_MODE_BEET) { 2372 remote = &tmpl->id.daddr; 2373 local = &tmpl->saddr; 2374 if (xfrm_addr_any(local, tmpl->encap_family)) { 2375 error = xfrm_get_saddr(net, fl->flowi_oif, 2376 &tmp, remote, 2377 tmpl->encap_family, 0); 2378 if (error) 2379 goto fail; 2380 local = &tmp; 2381 } 2382 } 2383 2384 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, 2385 family, policy->if_id); 2386 2387 if (x && x->km.state == XFRM_STATE_VALID) { 2388 xfrm[nx++] = x; 2389 daddr = remote; 2390 saddr = local; 2391 continue; 2392 } 2393 if (x) { 2394 error = (x->km.state == XFRM_STATE_ERROR ? 
2395 -EINVAL : -EAGAIN); 2396 xfrm_state_put(x); 2397 } else if (error == -ESRCH) { 2398 error = -EAGAIN; 2399 } 2400 2401 if (!tmpl->optional) 2402 goto fail; 2403 } 2404 return nx; 2405 2406 fail: 2407 for (nx--; nx >= 0; nx--) 2408 xfrm_state_put(xfrm[nx]); 2409 return error; 2410 } 2411 2412 static int 2413 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, 2414 struct xfrm_state **xfrm, unsigned short family) 2415 { 2416 struct xfrm_state *tp[XFRM_MAX_DEPTH]; 2417 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm; 2418 int cnx = 0; 2419 int error; 2420 int ret; 2421 int i; 2422 2423 for (i = 0; i < npols; i++) { 2424 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) { 2425 error = -ENOBUFS; 2426 goto fail; 2427 } 2428 2429 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family); 2430 if (ret < 0) { 2431 error = ret; 2432 goto fail; 2433 } else 2434 cnx += ret; 2435 } 2436 2437 /* found states are sorted for outbound processing */ 2438 if (npols > 1) 2439 xfrm_state_sort(xfrm, tpp, cnx, family); 2440 2441 return cnx; 2442 2443 fail: 2444 for (cnx--; cnx >= 0; cnx--) 2445 xfrm_state_put(tpp[cnx]); 2446 return error; 2447 2448 } 2449 2450 static int xfrm_get_tos(const struct flowi *fl, int family) 2451 { 2452 if (family == AF_INET) 2453 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos; 2454 2455 return 0; 2456 } 2457 2458 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 2459 { 2460 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2461 struct dst_ops *dst_ops; 2462 struct xfrm_dst *xdst; 2463 2464 if (!afinfo) 2465 return ERR_PTR(-EINVAL); 2466 2467 switch (family) { 2468 case AF_INET: 2469 dst_ops = &net->xfrm.xfrm4_dst_ops; 2470 break; 2471 #if IS_ENABLED(CONFIG_IPV6) 2472 case AF_INET6: 2473 dst_ops = &net->xfrm.xfrm6_dst_ops; 2474 break; 2475 #endif 2476 default: 2477 BUG(); 2478 } 2479 xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0); 2480 2481 if (likely(xdst)) { 2482 struct dst_entry *dst = &xdst->u.dst; 2483 2484 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)); 2485 } else 2486 xdst = ERR_PTR(-ENOBUFS); 2487 2488 rcu_read_unlock(); 2489 2490 return xdst; 2491 } 2492 2493 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, 2494 int nfheader_len) 2495 { 2496 if (dst->ops->family == AF_INET6) { 2497 struct rt6_info *rt = (struct rt6_info *)dst; 2498 path->path_cookie = rt6_get_cookie(rt); 2499 path->u.rt6.rt6i_nfheader_len = nfheader_len; 2500 } 2501 } 2502 2503 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, 2504 const struct flowi *fl) 2505 { 2506 const struct xfrm_policy_afinfo *afinfo = 2507 xfrm_policy_get_afinfo(xdst->u.dst.ops->family); 2508 int err; 2509 2510 if (!afinfo) 2511 return -EINVAL; 2512 2513 err = afinfo->fill_dst(xdst, dev, fl); 2514 2515 rcu_read_unlock(); 2516 2517 return err; 2518 } 2519 2520 2521 /* Allocate chain of dst_entry's, attach known xfrm's, calculate 2522 * all the metrics... Shortly, bundle a bundle. 
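 *
 * An editorial sketch of the result for two states (added, not part of
 * the original comment): each xfrm_dst links to the next via its child
 * pointer, the last child is the raw route, and xdst0->path points at
 * that route as well:
 *
 *	xdst0(xfrm[0]) --child--> xdst1(xfrm[1]) --child--> dst(route)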
2523 */ 2524 2525 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, 2526 struct xfrm_state **xfrm, 2527 struct xfrm_dst **bundle, 2528 int nx, 2529 const struct flowi *fl, 2530 struct dst_entry *dst) 2531 { 2532 const struct xfrm_state_afinfo *afinfo; 2533 const struct xfrm_mode *inner_mode; 2534 struct net *net = xp_net(policy); 2535 unsigned long now = jiffies; 2536 struct net_device *dev; 2537 struct xfrm_dst *xdst_prev = NULL; 2538 struct xfrm_dst *xdst0 = NULL; 2539 int i = 0; 2540 int err; 2541 int header_len = 0; 2542 int nfheader_len = 0; 2543 int trailer_len = 0; 2544 int tos; 2545 int family = policy->selector.family; 2546 xfrm_address_t saddr, daddr; 2547 2548 xfrm_flowi_addr_get(fl, &saddr, &daddr, family); 2549 2550 tos = xfrm_get_tos(fl, family); 2551 2552 dst_hold(dst); 2553 2554 for (; i < nx; i++) { 2555 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); 2556 struct dst_entry *dst1 = &xdst->u.dst; 2557 2558 err = PTR_ERR(xdst); 2559 if (IS_ERR(xdst)) { 2560 dst_release(dst); 2561 goto put_states; 2562 } 2563 2564 bundle[i] = xdst; 2565 if (!xdst_prev) 2566 xdst0 = xdst; 2567 else 2568 /* Ref count is taken during xfrm_alloc_dst() 2569 * No need to do dst_clone() on dst1 2570 */ 2571 xfrm_dst_set_child(xdst_prev, &xdst->u.dst); 2572 2573 if (xfrm[i]->sel.family == AF_UNSPEC) { 2574 inner_mode = xfrm_ip2inner_mode(xfrm[i], 2575 xfrm_af2proto(family)); 2576 if (!inner_mode) { 2577 err = -EAFNOSUPPORT; 2578 dst_release(dst); 2579 goto put_states; 2580 } 2581 } else 2582 inner_mode = &xfrm[i]->inner_mode; 2583 2584 xdst->route = dst; 2585 dst_copy_metrics(dst1, dst); 2586 2587 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 2588 __u32 mark = 0; 2589 2590 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m) 2591 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); 2592 2593 family = xfrm[i]->props.family; 2594 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif, 2595 &saddr, &daddr, family, mark); 2596 err = PTR_ERR(dst); 2597 if (IS_ERR(dst)) 2598 goto put_states; 2599 } else 2600 dst_hold(dst); 2601 2602 dst1->xfrm = xfrm[i]; 2603 xdst->xfrm_genid = xfrm[i]->genid; 2604 2605 dst1->obsolete = DST_OBSOLETE_FORCE_CHK; 2606 dst1->lastuse = now; 2607 2608 dst1->input = dst_discard; 2609 2610 rcu_read_lock(); 2611 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family); 2612 if (likely(afinfo)) 2613 dst1->output = afinfo->output; 2614 else 2615 dst1->output = dst_discard_out; 2616 rcu_read_unlock(); 2617 2618 xdst_prev = xdst; 2619 2620 header_len += xfrm[i]->props.header_len; 2621 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT) 2622 nfheader_len += xfrm[i]->props.header_len; 2623 trailer_len += xfrm[i]->props.trailer_len; 2624 } 2625 2626 xfrm_dst_set_child(xdst_prev, dst); 2627 xdst0->path = dst; 2628 2629 err = -ENODEV; 2630 dev = dst->dev; 2631 if (!dev) 2632 goto free_dst; 2633 2634 xfrm_init_path(xdst0, dst, nfheader_len); 2635 xfrm_init_pmtu(bundle, nx); 2636 2637 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst; 2638 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) { 2639 err = xfrm_fill_dst(xdst_prev, dev, fl); 2640 if (err) 2641 goto free_dst; 2642 2643 xdst_prev->u.dst.header_len = header_len; 2644 xdst_prev->u.dst.trailer_len = trailer_len; 2645 header_len -= xdst_prev->u.dst.xfrm->props.header_len; 2646 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len; 2647 } 2648 2649 return &xdst0->u.dst; 2650 2651 put_states: 2652 for (; i < nx; i++) 2653 xfrm_state_put(xfrm[i]); 2654 free_dst: 2655 if (xdst0) 2656 
dst_release_immediate(&xdst0->u.dst); 2657 2658 return ERR_PTR(err); 2659 } 2660 2661 static int xfrm_expand_policies(const struct flowi *fl, u16 family, 2662 struct xfrm_policy **pols, 2663 int *num_pols, int *num_xfrms) 2664 { 2665 int i; 2666 2667 if (*num_pols == 0 || !pols[0]) { 2668 *num_pols = 0; 2669 *num_xfrms = 0; 2670 return 0; 2671 } 2672 if (IS_ERR(pols[0])) 2673 return PTR_ERR(pols[0]); 2674 2675 *num_xfrms = pols[0]->xfrm_nr; 2676 2677 #ifdef CONFIG_XFRM_SUB_POLICY 2678 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW && 2679 pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 2680 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), 2681 XFRM_POLICY_TYPE_MAIN, 2682 fl, family, 2683 XFRM_POLICY_OUT, 2684 pols[0]->if_id); 2685 if (pols[1]) { 2686 if (IS_ERR(pols[1])) { 2687 xfrm_pols_put(pols, *num_pols); 2688 return PTR_ERR(pols[1]); 2689 } 2690 (*num_pols)++; 2691 (*num_xfrms) += pols[1]->xfrm_nr; 2692 } 2693 } 2694 #endif 2695 for (i = 0; i < *num_pols; i++) { 2696 if (pols[i]->action != XFRM_POLICY_ALLOW) { 2697 *num_xfrms = -1; 2698 break; 2699 } 2700 } 2701 2702 return 0; 2703 2704 } 2705 2706 static struct xfrm_dst * 2707 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, 2708 const struct flowi *fl, u16 family, 2709 struct dst_entry *dst_orig) 2710 { 2711 struct net *net = xp_net(pols[0]); 2712 struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; 2713 struct xfrm_dst *bundle[XFRM_MAX_DEPTH]; 2714 struct xfrm_dst *xdst; 2715 struct dst_entry *dst; 2716 int err; 2717 2718 /* Try to instantiate a bundle */ 2719 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 2720 if (err <= 0) { 2721 if (err == 0) 2722 return NULL; 2723 2724 if (err != -EAGAIN) 2725 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 2726 return ERR_PTR(err); 2727 } 2728 2729 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig); 2730 if (IS_ERR(dst)) { 2731 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); 2732 return ERR_CAST(dst); 2733 } 2734 2735 xdst = (struct xfrm_dst *)dst; 2736 xdst->num_xfrms = err; 2737 xdst->num_pols = num_pols; 2738 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); 2739 xdst->policy_genid = atomic_read(&pols[0]->genid); 2740 2741 return xdst; 2742 } 2743 2744 static void xfrm_policy_queue_process(struct timer_list *t) 2745 { 2746 struct sk_buff *skb; 2747 struct sock *sk; 2748 struct dst_entry *dst; 2749 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer); 2750 struct net *net = xp_net(pol); 2751 struct xfrm_policy_queue *pq = &pol->polq; 2752 struct flowi fl; 2753 struct sk_buff_head list; 2754 2755 spin_lock(&pq->hold_queue.lock); 2756 skb = skb_peek(&pq->hold_queue); 2757 if (!skb) { 2758 spin_unlock(&pq->hold_queue.lock); 2759 goto out; 2760 } 2761 dst = skb_dst(skb); 2762 sk = skb->sk; 2763 xfrm_decode_session(skb, &fl, dst->ops->family); 2764 spin_unlock(&pq->hold_queue.lock); 2765 2766 dst_hold(xfrm_dst_path(dst)); 2767 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE); 2768 if (IS_ERR(dst)) 2769 goto purge_queue; 2770 2771 if (dst->flags & DST_XFRM_QUEUE) { 2772 dst_release(dst); 2773 2774 if (pq->timeout >= XFRM_QUEUE_TMO_MAX) 2775 goto purge_queue; 2776 2777 pq->timeout = pq->timeout << 1; 2778 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout)) 2779 xfrm_pol_hold(pol); 2780 goto out; 2781 } 2782 2783 dst_release(dst); 2784 2785 __skb_queue_head_init(&list); 2786 2787 spin_lock(&pq->hold_queue.lock); 2788 pq->timeout = 0; 2789 skb_queue_splice_init(&pq->hold_queue, &list); 2790 
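	/*
	 * Editorial note: the queued skbs are spliced to the private
	 * 'list' under the lock and re-routed below after it is dropped,
	 * so xfrm_lookup() and dst_output() never run with
	 * hold_queue.lock held.
	 */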
spin_unlock(&pq->hold_queue.lock); 2791 2792 while (!skb_queue_empty(&list)) { 2793 skb = __skb_dequeue(&list); 2794 2795 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family); 2796 dst_hold(xfrm_dst_path(skb_dst(skb))); 2797 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0); 2798 if (IS_ERR(dst)) { 2799 kfree_skb(skb); 2800 continue; 2801 } 2802 2803 nf_reset_ct(skb); 2804 skb_dst_drop(skb); 2805 skb_dst_set(skb, dst); 2806 2807 dst_output(net, skb->sk, skb); 2808 } 2809 2810 out: 2811 xfrm_pol_put(pol); 2812 return; 2813 2814 purge_queue: 2815 pq->timeout = 0; 2816 skb_queue_purge(&pq->hold_queue); 2817 xfrm_pol_put(pol); 2818 } 2819 2820 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb) 2821 { 2822 unsigned long sched_next; 2823 struct dst_entry *dst = skb_dst(skb); 2824 struct xfrm_dst *xdst = (struct xfrm_dst *) dst; 2825 struct xfrm_policy *pol = xdst->pols[0]; 2826 struct xfrm_policy_queue *pq = &pol->polq; 2827 2828 if (unlikely(skb_fclone_busy(sk, skb))) { 2829 kfree_skb(skb); 2830 return 0; 2831 } 2832 2833 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) { 2834 kfree_skb(skb); 2835 return -EAGAIN; 2836 } 2837 2838 skb_dst_force(skb); 2839 2840 spin_lock_bh(&pq->hold_queue.lock); 2841 2842 if (!pq->timeout) 2843 pq->timeout = XFRM_QUEUE_TMO_MIN; 2844 2845 sched_next = jiffies + pq->timeout; 2846 2847 if (del_timer(&pq->hold_timer)) { 2848 if (time_before(pq->hold_timer.expires, sched_next)) 2849 sched_next = pq->hold_timer.expires; 2850 xfrm_pol_put(pol); 2851 } 2852 2853 __skb_queue_tail(&pq->hold_queue, skb); 2854 if (!mod_timer(&pq->hold_timer, sched_next)) 2855 xfrm_pol_hold(pol); 2856 2857 spin_unlock_bh(&pq->hold_queue.lock); 2858 2859 return 0; 2860 } 2861 2862 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, 2863 struct xfrm_flo *xflo, 2864 const struct flowi *fl, 2865 int num_xfrms, 2866 u16 family) 2867 { 2868 int err; 2869 struct net_device *dev; 2870 struct dst_entry *dst; 2871 struct dst_entry *dst1; 2872 struct xfrm_dst *xdst; 2873 2874 xdst = xfrm_alloc_dst(net, family); 2875 if (IS_ERR(xdst)) 2876 return xdst; 2877 2878 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) || 2879 net->xfrm.sysctl_larval_drop || 2880 num_xfrms <= 0) 2881 return xdst; 2882 2883 dst = xflo->dst_orig; 2884 dst1 = &xdst->u.dst; 2885 dst_hold(dst); 2886 xdst->route = dst; 2887 2888 dst_copy_metrics(dst1, dst); 2889 2890 dst1->obsolete = DST_OBSOLETE_FORCE_CHK; 2891 dst1->flags |= DST_XFRM_QUEUE; 2892 dst1->lastuse = jiffies; 2893 2894 dst1->input = dst_discard; 2895 dst1->output = xdst_queue_output; 2896 2897 dst_hold(dst); 2898 xfrm_dst_set_child(xdst, dst); 2899 xdst->path = dst; 2900 2901 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0); 2902 2903 err = -ENODEV; 2904 dev = dst->dev; 2905 if (!dev) 2906 goto free_dst; 2907 2908 err = xfrm_fill_dst(xdst, dev, fl); 2909 if (err) 2910 goto free_dst; 2911 2912 out: 2913 return xdst; 2914 2915 free_dst: 2916 dst_release(dst1); 2917 xdst = ERR_PTR(err); 2918 goto out; 2919 } 2920 2921 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net, 2922 const struct flowi *fl, 2923 u16 family, u8 dir, 2924 struct xfrm_flo *xflo, u32 if_id) 2925 { 2926 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 2927 int num_pols = 0, num_xfrms = 0, err; 2928 struct xfrm_dst *xdst; 2929 2930 /* Resolve policies to use if we couldn't get them from 2931 * previous cache entry */ 2932 num_pols = 1; 2933 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id); 2934 err = xfrm_expand_policies(fl, family, 
pols, 2935 &num_pols, &num_xfrms); 2936 if (err < 0) 2937 goto inc_error; 2938 if (num_pols == 0) 2939 return NULL; 2940 if (num_xfrms <= 0) 2941 goto make_dummy_bundle; 2942 2943 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, 2944 xflo->dst_orig); 2945 if (IS_ERR(xdst)) { 2946 err = PTR_ERR(xdst); 2947 if (err == -EREMOTE) { 2948 xfrm_pols_put(pols, num_pols); 2949 return NULL; 2950 } 2951 2952 if (err != -EAGAIN) 2953 goto error; 2954 goto make_dummy_bundle; 2955 } else if (xdst == NULL) { 2956 num_xfrms = 0; 2957 goto make_dummy_bundle; 2958 } 2959 2960 return xdst; 2961 2962 make_dummy_bundle: 2963 /* We found policies, but there's no bundles to instantiate: 2964 * either because the policy blocks, has no transformations or 2965 * we could not build template (no xfrm_states).*/ 2966 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family); 2967 if (IS_ERR(xdst)) { 2968 xfrm_pols_put(pols, num_pols); 2969 return ERR_CAST(xdst); 2970 } 2971 xdst->num_pols = num_pols; 2972 xdst->num_xfrms = num_xfrms; 2973 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); 2974 2975 return xdst; 2976 2977 inc_error: 2978 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 2979 error: 2980 xfrm_pols_put(pols, num_pols); 2981 return ERR_PTR(err); 2982 } 2983 2984 static struct dst_entry *make_blackhole(struct net *net, u16 family, 2985 struct dst_entry *dst_orig) 2986 { 2987 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2988 struct dst_entry *ret; 2989 2990 if (!afinfo) { 2991 dst_release(dst_orig); 2992 return ERR_PTR(-EINVAL); 2993 } else { 2994 ret = afinfo->blackhole_route(net, dst_orig); 2995 } 2996 rcu_read_unlock(); 2997 2998 return ret; 2999 } 3000 3001 /* Finds/creates a bundle for given flow and if_id 3002 * 3003 * At the moment we eat a raw IP route. Mostly to speed up lookups 3004 * on interfaces with disabled IPsec. 3005 * 3006 * xfrm_lookup uses an if_id of 0 by default, and is provided for 3007 * compatibility 3008 */ 3009 struct dst_entry *xfrm_lookup_with_ifid(struct net *net, 3010 struct dst_entry *dst_orig, 3011 const struct flowi *fl, 3012 const struct sock *sk, 3013 int flags, u32 if_id) 3014 { 3015 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 3016 struct xfrm_dst *xdst; 3017 struct dst_entry *dst, *route; 3018 u16 family = dst_orig->ops->family; 3019 u8 dir = XFRM_POLICY_OUT; 3020 int i, err, num_pols, num_xfrms = 0, drop_pols = 0; 3021 3022 dst = NULL; 3023 xdst = NULL; 3024 route = NULL; 3025 3026 sk = sk_const_to_full_sk(sk); 3027 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 3028 num_pols = 1; 3029 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family, 3030 if_id); 3031 err = xfrm_expand_policies(fl, family, pols, 3032 &num_pols, &num_xfrms); 3033 if (err < 0) 3034 goto dropdst; 3035 3036 if (num_pols) { 3037 if (num_xfrms <= 0) { 3038 drop_pols = num_pols; 3039 goto no_transform; 3040 } 3041 3042 xdst = xfrm_resolve_and_create_bundle( 3043 pols, num_pols, fl, 3044 family, dst_orig); 3045 3046 if (IS_ERR(xdst)) { 3047 xfrm_pols_put(pols, num_pols); 3048 err = PTR_ERR(xdst); 3049 if (err == -EREMOTE) 3050 goto nopol; 3051 3052 goto dropdst; 3053 } else if (xdst == NULL) { 3054 num_xfrms = 0; 3055 drop_pols = num_pols; 3056 goto no_transform; 3057 } 3058 3059 route = xdst->route; 3060 } 3061 } 3062 3063 if (xdst == NULL) { 3064 struct xfrm_flo xflo; 3065 3066 xflo.dst_orig = dst_orig; 3067 xflo.flags = flags; 3068 3069 /* To accelerate a bit... 
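 * (Editorial expansion: if the route opted out via DST_NOXFRM, or this
 * netns has no outbound policies at all, the bundle lookup below is
 * skipped and the packet keeps its original route.)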
*/ 3070 if ((dst_orig->flags & DST_NOXFRM) || 3071 !net->xfrm.policy_count[XFRM_POLICY_OUT]) 3072 goto nopol; 3073 3074 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id); 3075 if (xdst == NULL) 3076 goto nopol; 3077 if (IS_ERR(xdst)) { 3078 err = PTR_ERR(xdst); 3079 goto dropdst; 3080 } 3081 3082 num_pols = xdst->num_pols; 3083 num_xfrms = xdst->num_xfrms; 3084 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols); 3085 route = xdst->route; 3086 } 3087 3088 dst = &xdst->u.dst; 3089 if (route == NULL && num_xfrms > 0) { 3090 /* The only case when xfrm_bundle_lookup() returns a 3091 * bundle with null route, is when the template could 3092 * not be resolved. It means policies are there, but 3093 * bundle could not be created, since we don't yet 3094 * have the xfrm_state's. We need to wait for KM to 3095 * negotiate new SA's or bail out with error.*/ 3096 if (net->xfrm.sysctl_larval_drop) { 3097 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 3098 err = -EREMOTE; 3099 goto error; 3100 } 3101 3102 err = -EAGAIN; 3103 3104 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 3105 goto error; 3106 } 3107 3108 no_transform: 3109 if (num_pols == 0) 3110 goto nopol; 3111 3112 if ((flags & XFRM_LOOKUP_ICMP) && 3113 !(pols[0]->flags & XFRM_POLICY_ICMP)) { 3114 err = -ENOENT; 3115 goto error; 3116 } 3117 3118 for (i = 0; i < num_pols; i++) 3119 pols[i]->curlft.use_time = ktime_get_real_seconds(); 3120 3121 if (num_xfrms < 0) { 3122 /* Prohibit the flow */ 3123 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); 3124 err = -EPERM; 3125 goto error; 3126 } else if (num_xfrms > 0) { 3127 /* Flow transformed */ 3128 dst_release(dst_orig); 3129 } else { 3130 /* Flow passes untransformed */ 3131 dst_release(dst); 3132 dst = dst_orig; 3133 } 3134 ok: 3135 xfrm_pols_put(pols, drop_pols); 3136 if (dst && dst->xfrm && 3137 dst->xfrm->props.mode == XFRM_MODE_TUNNEL) 3138 dst->flags |= DST_XFRM_TUNNEL; 3139 return dst; 3140 3141 nopol: 3142 if (!(flags & XFRM_LOOKUP_ICMP)) { 3143 dst = dst_orig; 3144 goto ok; 3145 } 3146 err = -ENOENT; 3147 error: 3148 dst_release(dst); 3149 dropdst: 3150 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF)) 3151 dst_release(dst_orig); 3152 xfrm_pols_put(pols, drop_pols); 3153 return ERR_PTR(err); 3154 } 3155 EXPORT_SYMBOL(xfrm_lookup_with_ifid); 3156 3157 /* Main function: finds/creates a bundle for given flow. 3158 * 3159 * At the moment we eat a raw IP route. Mostly to speed up lookups 3160 * on interfaces with disabled IPsec. 3161 */ 3162 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 3163 const struct flowi *fl, const struct sock *sk, 3164 int flags) 3165 { 3166 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0); 3167 } 3168 EXPORT_SYMBOL(xfrm_lookup); 3169 3170 /* Callers of xfrm_lookup_route() must ensure a call to dst_output(). 3171 * Otherwise we may send out blackholed packets. 
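 *
 * An editorial usage sketch (hypothetical caller; only the calls into
 * this file are real):
 *
 *	dst = xfrm_lookup_route(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	return dst_output(net, sk, skb);
 *
 * (a blackhole dst handed back on -EREMOTE then simply drops the skb).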
3172 */ 3173 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, 3174 const struct flowi *fl, 3175 const struct sock *sk, int flags) 3176 { 3177 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, 3178 flags | XFRM_LOOKUP_QUEUE | 3179 XFRM_LOOKUP_KEEP_DST_REF); 3180 3181 if (PTR_ERR(dst) == -EREMOTE) 3182 return make_blackhole(net, dst_orig->ops->family, dst_orig); 3183 3184 if (IS_ERR(dst)) 3185 dst_release(dst_orig); 3186 3187 return dst; 3188 } 3189 EXPORT_SYMBOL(xfrm_lookup_route); 3190 3191 static inline int 3192 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) 3193 { 3194 struct sec_path *sp = skb_sec_path(skb); 3195 struct xfrm_state *x; 3196 3197 if (!sp || idx < 0 || idx >= sp->len) 3198 return 0; 3199 x = sp->xvec[idx]; 3200 if (!x->type->reject) 3201 return 0; 3202 return x->type->reject(x, skb, fl); 3203 } 3204 3205 /* When skb is transformed back to its "native" form, we have to 3206 * check policy restrictions. At the moment we do this in a maximally 3207 * stupid way. Shame on me. :-) Of course, connected sockets must 3208 * have the policy cached on them. 3209 */ 3210 3211 static inline int 3212 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, 3213 unsigned short family) 3214 { 3215 if (xfrm_state_kern(x)) 3216 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family); 3217 return x->id.proto == tmpl->id.proto && 3218 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) && 3219 (x->props.reqid == tmpl->reqid || !tmpl->reqid) && 3220 x->props.mode == tmpl->mode && 3221 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) || 3222 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && 3223 !(x->props.mode != XFRM_MODE_TRANSPORT && 3224 xfrm_state_addr_cmp(tmpl, x, family)); 3225 } 3226 3227 /* 3228 * 0 or a positive value is returned when validation succeeds (either a bypass 3229 * because of optional transport mode, or the next index of the matched secpath 3230 * state with the template). 3231 * -1 is returned when no matching template is found. 3232 * Otherwise "-2 - errored_index" is returned. 3233 */ 3234 static inline int 3235 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, 3236 unsigned short family) 3237 { 3238 int idx = start; 3239 3240 if (tmpl->optional) { 3241 if (tmpl->mode == XFRM_MODE_TRANSPORT) 3242 return start; 3243 } else 3244 start = -1; 3245 for (; idx < sp->len; idx++) { 3246 if (xfrm_state_ok(tmpl, sp->xvec[idx], family)) 3247 return ++idx; 3248 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) { 3249 if (start == -1) 3250 start = -2-idx; 3251 break; 3252 } 3253 } 3254 return start; 3255 } 3256 3257 static void 3258 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse) 3259 { 3260 const struct iphdr *iph = ip_hdr(skb); 3261 int ihl = iph->ihl; 3262 u8 *xprth = skb_network_header(skb) + ihl * 4; 3263 struct flowi4 *fl4 = &fl->u.ip4; 3264 int oif = 0; 3265 3266 if (skb_dst(skb) && skb_dst(skb)->dev) 3267 oif = skb_dst(skb)->dev->ifindex; 3268 3269 memset(fl4, 0, sizeof(struct flowi4)); 3270 fl4->flowi4_mark = skb->mark; 3271 fl4->flowi4_oif = reverse ? skb->skb_iif : oif; 3272 3273 fl4->flowi4_proto = iph->protocol; 3274 fl4->daddr = reverse ? iph->saddr : iph->daddr; 3275 fl4->saddr = reverse ?
iph->daddr : iph->saddr; 3276 fl4->flowi4_tos = iph->tos; 3277 3278 if (!ip_is_fragment(iph)) { 3279 switch (iph->protocol) { 3280 case IPPROTO_UDP: 3281 case IPPROTO_UDPLITE: 3282 case IPPROTO_TCP: 3283 case IPPROTO_SCTP: 3284 case IPPROTO_DCCP: 3285 if (xprth + 4 < skb->data || 3286 pskb_may_pull(skb, xprth + 4 - skb->data)) { 3287 __be16 *ports; 3288 3289 xprth = skb_network_header(skb) + ihl * 4; 3290 ports = (__be16 *)xprth; 3291 3292 fl4->fl4_sport = ports[!!reverse]; 3293 fl4->fl4_dport = ports[!reverse]; 3294 } 3295 break; 3296 case IPPROTO_ICMP: 3297 if (xprth + 2 < skb->data || 3298 pskb_may_pull(skb, xprth + 2 - skb->data)) { 3299 u8 *icmp; 3300 3301 xprth = skb_network_header(skb) + ihl * 4; 3302 icmp = xprth; 3303 3304 fl4->fl4_icmp_type = icmp[0]; 3305 fl4->fl4_icmp_code = icmp[1]; 3306 } 3307 break; 3308 case IPPROTO_ESP: 3309 if (xprth + 4 < skb->data || 3310 pskb_may_pull(skb, xprth + 4 - skb->data)) { 3311 __be32 *ehdr; 3312 3313 xprth = skb_network_header(skb) + ihl * 4; 3314 ehdr = (__be32 *)xprth; 3315 3316 fl4->fl4_ipsec_spi = ehdr[0]; 3317 } 3318 break; 3319 case IPPROTO_AH: 3320 if (xprth + 8 < skb->data || 3321 pskb_may_pull(skb, xprth + 8 - skb->data)) { 3322 __be32 *ah_hdr; 3323 3324 xprth = skb_network_header(skb) + ihl * 4; 3325 ah_hdr = (__be32 *)xprth; 3326 3327 fl4->fl4_ipsec_spi = ah_hdr[1]; 3328 } 3329 break; 3330 case IPPROTO_COMP: 3331 if (xprth + 4 < skb->data || 3332 pskb_may_pull(skb, xprth + 4 - skb->data)) { 3333 __be16 *ipcomp_hdr; 3334 3335 xprth = skb_network_header(skb) + ihl * 4; 3336 ipcomp_hdr = (__be16 *)xprth; 3337 3338 fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1])); 3339 } 3340 break; 3341 case IPPROTO_GRE: 3342 if (xprth + 12 < skb->data || 3343 pskb_may_pull(skb, xprth + 12 - skb->data)) { 3344 __be16 *greflags; 3345 __be32 *gre_hdr; 3346 3347 xprth = skb_network_header(skb) + ihl * 4; 3348 greflags = (__be16 *)xprth; 3349 gre_hdr = (__be32 *)xprth; 3350 3351 if (greflags[0] & GRE_KEY) { 3352 if (greflags[0] & GRE_CSUM) 3353 gre_hdr++; 3354 fl4->fl4_gre_key = gre_hdr[1]; 3355 } 3356 } 3357 break; 3358 default: 3359 fl4->fl4_ipsec_spi = 0; 3360 break; 3361 } 3362 } 3363 } 3364 3365 #if IS_ENABLED(CONFIG_IPV6) 3366 static void 3367 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse) 3368 { 3369 struct flowi6 *fl6 = &fl->u.ip6; 3370 int onlyproto = 0; 3371 const struct ipv6hdr *hdr = ipv6_hdr(skb); 3372 u32 offset = sizeof(*hdr); 3373 struct ipv6_opt_hdr *exthdr; 3374 const unsigned char *nh = skb_network_header(skb); 3375 u16 nhoff = IP6CB(skb)->nhoff; 3376 int oif = 0; 3377 u8 nexthdr; 3378 3379 if (!nhoff) 3380 nhoff = offsetof(struct ipv6hdr, nexthdr); 3381 3382 nexthdr = nh[nhoff]; 3383 3384 if (skb_dst(skb) && skb_dst(skb)->dev) 3385 oif = skb_dst(skb)->dev->ifindex; 3386 3387 memset(fl6, 0, sizeof(struct flowi6)); 3388 fl6->flowi6_mark = skb->mark; 3389 fl6->flowi6_oif = reverse ? skb->skb_iif : oif; 3390 3391 fl6->daddr = reverse ? hdr->saddr : hdr->daddr; 3392 fl6->saddr = reverse ? 
hdr->daddr : hdr->saddr; 3393 3394 while (nh + offset + sizeof(*exthdr) < skb->data || 3395 pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) { 3396 nh = skb_network_header(skb); 3397 exthdr = (struct ipv6_opt_hdr *)(nh + offset); 3398 3399 switch (nexthdr) { 3400 case NEXTHDR_FRAGMENT: 3401 onlyproto = 1; 3402 /* fall through */ 3403 case NEXTHDR_ROUTING: 3404 case NEXTHDR_HOP: 3405 case NEXTHDR_DEST: 3406 offset += ipv6_optlen(exthdr); 3407 nexthdr = exthdr->nexthdr; 3408 exthdr = (struct ipv6_opt_hdr *)(nh + offset); 3409 break; 3410 case IPPROTO_UDP: 3411 case IPPROTO_UDPLITE: 3412 case IPPROTO_TCP: 3413 case IPPROTO_SCTP: 3414 case IPPROTO_DCCP: 3415 if (!onlyproto && (nh + offset + 4 < skb->data || 3416 pskb_may_pull(skb, nh + offset + 4 - skb->data))) { 3417 __be16 *ports; 3418 3419 nh = skb_network_header(skb); 3420 ports = (__be16 *)(nh + offset); 3421 fl6->fl6_sport = ports[!!reverse]; 3422 fl6->fl6_dport = ports[!reverse]; 3423 } 3424 fl6->flowi6_proto = nexthdr; 3425 return; 3426 case IPPROTO_ICMPV6: 3427 if (!onlyproto && (nh + offset + 2 < skb->data || 3428 pskb_may_pull(skb, nh + offset + 2 - skb->data))) { 3429 u8 *icmp; 3430 3431 nh = skb_network_header(skb); 3432 icmp = (u8 *)(nh + offset); 3433 fl6->fl6_icmp_type = icmp[0]; 3434 fl6->fl6_icmp_code = icmp[1]; 3435 } 3436 fl6->flowi6_proto = nexthdr; 3437 return; 3438 #if IS_ENABLED(CONFIG_IPV6_MIP6) 3439 case IPPROTO_MH: 3440 offset += ipv6_optlen(exthdr); 3441 if (!onlyproto && (nh + offset + 3 < skb->data || 3442 pskb_may_pull(skb, nh + offset + 3 - skb->data))) { 3443 struct ip6_mh *mh; 3444 3445 nh = skb_network_header(skb); 3446 mh = (struct ip6_mh *)(nh + offset); 3447 fl6->fl6_mh_type = mh->ip6mh_type; 3448 } 3449 fl6->flowi6_proto = nexthdr; 3450 return; 3451 #endif 3452 /* XXX Why are there these headers? 
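 * (Editorial observation: unlike decode_session4() above, the SPI is not
 * extracted for AH/ESP/COMP here; these cases only terminate the
 * extension-header walk with fl6_ipsec_spi set to zero.)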
*/ 3453 case IPPROTO_AH: 3454 case IPPROTO_ESP: 3455 case IPPROTO_COMP: 3456 default: 3457 fl6->fl6_ipsec_spi = 0; 3458 fl6->flowi6_proto = nexthdr; 3459 return; 3460 } 3461 } 3462 } 3463 #endif 3464 3465 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, 3466 unsigned int family, int reverse) 3467 { 3468 switch (family) { 3469 case AF_INET: 3470 decode_session4(skb, fl, reverse); 3471 break; 3472 #if IS_ENABLED(CONFIG_IPV6) 3473 case AF_INET6: 3474 decode_session6(skb, fl, reverse); 3475 break; 3476 #endif 3477 default: 3478 return -EAFNOSUPPORT; 3479 } 3480 3481 return security_xfrm_decode_session(skb, &fl->flowi_secid); 3482 } 3483 EXPORT_SYMBOL(__xfrm_decode_session); 3484 3485 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp) 3486 { 3487 for (; k < sp->len; k++) { 3488 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) { 3489 *idxp = k; 3490 return 1; 3491 } 3492 } 3493 3494 return 0; 3495 } 3496 3497 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, 3498 unsigned short family) 3499 { 3500 struct net *net = dev_net(skb->dev); 3501 struct xfrm_policy *pol; 3502 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 3503 int npols = 0; 3504 int xfrm_nr; 3505 int pi; 3506 int reverse; 3507 struct flowi fl; 3508 int xerr_idx = -1; 3509 const struct xfrm_if_cb *ifcb; 3510 struct sec_path *sp; 3511 struct xfrm_if *xi; 3512 u32 if_id = 0; 3513 3514 rcu_read_lock(); 3515 ifcb = xfrm_if_get_cb(); 3516 3517 if (ifcb) { 3518 xi = ifcb->decode_session(skb, family); 3519 if (xi) { 3520 if_id = xi->p.if_id; 3521 net = xi->net; 3522 } 3523 } 3524 rcu_read_unlock(); 3525 3526 reverse = dir & ~XFRM_POLICY_MASK; 3527 dir &= XFRM_POLICY_MASK; 3528 3529 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) { 3530 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 3531 return 0; 3532 } 3533 3534 nf_nat_decode_session(skb, &fl, family); 3535 3536 /* First, check used SA against their selectors. 
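 * (Editorial note: every state in the secpath must match the decoded
 * flow; a single selector mismatch fails the whole packet.)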
*/ 3537 sp = skb_sec_path(skb); 3538 if (sp) { 3539 int i; 3540 3541 for (i = sp->len - 1; i >= 0; i--) { 3542 struct xfrm_state *x = sp->xvec[i]; 3543 if (!xfrm_selector_match(&x->sel, &fl, family)) { 3544 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); 3545 return 0; 3546 } 3547 } 3548 } 3549 3550 pol = NULL; 3551 sk = sk_to_full_sk(sk); 3552 if (sk && sk->sk_policy[dir]) { 3553 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id); 3554 if (IS_ERR(pol)) { 3555 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3556 return 0; 3557 } 3558 } 3559 3560 if (!pol) 3561 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id); 3562 3563 if (IS_ERR(pol)) { 3564 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3565 return 0; 3566 } 3567 3568 if (!pol) { 3569 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) { 3570 xfrm_secpath_reject(xerr_idx, skb, &fl); 3571 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 3572 return 0; 3573 } 3574 return 1; 3575 } 3576 3577 pol->curlft.use_time = ktime_get_real_seconds(); 3578 3579 pols[0] = pol; 3580 npols++; 3581 #ifdef CONFIG_XFRM_SUB_POLICY 3582 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 3583 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, 3584 &fl, family, 3585 XFRM_POLICY_IN, if_id); 3586 if (pols[1]) { 3587 if (IS_ERR(pols[1])) { 3588 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3589 return 0; 3590 } 3591 pols[1]->curlft.use_time = ktime_get_real_seconds(); 3592 npols++; 3593 } 3594 } 3595 #endif 3596 3597 if (pol->action == XFRM_POLICY_ALLOW) { 3598 static struct sec_path dummy; 3599 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH]; 3600 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; 3601 struct xfrm_tmpl **tpp = tp; 3602 int ti = 0; 3603 int i, k; 3604 3605 sp = skb_sec_path(skb); 3606 if (!sp) 3607 sp = &dummy; 3608 3609 for (pi = 0; pi < npols; pi++) { 3610 if (pols[pi] != pol && 3611 pols[pi]->action != XFRM_POLICY_ALLOW) { 3612 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 3613 goto reject; 3614 } 3615 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) { 3616 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); 3617 goto reject_error; 3618 } 3619 for (i = 0; i < pols[pi]->xfrm_nr; i++) 3620 tpp[ti++] = &pols[pi]->xfrm_vec[i]; 3621 } 3622 xfrm_nr = ti; 3623 if (npols > 1) { 3624 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); 3625 tpp = stp; 3626 } 3627 3628 /* For each tunnel xfrm, find the first matching tmpl. 3629 * For each tmpl before that, find corresponding xfrm. 3630 * Order is _important_. Later we will implement 3631 * some barriers, but at the moment barriers 3632 * are implied between each two transformations. 
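 *
 * An editorial worked example (not in the original comment): with
 * templates [T0 = optional transport, T1 = required tunnel], the loop
 * below visits i = 1 first; xfrm_policy_ok() scans the secpath from k
 * for a state matching T1 and returns the index just past the match.
 * T0 is then visited as optional transport mode and passes k through
 * unchanged, so validation succeeds.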
3633 */ 3634 for (i = xfrm_nr-1, k = 0; i >= 0; i--) { 3635 k = xfrm_policy_ok(tpp[i], sp, k, family); 3636 if (k < 0) { 3637 if (k < -1) 3638 /* "-2 - errored_index" returned */ 3639 xerr_idx = -(2+k); 3640 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 3641 goto reject; 3642 } 3643 } 3644 3645 if (secpath_has_nontransport(sp, k, &xerr_idx)) { 3646 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 3647 goto reject; 3648 } 3649 3650 xfrm_pols_put(pols, npols); 3651 return 1; 3652 } 3653 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 3654 3655 reject: 3656 xfrm_secpath_reject(xerr_idx, skb, &fl); 3657 reject_error: 3658 xfrm_pols_put(pols, npols); 3659 return 0; 3660 } 3661 EXPORT_SYMBOL(__xfrm_policy_check); 3662 3663 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) 3664 { 3665 struct net *net = dev_net(skb->dev); 3666 struct flowi fl; 3667 struct dst_entry *dst; 3668 int res = 1; 3669 3670 if (xfrm_decode_session(skb, &fl, family) < 0) { 3671 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 3672 return 0; 3673 } 3674 3675 skb_dst_force(skb); 3676 if (!skb_dst(skb)) { 3677 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 3678 return 0; 3679 } 3680 3681 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE); 3682 if (IS_ERR(dst)) { 3683 res = 0; 3684 dst = NULL; 3685 } 3686 skb_dst_set(skb, dst); 3687 return res; 3688 } 3689 EXPORT_SYMBOL(__xfrm_route_forward); 3690 3691 /* Optimize later using cookies and generation ids. */ 3692 3693 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 3694 { 3695 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete 3696 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to 3697 * get validated by dst_ops->check on every use. We do this 3698 * because when a normal route referenced by an XFRM dst is 3699 * obsoleted we do not go looking around for all parent 3700 * XFRM dsts that reference it so that we can invalidate them. It 3701 * is just too much work. Instead we make the checks here on 3702 * every use. For example: 3703 * 3704 * XFRM dst A --> IPv4 dst X 3705 * 3706 * X is the "xdst->route" of A (X is also the "dst->path" of A 3707 * in this example). If X is marked obsolete, "A" will not 3708 * notice. That's what we are validating here via the 3709 * stale_bundle() check. 3710 * 3711 * When a dst is removed from the fib tree, it will be marked 3712 * DST_OBSOLETE_DEAD. 3713 * This will force stale_bundle() to fail on any xdst bundle with 3714 * this dst linked in it. 3715 */ 3716 if (dst->obsolete < 0 && !stale_bundle(dst)) 3717 return dst; 3718 3719 return NULL; 3720 } 3721 3722 static int stale_bundle(struct dst_entry *dst) 3723 { 3724 return !xfrm_bundle_ok((struct xfrm_dst *)dst); 3725 } 3726 3727 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) 3728 { 3729 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) { 3730 dst->dev = dev_net(dev)->loopback_dev; 3731 dev_hold(dst->dev); 3732 dev_put(dev); 3733 } 3734 } 3735 EXPORT_SYMBOL(xfrm_dst_ifdown); 3736 3737 static void xfrm_link_failure(struct sk_buff *skb) 3738 { 3739 /* Impossible. Such a dst must be popped before it reaches the point of failure.
*/ 3740 } 3741 3742 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) 3743 { 3744 if (dst) { 3745 if (dst->obsolete) { 3746 dst_release(dst); 3747 dst = NULL; 3748 } 3749 } 3750 return dst; 3751 } 3752 3753 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr) 3754 { 3755 while (nr--) { 3756 struct xfrm_dst *xdst = bundle[nr]; 3757 u32 pmtu, route_mtu_cached; 3758 struct dst_entry *dst; 3759 3760 dst = &xdst->u.dst; 3761 pmtu = dst_mtu(xfrm_dst_child(dst)); 3762 xdst->child_mtu_cached = pmtu; 3763 3764 pmtu = xfrm_state_mtu(dst->xfrm, pmtu); 3765 3766 route_mtu_cached = dst_mtu(xdst->route); 3767 xdst->route_mtu_cached = route_mtu_cached; 3768 3769 if (pmtu > route_mtu_cached) 3770 pmtu = route_mtu_cached; 3771 3772 dst_metric_set(dst, RTAX_MTU, pmtu); 3773 } 3774 } 3775 3776 /* Check that the bundle accepts the flow and its components are 3777 * still valid. 3778 */ 3779 3780 static int xfrm_bundle_ok(struct xfrm_dst *first) 3781 { 3782 struct xfrm_dst *bundle[XFRM_MAX_DEPTH]; 3783 struct dst_entry *dst = &first->u.dst; 3784 struct xfrm_dst *xdst; 3785 int start_from, nr; 3786 u32 mtu; 3787 3788 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) || 3789 (dst->dev && !netif_running(dst->dev))) 3790 return 0; 3791 3792 if (dst->flags & DST_XFRM_QUEUE) 3793 return 1; 3794 3795 start_from = nr = 0; 3796 do { 3797 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 3798 3799 if (dst->xfrm->km.state != XFRM_STATE_VALID) 3800 return 0; 3801 if (xdst->xfrm_genid != dst->xfrm->genid) 3802 return 0; 3803 if (xdst->num_pols > 0 && 3804 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) 3805 return 0; 3806 3807 bundle[nr++] = xdst; 3808 3809 mtu = dst_mtu(xfrm_dst_child(dst)); 3810 if (xdst->child_mtu_cached != mtu) { 3811 start_from = nr; 3812 xdst->child_mtu_cached = mtu; 3813 } 3814 3815 if (!dst_check(xdst->route, xdst->route_cookie)) 3816 return 0; 3817 mtu = dst_mtu(xdst->route); 3818 if (xdst->route_mtu_cached != mtu) { 3819 start_from = nr; 3820 xdst->route_mtu_cached = mtu; 3821 } 3822 3823 dst = xfrm_dst_child(dst); 3824 } while (dst->xfrm); 3825 3826 if (likely(!start_from)) 3827 return 1; 3828 3829 xdst = bundle[start_from - 1]; 3830 mtu = xdst->child_mtu_cached; 3831 while (start_from--) { 3832 dst = &xdst->u.dst; 3833 3834 mtu = xfrm_state_mtu(dst->xfrm, mtu); 3835 if (mtu > xdst->route_mtu_cached) 3836 mtu = xdst->route_mtu_cached; 3837 dst_metric_set(dst, RTAX_MTU, mtu); 3838 if (!start_from) 3839 break; 3840 3841 xdst = bundle[start_from - 1]; 3842 xdst->child_mtu_cached = mtu; 3843 } 3844 3845 return 1; 3846 } 3847 3848 static unsigned int xfrm_default_advmss(const struct dst_entry *dst) 3849 { 3850 return dst_metric_advmss(xfrm_dst_path(dst)); 3851 } 3852 3853 static unsigned int xfrm_mtu(const struct dst_entry *dst) 3854 { 3855 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 3856 3857 return mtu ? 
: dst_mtu(xfrm_dst_path(dst)); 3858 } 3859 3860 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst, 3861 const void *daddr) 3862 { 3863 while (dst->xfrm) { 3864 const struct xfrm_state *xfrm = dst->xfrm; 3865 3866 dst = xfrm_dst_child(dst); 3867 3868 if (xfrm->props.mode == XFRM_MODE_TRANSPORT) 3869 continue; 3870 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR) 3871 daddr = xfrm->coaddr; 3872 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) 3873 daddr = &xfrm->id.daddr; 3874 } 3875 return daddr; 3876 } 3877 3878 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, 3879 struct sk_buff *skb, 3880 const void *daddr) 3881 { 3882 const struct dst_entry *path = xfrm_dst_path(dst); 3883 3884 if (!skb) 3885 daddr = xfrm_get_dst_nexthop(dst, daddr); 3886 return path->ops->neigh_lookup(path, skb, daddr); 3887 } 3888 3889 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr) 3890 { 3891 const struct dst_entry *path = xfrm_dst_path(dst); 3892 3893 daddr = xfrm_get_dst_nexthop(dst, daddr); 3894 path->ops->confirm_neigh(path, daddr); 3895 } 3896 3897 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family) 3898 { 3899 int err = 0; 3900 3901 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo))) 3902 return -EAFNOSUPPORT; 3903 3904 spin_lock(&xfrm_policy_afinfo_lock); 3905 if (unlikely(xfrm_policy_afinfo[family] != NULL)) 3906 err = -EEXIST; 3907 else { 3908 struct dst_ops *dst_ops = afinfo->dst_ops; 3909 if (likely(dst_ops->kmem_cachep == NULL)) 3910 dst_ops->kmem_cachep = xfrm_dst_cache; 3911 if (likely(dst_ops->check == NULL)) 3912 dst_ops->check = xfrm_dst_check; 3913 if (likely(dst_ops->default_advmss == NULL)) 3914 dst_ops->default_advmss = xfrm_default_advmss; 3915 if (likely(dst_ops->mtu == NULL)) 3916 dst_ops->mtu = xfrm_mtu; 3917 if (likely(dst_ops->negative_advice == NULL)) 3918 dst_ops->negative_advice = xfrm_negative_advice; 3919 if (likely(dst_ops->link_failure == NULL)) 3920 dst_ops->link_failure = xfrm_link_failure; 3921 if (likely(dst_ops->neigh_lookup == NULL)) 3922 dst_ops->neigh_lookup = xfrm_neigh_lookup; 3923 if (likely(!dst_ops->confirm_neigh)) 3924 dst_ops->confirm_neigh = xfrm_confirm_neigh; 3925 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo); 3926 } 3927 spin_unlock(&xfrm_policy_afinfo_lock); 3928 3929 return err; 3930 } 3931 EXPORT_SYMBOL(xfrm_policy_register_afinfo); 3932 3933 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo) 3934 { 3935 struct dst_ops *dst_ops = afinfo->dst_ops; 3936 int i; 3937 3938 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) { 3939 if (xfrm_policy_afinfo[i] != afinfo) 3940 continue; 3941 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL); 3942 break; 3943 } 3944 3945 synchronize_rcu(); 3946 3947 dst_ops->kmem_cachep = NULL; 3948 dst_ops->check = NULL; 3949 dst_ops->negative_advice = NULL; 3950 dst_ops->link_failure = NULL; 3951 } 3952 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 3953 3954 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb) 3955 { 3956 spin_lock(&xfrm_if_cb_lock); 3957 rcu_assign_pointer(xfrm_if_cb, ifcb); 3958 spin_unlock(&xfrm_if_cb_lock); 3959 } 3960 EXPORT_SYMBOL(xfrm_if_register_cb); 3961 3962 void xfrm_if_unregister_cb(void) 3963 { 3964 RCU_INIT_POINTER(xfrm_if_cb, NULL); 3965 synchronize_rcu(); 3966 } 3967 EXPORT_SYMBOL(xfrm_if_unregister_cb); 3968 3969 #ifdef CONFIG_XFRM_STATISTICS 3970 static int __net_init xfrm_statistics_init(struct net *net) 3971 { 3972 int rv; 3973 net->mib.xfrm_statistics = 
alloc_percpu(struct linux_xfrm_mib); 3974 if (!net->mib.xfrm_statistics) 3975 return -ENOMEM; 3976 rv = xfrm_proc_init(net); 3977 if (rv < 0) 3978 free_percpu(net->mib.xfrm_statistics); 3979 return rv; 3980 } 3981 3982 static void xfrm_statistics_fini(struct net *net) 3983 { 3984 xfrm_proc_fini(net); 3985 free_percpu(net->mib.xfrm_statistics); 3986 } 3987 #else 3988 static int __net_init xfrm_statistics_init(struct net *net) 3989 { 3990 return 0; 3991 } 3992 3993 static void xfrm_statistics_fini(struct net *net) 3994 { 3995 } 3996 #endif 3997 3998 static int __net_init xfrm_policy_init(struct net *net) 3999 { 4000 unsigned int hmask, sz; 4001 int dir, err; 4002 4003 if (net_eq(net, &init_net)) { 4004 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache", 4005 sizeof(struct xfrm_dst), 4006 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4007 NULL); 4008 err = rhashtable_init(&xfrm_policy_inexact_table, 4009 &xfrm_pol_inexact_params); 4010 BUG_ON(err); 4011 } 4012 4013 hmask = 8 - 1; 4014 sz = (hmask+1) * sizeof(struct hlist_head); 4015 4016 net->xfrm.policy_byidx = xfrm_hash_alloc(sz); 4017 if (!net->xfrm.policy_byidx) 4018 goto out_byidx; 4019 net->xfrm.policy_idx_hmask = hmask; 4020 4021 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 4022 struct xfrm_policy_hash *htab; 4023 4024 net->xfrm.policy_count[dir] = 0; 4025 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0; 4026 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 4027 4028 htab = &net->xfrm.policy_bydst[dir]; 4029 htab->table = xfrm_hash_alloc(sz); 4030 if (!htab->table) 4031 goto out_bydst; 4032 htab->hmask = hmask; 4033 htab->dbits4 = 32; 4034 htab->sbits4 = 32; 4035 htab->dbits6 = 128; 4036 htab->sbits6 = 128; 4037 } 4038 net->xfrm.policy_hthresh.lbits4 = 32; 4039 net->xfrm.policy_hthresh.rbits4 = 32; 4040 net->xfrm.policy_hthresh.lbits6 = 128; 4041 net->xfrm.policy_hthresh.rbits6 = 128; 4042 4043 seqlock_init(&net->xfrm.policy_hthresh.lock); 4044 4045 INIT_LIST_HEAD(&net->xfrm.policy_all); 4046 INIT_LIST_HEAD(&net->xfrm.inexact_bins); 4047 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); 4048 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild); 4049 return 0; 4050 4051 out_bydst: 4052 for (dir--; dir >= 0; dir--) { 4053 struct xfrm_policy_hash *htab; 4054 4055 htab = &net->xfrm.policy_bydst[dir]; 4056 xfrm_hash_free(htab->table, sz); 4057 } 4058 xfrm_hash_free(net->xfrm.policy_byidx, sz); 4059 out_byidx: 4060 return -ENOMEM; 4061 } 4062 4063 static void xfrm_policy_fini(struct net *net) 4064 { 4065 struct xfrm_pol_inexact_bin *b, *t; 4066 unsigned int sz; 4067 int dir; 4068 4069 flush_work(&net->xfrm.policy_hash_work); 4070 #ifdef CONFIG_XFRM_SUB_POLICY 4071 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false); 4072 #endif 4073 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false); 4074 4075 WARN_ON(!list_empty(&net->xfrm.policy_all)); 4076 4077 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 4078 struct xfrm_policy_hash *htab; 4079 4080 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 4081 4082 htab = &net->xfrm.policy_bydst[dir]; 4083 sz = (htab->hmask + 1) * sizeof(struct hlist_head); 4084 WARN_ON(!hlist_empty(htab->table)); 4085 xfrm_hash_free(htab->table, sz); 4086 } 4087 4088 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head); 4089 WARN_ON(!hlist_empty(net->xfrm.policy_byidx)); 4090 xfrm_hash_free(net->xfrm.policy_byidx, sz); 4091 4092 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 4093 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins) 4094 __xfrm_policy_inexact_prune_bin(b, true); 4095 
spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 4096 } 4097 4098 static int __net_init xfrm_net_init(struct net *net) 4099 { 4100 int rv; 4101 4102 /* Initialize the per-net locks here */ 4103 spin_lock_init(&net->xfrm.xfrm_state_lock); 4104 spin_lock_init(&net->xfrm.xfrm_policy_lock); 4105 mutex_init(&net->xfrm.xfrm_cfg_mutex); 4106 4107 rv = xfrm_statistics_init(net); 4108 if (rv < 0) 4109 goto out_statistics; 4110 rv = xfrm_state_init(net); 4111 if (rv < 0) 4112 goto out_state; 4113 rv = xfrm_policy_init(net); 4114 if (rv < 0) 4115 goto out_policy; 4116 rv = xfrm_sysctl_init(net); 4117 if (rv < 0) 4118 goto out_sysctl; 4119 4120 return 0; 4121 4122 out_sysctl: 4123 xfrm_policy_fini(net); 4124 out_policy: 4125 xfrm_state_fini(net); 4126 out_state: 4127 xfrm_statistics_fini(net); 4128 out_statistics: 4129 return rv; 4130 } 4131 4132 static void __net_exit xfrm_net_exit(struct net *net) 4133 { 4134 xfrm_sysctl_fini(net); 4135 xfrm_policy_fini(net); 4136 xfrm_state_fini(net); 4137 xfrm_statistics_fini(net); 4138 } 4139 4140 static struct pernet_operations __net_initdata xfrm_net_ops = { 4141 .init = xfrm_net_init, 4142 .exit = xfrm_net_exit, 4143 }; 4144 4145 void __init xfrm_init(void) 4146 { 4147 register_pernet_subsys(&xfrm_net_ops); 4148 xfrm_dev_init(); 4149 seqcount_init(&xfrm_policy_hash_generation); 4150 xfrm_input_init(); 4151 4152 #ifdef CONFIG_XFRM_ESPINTCP 4153 espintcp_init(); 4154 #endif 4155 4156 RCU_INIT_POINTER(xfrm_if_cb, NULL); 4157 synchronize_rcu(); 4158 } 4159 4160 #ifdef CONFIG_AUDITSYSCALL 4161 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp, 4162 struct audit_buffer *audit_buf) 4163 { 4164 struct xfrm_sec_ctx *ctx = xp->security; 4165 struct xfrm_selector *sel = &xp->selector; 4166 4167 if (ctx) 4168 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", 4169 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str); 4170 4171 switch (sel->family) { 4172 case AF_INET: 4173 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4); 4174 if (sel->prefixlen_s != 32) 4175 audit_log_format(audit_buf, " src_prefixlen=%d", 4176 sel->prefixlen_s); 4177 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4); 4178 if (sel->prefixlen_d != 32) 4179 audit_log_format(audit_buf, " dst_prefixlen=%d", 4180 sel->prefixlen_d); 4181 break; 4182 case AF_INET6: 4183 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6); 4184 if (sel->prefixlen_s != 128) 4185 audit_log_format(audit_buf, " src_prefixlen=%d", 4186 sel->prefixlen_s); 4187 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6); 4188 if (sel->prefixlen_d != 128) 4189 audit_log_format(audit_buf, " dst_prefixlen=%d", 4190 sel->prefixlen_d); 4191 break; 4192 } 4193 } 4194 4195 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid) 4196 { 4197 struct audit_buffer *audit_buf; 4198 4199 audit_buf = xfrm_audit_start("SPD-add"); 4200 if (audit_buf == NULL) 4201 return; 4202 xfrm_audit_helper_usrinfo(task_valid, audit_buf); 4203 audit_log_format(audit_buf, " res=%u", result); 4204 xfrm_audit_common_policyinfo(xp, audit_buf); 4205 audit_log_end(audit_buf); 4206 } 4207 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); 4208 4209 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 4210 bool task_valid) 4211 { 4212 struct audit_buffer *audit_buf; 4213 4214 audit_buf = xfrm_audit_start("SPD-delete"); 4215 if (audit_buf == NULL) 4216 return; 4217 xfrm_audit_helper_usrinfo(task_valid, audit_buf); 4218 audit_log_format(audit_buf, " res=%u", result); 4219 xfrm_audit_common_policyinfo(xp, audit_buf); 4220 
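		/*
		 * Editorial note: bumping pol->genid above is what retires
		 * cached bundles built for the old endpoints;
		 * xfrm_bundle_ok() compares xdst->policy_genid against it
		 * and declares such bundles stale.
		 */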
audit_log_end(audit_buf); 4221 } 4222 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete); 4223 #endif 4224 4225 #ifdef CONFIG_XFRM_MIGRATE 4226 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, 4227 const struct xfrm_selector *sel_tgt) 4228 { 4229 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { 4230 if (sel_tgt->family == sel_cmp->family && 4231 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr, 4232 sel_cmp->family) && 4233 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr, 4234 sel_cmp->family) && 4235 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && 4236 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { 4237 return true; 4238 } 4239 } else { 4240 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) { 4241 return true; 4242 } 4243 } 4244 return false; 4245 } 4246 4247 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel, 4248 u8 dir, u8 type, struct net *net) 4249 { 4250 struct xfrm_policy *pol, *ret = NULL; 4251 struct hlist_head *chain; 4252 u32 priority = ~0U; 4253 4254 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 4255 chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir); 4256 hlist_for_each_entry(pol, chain, bydst) { 4257 if (xfrm_migrate_selector_match(sel, &pol->selector) && 4258 pol->type == type) { 4259 ret = pol; 4260 priority = ret->priority; 4261 break; 4262 } 4263 } 4264 chain = &net->xfrm.policy_inexact[dir]; 4265 hlist_for_each_entry(pol, chain, bydst_inexact_list) { 4266 if ((pol->priority >= priority) && ret) 4267 break; 4268 4269 if (xfrm_migrate_selector_match(sel, &pol->selector) && 4270 pol->type == type) { 4271 ret = pol; 4272 break; 4273 } 4274 } 4275 4276 xfrm_pol_hold(ret); 4277 4278 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 4279 4280 return ret; 4281 } 4282 4283 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t) 4284 { 4285 int match = 0; 4286 4287 if (t->mode == m->mode && t->id.proto == m->proto && 4288 (m->reqid == 0 || t->reqid == m->reqid)) { 4289 switch (t->mode) { 4290 case XFRM_MODE_TUNNEL: 4291 case XFRM_MODE_BEET: 4292 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr, 4293 m->old_family) && 4294 xfrm_addr_equal(&t->saddr, &m->old_saddr, 4295 m->old_family)) { 4296 match = 1; 4297 } 4298 break; 4299 case XFRM_MODE_TRANSPORT: 4300 /* in case of transport mode, template does not store 4301 any IP addresses, hence we just compare mode and 4302 protocol */ 4303 match = 1; 4304 break; 4305 default: 4306 break; 4307 } 4308 } 4309 return match; 4310 } 4311 4312 /* update endpoint address(es) of template(s) */ 4313 static int xfrm_policy_migrate(struct xfrm_policy *pol, 4314 struct xfrm_migrate *m, int num_migrate) 4315 { 4316 struct xfrm_migrate *mp; 4317 int i, j, n = 0; 4318 4319 write_lock_bh(&pol->lock); 4320 if (unlikely(pol->walk.dead)) { 4321 /* target policy has been deleted */ 4322 write_unlock_bh(&pol->lock); 4323 return -ENOENT; 4324 } 4325 4326 for (i = 0; i < pol->xfrm_nr; i++) { 4327 for (j = 0, mp = m; j < num_migrate; j++, mp++) { 4328 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i])) 4329 continue; 4330 n++; 4331 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL && 4332 pol->xfrm_vec[i].mode != XFRM_MODE_BEET) 4333 continue; 4334 /* update endpoints */ 4335 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr, 4336 sizeof(pol->xfrm_vec[i].id.daddr)); 4337 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr, 4338 sizeof(pol->xfrm_vec[i].saddr)); 4339 pol->xfrm_vec[i].encap_family = mp->new_family; 4340 /* flush bundles */ 4341 atomic_inc(&pol->genid); 4342 } 
4343 } 4344 4345 write_unlock_bh(&pol->lock); 4346 4347 if (!n) 4348 return -ENODATA; 4349 4350 return 0; 4351 } 4352 4353 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) 4354 { 4355 int i, j; 4356 4357 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) 4358 return -EINVAL; 4359 4360 for (i = 0; i < num_migrate; i++) { 4361 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) || 4362 xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) 4363 return -EINVAL; 4364 4365 /* check if there is any duplicated entry */ 4366 for (j = i + 1; j < num_migrate; j++) { 4367 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr, 4368 sizeof(m[i].old_daddr)) && 4369 !memcmp(&m[i].old_saddr, &m[j].old_saddr, 4370 sizeof(m[i].old_saddr)) && 4371 m[i].proto == m[j].proto && 4372 m[i].mode == m[j].mode && 4373 m[i].reqid == m[j].reqid && 4374 m[i].old_family == m[j].old_family) 4375 return -EINVAL; 4376 } 4377 } 4378 4379 return 0; 4380 } 4381 4382 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 4383 struct xfrm_migrate *m, int num_migrate, 4384 struct xfrm_kmaddress *k, struct net *net, 4385 struct xfrm_encap_tmpl *encap) 4386 { 4387 int i, err, nx_cur = 0, nx_new = 0; 4388 struct xfrm_policy *pol = NULL; 4389 struct xfrm_state *x, *xc; 4390 struct xfrm_state *x_cur[XFRM_MAX_DEPTH]; 4391 struct xfrm_state *x_new[XFRM_MAX_DEPTH]; 4392 struct xfrm_migrate *mp; 4393 4394 /* Stage 0 - sanity checks */ 4395 if ((err = xfrm_migrate_check(m, num_migrate)) < 0) 4396 goto out; 4397 4398 if (dir >= XFRM_POLICY_MAX) { 4399 err = -EINVAL; 4400 goto out; 4401 } 4402 4403 /* Stage 1 - find policy */ 4404 if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { 4405 err = -ENOENT; 4406 goto out; 4407 } 4408 4409 /* Stage 2 - find and update state(s) */ 4410 for (i = 0, mp = m; i < num_migrate; i++, mp++) { 4411 if ((x = xfrm_migrate_state_find(mp, net))) { 4412 x_cur[nx_cur] = x; 4413 nx_cur++; 4414 xc = xfrm_state_migrate(x, mp, encap); 4415 if (xc) { 4416 x_new[nx_new] = xc; 4417 nx_new++; 4418 } else { 4419 err = -ENODATA; 4420 goto restore_state; 4421 } 4422 } 4423 } 4424 4425 /* Stage 3 - update policy */ 4426 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0) 4427 goto restore_state; 4428 4429 /* Stage 4 - delete old state(s) */ 4430 if (nx_cur) { 4431 xfrm_states_put(x_cur, nx_cur); 4432 xfrm_states_delete(x_cur, nx_cur); 4433 } 4434 4435 /* Stage 5 - announce */ 4436 km_migrate(sel, dir, type, m, num_migrate, k, encap); 4437 4438 xfrm_pol_put(pol); 4439 4440 return 0; 4441 out: 4442 return err; 4443 4444 restore_state: 4445 if (pol) 4446 xfrm_pol_put(pol); 4447 if (nx_cur) 4448 xfrm_states_put(x_cur, nx_cur); 4449 if (nx_new) 4450 xfrm_states_delete(x_new, nx_new); 4451 4452 return err; 4453 } 4454 EXPORT_SYMBOL(xfrm_migrate); 4455 #endif 4456