/*
 * xfrm_policy.c
 *
 * Changes:
 *      Mitsuru KANDA @USAGI
 *      Kazunori MIYAZAWA @USAGI
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *              IPv6 support
 *      Kazunori MIYAZAWA @USAGI
 *      YOSHIFUJI Hideaki
 *              Split up af-specific portion
 *      Derek Atkins <derek@ihtfp.com>          Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

int sysctl_xfrm_larval_drop __read_mostly;

#ifdef CONFIG_XFRM_STATISTICS
DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly;
EXPORT_SYMBOL(xfrm_statistics);
#endif

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

static struct list_head xfrm_policy_bytype[XFRM_POLICY_TYPE_MAX];
unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_count);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);

static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
        return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
                addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
                !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
                !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
                (fl->proto == sel->proto || !sel->proto) &&
                (fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
        return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
                addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
                !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
                !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
                (fl->proto == sel->proto || !sel->proto) &&
                (fl->oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
                        unsigned short family)
{
        switch (family) {
        case AF_INET:
                return __xfrm4_selector_match(sel, fl);
        case AF_INET6:
                return __xfrm6_selector_match(sel, fl);
        }
        return 0;
}

static inline struct dst_entry *__xfrm_dst_lookup(int tos,
                                                  xfrm_address_t *saddr,
                                                  xfrm_address_t *daddr,
                                                  int family)
{
        struct xfrm_policy_afinfo *afinfo;
        struct dst_entry *dst;

        afinfo = xfrm_policy_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return ERR_PTR(-EAFNOSUPPORT);

        dst = afinfo->dst_lookup(tos, saddr, daddr);

        xfrm_policy_put_afinfo(afinfo);

        return dst;
}
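/*
 * All address-family specific work (route lookup, source address
 * selection, dst allocation, session decoding) is dispatched through
 * the xfrm_policy_afinfo table above.  xfrm_policy_get_afinfo() takes
 * xfrm_policy_afinfo_lock for reading and xfrm_policy_put_afinfo()
 * drops it, so an afinfo entry cannot be unregistered while a caller
 * is inside one of its callbacks.
 */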
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
                                                xfrm_address_t *prev_saddr,
                                                xfrm_address_t *prev_daddr,
                                                int family)
{
        xfrm_address_t *saddr = &x->props.saddr;
        xfrm_address_t *daddr = &x->id.daddr;
        struct dst_entry *dst;

        if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
                saddr = x->coaddr;
                daddr = prev_daddr;
        }
        if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
                saddr = prev_saddr;
                daddr = x->coaddr;
        }

        dst = __xfrm_dst_lookup(tos, saddr, daddr, family);

        if (!IS_ERR(dst)) {
                if (prev_saddr != saddr)
                        memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
                if (prev_daddr != daddr)
                        memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
        }

        return dst;
}

static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
                return MAX_SCHEDULE_TIMEOUT-1;
        else
                return secs*HZ;
}

static void xfrm_policy_timer(unsigned long data)
{
        struct xfrm_policy *xp = (struct xfrm_policy *)data;
        unsigned long now = get_seconds();
        long next = LONG_MAX;
        int warn = 0;
        int dir;

        read_lock(&xp->lock);

        if (xp->dead)
                goto out;

        dir = xfrm_policy_id2dir(xp->index);

        if (xp->lft.hard_add_expires_seconds) {
                long tmo = xp->lft.hard_add_expires_seconds +
                        xp->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.hard_use_expires_seconds) {
                long tmo = xp->lft.hard_use_expires_seconds +
                        (xp->curlft.use_time ? : xp->curlft.add_time) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.soft_add_expires_seconds) {
                long tmo = xp->lft.soft_add_expires_seconds +
                        xp->curlft.add_time - now;
                if (tmo <= 0) {
                        warn = 1;
                        tmo = XFRM_KM_TIMEOUT;
                }
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.soft_use_expires_seconds) {
                long tmo = xp->lft.soft_use_expires_seconds +
                        (xp->curlft.use_time ? : xp->curlft.add_time) - now;
                if (tmo <= 0) {
                        warn = 1;
                        tmo = XFRM_KM_TIMEOUT;
                }
                if (tmo < next)
                        next = tmo;
        }

        if (warn)
                km_policy_expired(xp, dir, 0, 0);
        if (next != LONG_MAX &&
            !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
                xfrm_pol_hold(xp);

out:
        read_unlock(&xp->lock);
        xfrm_pol_put(xp);
        return;

expired:
        read_unlock(&xp->lock);
        if (!xfrm_policy_delete(xp, dir))
                km_policy_expired(xp, dir, 1, 0);
        xfrm_pol_put(xp);
}


/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
        struct xfrm_policy *policy;

        policy = kzalloc(sizeof(struct xfrm_policy), gfp);

        if (policy) {
                INIT_LIST_HEAD(&policy->bytype);
                INIT_HLIST_NODE(&policy->bydst);
                INIT_HLIST_NODE(&policy->byidx);
                rwlock_init(&policy->lock);
                atomic_set(&policy->refcnt, 1);
                setup_timer(&policy->timer, xfrm_policy_timer,
                            (unsigned long)policy);
        }
        return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
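/*
 * xfrm_policy_timer() above enforces the four SPD lifetimes: a hard
 * add/use expiry deletes the policy, while a soft expiry only notifies
 * the key manager (km_policy_expired() with hard == 0) and re-checks
 * after at most XFRM_KM_TIMEOUT seconds.  For example, a policy added
 * at t=0 with soft_add_expires_seconds = 50 and
 * hard_add_expires_seconds = 60 triggers a soft-expire notification
 * around t=50 and is deleted around t=60.
 */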
/* Destroy xfrm_policy: descendant resources must be released by this moment.
 */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
        BUG_ON(!policy->dead);

        BUG_ON(policy->bundles);

        if (del_timer(&policy->timer))
                BUG();

        write_lock_bh(&xfrm_policy_lock);
        list_del(&policy->bytype);
        write_unlock_bh(&xfrm_policy_lock);

        security_xfrm_policy_free(policy->security);
        kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
        struct dst_entry *dst;

        while ((dst = policy->bundles) != NULL) {
                policy->bundles = dst->next;
                dst_free(dst);
        }

        if (del_timer(&policy->timer))
                atomic_dec(&policy->refcnt);

        if (atomic_read(&policy->refcnt) > 1)
                flow_cache_flush();

        xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(struct work_struct *work)
{
        struct xfrm_policy *policy;
        struct hlist_node *entry, *tmp;
        struct hlist_head gc_list;

        spin_lock_bh(&xfrm_policy_gc_lock);
        gc_list.first = xfrm_policy_gc_list.first;
        INIT_HLIST_HEAD(&xfrm_policy_gc_list);
        spin_unlock_bh(&xfrm_policy_gc_lock);

        hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
                xfrm_policy_gc_kill(policy);
}

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from all lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
        int dead;

        write_lock_bh(&policy->lock);
        dead = policy->dead;
        policy->dead = 1;
        write_unlock_bh(&policy->lock);

        if (unlikely(dead)) {
                WARN_ON(1);
                return;
        }

        spin_lock(&xfrm_policy_gc_lock);
        hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
        spin_unlock(&xfrm_policy_gc_lock);

        schedule_work(&xfrm_policy_gc_work);
}

struct xfrm_policy_hash {
        struct hlist_head       *table;
        unsigned int            hmask;
};

static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
static struct hlist_head *xfrm_policy_byidx __read_mostly;
static unsigned int xfrm_idx_hmask __read_mostly;
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(u32 index)
{
        return __idx_hash(index, xfrm_idx_hmask);
}
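/*
 * Policies are reachable through three structures:
 *
 *   - xfrm_policy_bydst[dir]: a per-direction hash table keyed on the
 *     selector addresses, for fully specified selectors;
 *   - xfrm_policy_inexact[dir]: a per-direction chain for prefixed
 *     ("inexact") selectors, which __sel_hash() signals by returning
 *     hmask + 1;
 *   - xfrm_policy_byidx: a global hash keyed on policy->index.
 */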
static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
{
        unsigned int hmask = xfrm_policy_bydst[dir].hmask;
        unsigned int hash = __sel_hash(sel, family, hmask);

        return (hash == hmask + 1 ?
                &xfrm_policy_inexact[dir] :
                xfrm_policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
        unsigned int hmask = xfrm_policy_bydst[dir].hmask;
        unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

        return xfrm_policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
                                   struct hlist_head *ndsttable,
                                   unsigned int nhashmask)
{
        struct hlist_node *entry, *tmp, *entry0 = NULL;
        struct xfrm_policy *pol;
        unsigned int h0 = 0;

redo:
        hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
                unsigned int h;

                h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
                                pol->family, nhashmask);
                if (!entry0) {
                        hlist_del(entry);
                        hlist_add_head(&pol->bydst, ndsttable+h);
                        h0 = h;
                } else {
                        if (h != h0)
                                continue;
                        hlist_del(entry);
                        hlist_add_after(entry0, &pol->bydst);
                }
                entry0 = entry;
        }
        if (!hlist_empty(list)) {
                entry0 = NULL;
                goto redo;
        }
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
                                   struct hlist_head *nidxtable,
                                   unsigned int nhashmask)
{
        struct hlist_node *entry, *tmp;
        struct xfrm_policy *pol;

        hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
                unsigned int h;

                h = __idx_hash(pol->index, nhashmask);
                hlist_add_head(&pol->byidx, nidxtable+h);
        }
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
        return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(int dir)
{
        unsigned int hmask = xfrm_policy_bydst[dir].hmask;
        unsigned int nhashmask = xfrm_new_hash_mask(hmask);
        unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
        struct hlist_head *odst = xfrm_policy_bydst[dir].table;
        struct hlist_head *ndst = xfrm_hash_alloc(nsize);
        int i;

        if (!ndst)
                return;

        write_lock_bh(&xfrm_policy_lock);

        for (i = hmask; i >= 0; i--)
                xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

        xfrm_policy_bydst[dir].table = ndst;
        xfrm_policy_bydst[dir].hmask = nhashmask;

        write_unlock_bh(&xfrm_policy_lock);

        xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(int total)
{
        unsigned int hmask = xfrm_idx_hmask;
        unsigned int nhashmask = xfrm_new_hash_mask(hmask);
        unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
        struct hlist_head *oidx = xfrm_policy_byidx;
        struct hlist_head *nidx = xfrm_hash_alloc(nsize);
        int i;

        if (!nidx)
                return;

        write_lock_bh(&xfrm_policy_lock);

        for (i = hmask; i >= 0; i--)
                xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

        xfrm_policy_byidx = nidx;
        xfrm_idx_hmask = nhashmask;

        write_unlock_bh(&xfrm_policy_lock);

        xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(int dir, int *total)
{
        unsigned int cnt = xfrm_policy_count[dir];
        unsigned int hmask = xfrm_policy_bydst[dir].hmask;

        if (total)
                *total += cnt;

        if ((hmask + 1) < xfrm_policy_hashmax &&
            cnt > hmask)
                return 1;

        return 0;
}
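/*
 * The tables grow by doubling: xfrm_new_hash_mask() turns an old mask
 * into ((old + 1) << 1) - 1, so a mask of 7 (8 buckets) becomes 15
 * (16 buckets).  A resize is triggered once the number of policies
 * exceeds the current mask, and stops once the bucket count would
 * reach xfrm_policy_hashmax.
 */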
static inline int xfrm_byidx_should_resize(int total)
{
        unsigned int hmask = xfrm_idx_hmask;

        if ((hmask + 1) < xfrm_policy_hashmax &&
            total > hmask)
                return 1;

        return 0;
}

void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
{
        read_lock_bh(&xfrm_policy_lock);
        si->incnt = xfrm_policy_count[XFRM_POLICY_IN];
        si->outcnt = xfrm_policy_count[XFRM_POLICY_OUT];
        si->fwdcnt = xfrm_policy_count[XFRM_POLICY_FWD];
        si->inscnt = xfrm_policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
        si->outscnt = xfrm_policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
        si->fwdscnt = xfrm_policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
        si->spdhcnt = xfrm_idx_hmask;
        si->spdhmcnt = xfrm_policy_hashmax;
        read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *__unused)
{
        int dir, total;

        mutex_lock(&hash_resize_mutex);

        total = 0;
        for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
                if (xfrm_bydst_should_resize(dir, &total))
                        xfrm_bydst_resize(dir);
        }
        if (xfrm_byidx_should_resize(total))
                xfrm_byidx_resize(total);

        mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Generate new index... KAME seems to generate them ordered, at the
 * cost of absolute unpredictability of rule ordering. This will not pass. */
static u32 xfrm_gen_index(u8 type, int dir)
{
        static u32 idx_generator;

        for (;;) {
                struct hlist_node *entry;
                struct hlist_head *list;
                struct xfrm_policy *p;
                u32 idx;
                int found;

                idx = (idx_generator | dir);
                idx_generator += 8;
                if (idx == 0)
                        idx = 8;
                list = xfrm_policy_byidx + idx_hash(idx);
                found = 0;
                hlist_for_each_entry(p, entry, list, byidx) {
                        if (p->index == idx) {
                                found = 1;
                                break;
                        }
                }
                if (!found)
                        return idx;
        }
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
        u32 *p1 = (u32 *) s1;
        u32 *p2 = (u32 *) s2;
        int len = sizeof(struct xfrm_selector) / sizeof(u32);
        int i;

        for (i = 0; i < len; i++) {
                if (p1[i] != p2[i])
                        return 1;
        }

        return 0;
}
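/*
 * A policy index encodes its direction in the low bits so that
 * xfrm_policy_id2dir() can recover it, which is why xfrm_gen_index()
 * ORs the direction in and advances idx_generator in steps of 8.
 * selector_cmp() compares the two selectors word by word and returns
 * 0 only when they are identical.
 */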
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
        struct xfrm_policy *pol;
        struct xfrm_policy *delpol;
        struct hlist_head *chain;
        struct hlist_node *entry, *newpos;
        struct dst_entry *gc_list;

        write_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_bysel(&policy->selector, policy->family, dir);
        delpol = NULL;
        newpos = NULL;
        hlist_for_each_entry(pol, entry, chain, bydst) {
                if (pol->type == policy->type &&
                    !selector_cmp(&pol->selector, &policy->selector) &&
                    xfrm_sec_ctx_match(pol->security, policy->security) &&
                    !WARN_ON(delpol)) {
                        if (excl) {
                                write_unlock_bh(&xfrm_policy_lock);
                                return -EEXIST;
                        }
                        delpol = pol;
                        if (policy->priority > pol->priority)
                                continue;
                } else if (policy->priority >= pol->priority) {
                        newpos = &pol->bydst;
                        continue;
                }
                if (delpol)
                        break;
        }
        if (newpos)
                hlist_add_after(newpos, &policy->bydst);
        else
                hlist_add_head(&policy->bydst, chain);
        xfrm_pol_hold(policy);
        xfrm_policy_count[dir]++;
        atomic_inc(&flow_cache_genid);
        if (delpol) {
                hlist_del(&delpol->bydst);
                hlist_del(&delpol->byidx);
                xfrm_policy_count[dir]--;
        }
        policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
        hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
        policy->curlft.add_time = get_seconds();
        policy->curlft.use_time = 0;
        if (!mod_timer(&policy->timer, jiffies + HZ))
                xfrm_pol_hold(policy);
        list_add_tail(&policy->bytype, &xfrm_policy_bytype[policy->type]);
        write_unlock_bh(&xfrm_policy_lock);

        if (delpol)
                xfrm_policy_kill(delpol);
        else if (xfrm_bydst_should_resize(dir, NULL))
                schedule_work(&xfrm_hash_work);

        read_lock_bh(&xfrm_policy_lock);
        gc_list = NULL;
        entry = &policy->bydst;
        hlist_for_each_entry_continue(policy, entry, bydst) {
                struct dst_entry *dst;

                write_lock(&policy->lock);
                dst = policy->bundles;
                if (dst) {
                        struct dst_entry *tail = dst;
                        while (tail->next)
                                tail = tail->next;
                        tail->next = gc_list;
                        gc_list = dst;

                        policy->bundles = NULL;
                }
                write_unlock(&policy->lock);
        }
        read_unlock_bh(&xfrm_policy_lock);

        while (gc_list) {
                struct dst_entry *dst = gc_list;

                gc_list = dst->next;
                dst_free(dst);
        }

        return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
                                          struct xfrm_selector *sel,
                                          struct xfrm_sec_ctx *ctx, int delete,
                                          int *err)
{
        struct xfrm_policy *pol, *ret;
        struct hlist_head *chain;
        struct hlist_node *entry;

        *err = 0;
        write_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_bysel(sel, sel->family, dir);
        ret = NULL;
        hlist_for_each_entry(pol, entry, chain, bydst) {
                if (pol->type == type &&
                    !selector_cmp(sel, &pol->selector) &&
                    xfrm_sec_ctx_match(ctx, pol->security)) {
                        xfrm_pol_hold(pol);
                        if (delete) {
                                *err = security_xfrm_policy_delete(
                                                                pol->security);
                                if (*err) {
                                        write_unlock_bh(&xfrm_policy_lock);
                                        return pol;
                                }
                                hlist_del(&pol->bydst);
                                hlist_del(&pol->byidx);
                                xfrm_policy_count[dir]--;
                        }
                        ret = pol;
                        break;
                }
        }
        write_unlock_bh(&xfrm_policy_lock);

        if (ret && delete) {
                atomic_inc(&flow_cache_genid);
                xfrm_policy_kill(ret);
        }
        return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
                                     int *err)
{
        struct xfrm_policy *pol, *ret;
        struct hlist_head *chain;
        struct hlist_node *entry;

        *err = -ENOENT;
        if (xfrm_policy_id2dir(id) != dir)
                return NULL;

        *err = 0;
        write_lock_bh(&xfrm_policy_lock);
        chain = xfrm_policy_byidx + idx_hash(id);
        ret = NULL;
        hlist_for_each_entry(pol, entry, chain, byidx) {
                if (pol->type == type && pol->index == id) {
                        xfrm_pol_hold(pol);
                        if (delete) {
                                *err = security_xfrm_policy_delete(
                                                                pol->security);
                                if (*err) {
                                        write_unlock_bh(&xfrm_policy_lock);
                                        return pol;
                                }
                                hlist_del(&pol->bydst);
                                hlist_del(&pol->byidx);
                                xfrm_policy_count[dir]--;
                        }
                        ret = pol;
                        break;
                }
        }
        write_unlock_bh(&xfrm_policy_lock);

        if (ret && delete) {
                atomic_inc(&flow_cache_genid);
                xfrm_policy_kill(ret);
        }
        return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
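/*
 * A minimal sketch of how a key manager (pfkeyv2 or netlink) drives
 * the insert path above; the selector values are illustrative only:
 *
 *      struct xfrm_policy *pol = xfrm_policy_alloc(GFP_KERNEL);
 *
 *      pol->selector = ...;                    // addresses, ports, proto
 *      pol->family   = AF_INET;
 *      pol->action   = XFRM_POLICY_ALLOW;
 *      pol->priority = 100;                    // smaller value wins lookups
 *      err = xfrm_policy_insert(XFRM_POLICY_OUT, pol, 1 /* excl */);
 *
 * Each bydst chain is kept sorted by ascending priority, which is why
 * the insertion loop may stop at the first entry with a higher
 * priority value, and why lookups can take the first match.
 */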
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
{
        int dir, err = 0;

        for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
                struct xfrm_policy *pol;
                struct hlist_node *entry;
                int i;

                hlist_for_each_entry(pol, entry,
                                     &xfrm_policy_inexact[dir], bydst) {
                        if (pol->type != type)
                                continue;
                        err = security_xfrm_policy_delete(pol->security);
                        if (err) {
                                xfrm_audit_policy_delete(pol, 0,
                                                         audit_info->loginuid,
                                                         audit_info->sessionid,
                                                         audit_info->secid);
                                return err;
                        }
                }
                for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
                        hlist_for_each_entry(pol, entry,
                                             xfrm_policy_bydst[dir].table + i,
                                             bydst) {
                                if (pol->type != type)
                                        continue;
                                err = security_xfrm_policy_delete(
                                                                pol->security);
                                if (err) {
                                        xfrm_audit_policy_delete(pol, 0,
                                                        audit_info->loginuid,
                                                        audit_info->sessionid,
                                                        audit_info->secid);
                                        return err;
                                }
                        }
                }
        }
        return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
{
        return 0;
}
#endif

int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
{
        int dir, err = 0;

        write_lock_bh(&xfrm_policy_lock);

        err = xfrm_policy_flush_secctx_check(type, audit_info);
        if (err)
                goto out;

        for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
                struct xfrm_policy *pol;
                struct hlist_node *entry;
                int i, killed;

                killed = 0;
        again1:
                hlist_for_each_entry(pol, entry,
                                     &xfrm_policy_inexact[dir], bydst) {
                        if (pol->type != type)
                                continue;
                        hlist_del(&pol->bydst);
                        hlist_del(&pol->byidx);
                        write_unlock_bh(&xfrm_policy_lock);

                        xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
                                                 audit_info->sessionid,
                                                 audit_info->secid);

                        xfrm_policy_kill(pol);
                        killed++;

                        write_lock_bh(&xfrm_policy_lock);
                        goto again1;
                }

                for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
        again2:
                        hlist_for_each_entry(pol, entry,
                                             xfrm_policy_bydst[dir].table + i,
                                             bydst) {
                                if (pol->type != type)
                                        continue;
                                hlist_del(&pol->bydst);
                                hlist_del(&pol->byidx);
                                write_unlock_bh(&xfrm_policy_lock);

                                xfrm_audit_policy_delete(pol, 1,
                                                         audit_info->loginuid,
                                                         audit_info->sessionid,
                                                         audit_info->secid);
                                xfrm_policy_kill(pol);
                                killed++;

                                write_lock_bh(&xfrm_policy_lock);
                                goto again2;
                        }
                }

                xfrm_policy_count[dir] -= killed;
        }
        atomic_inc(&flow_cache_genid);
out:
        write_unlock_bh(&xfrm_policy_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
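/*
 * xfrm_policy_flush() does not hold xfrm_policy_lock across the audit
 * call and xfrm_policy_kill(): it unlinks one entry, drops the lock,
 * kills the entry, and retakes the lock.  Because the chain may have
 * changed while unlocked, the scan restarts from the chain head (the
 * again1/again2 labels) after every deletion.
 */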
int xfrm_policy_walk(struct xfrm_policy_walk *walk,
                     int (*func)(struct xfrm_policy *, int, int, void*),
                     void *data)
{
        struct xfrm_policy *old, *pol, *last = NULL;
        int error = 0;

        if (walk->type >= XFRM_POLICY_TYPE_MAX &&
            walk->type != XFRM_POLICY_TYPE_ANY)
                return -EINVAL;

        if (walk->policy == NULL && walk->count != 0)
                return 0;

        old = pol = walk->policy;
        walk->policy = NULL;
        read_lock_bh(&xfrm_policy_lock);

        for (; walk->cur_type < XFRM_POLICY_TYPE_MAX; walk->cur_type++) {
                if (walk->type != walk->cur_type &&
                    walk->type != XFRM_POLICY_TYPE_ANY)
                        continue;

                if (pol == NULL) {
                        pol = list_first_entry(&xfrm_policy_bytype[walk->cur_type],
                                               struct xfrm_policy, bytype);
                }
                list_for_each_entry_from(pol, &xfrm_policy_bytype[walk->cur_type], bytype) {
                        if (pol->dead)
                                continue;
                        if (last) {
                                error = func(last, xfrm_policy_id2dir(last->index),
                                             walk->count, data);
                                if (error) {
                                        xfrm_pol_hold(last);
                                        walk->policy = last;
                                        goto out;
                                }
                        }
                        last = pol;
                        walk->count++;
                }
                pol = NULL;
        }
        if (walk->count == 0) {
                error = -ENOENT;
                goto out;
        }
        if (last)
                error = func(last, xfrm_policy_id2dir(last->index), 0, data);
out:
        read_unlock_bh(&xfrm_policy_lock);
        if (old != NULL)
                xfrm_pol_put(old);
        return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
                             u8 type, u16 family, int dir)
{
        struct xfrm_selector *sel = &pol->selector;
        int match, ret = -ESRCH;

        if (pol->family != family ||
            pol->type != type)
                return ret;

        match = xfrm_selector_match(sel, fl, family);
        if (match)
                ret = security_xfrm_policy_lookup(pol->security, fl->secid,
                                                  dir);

        return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
                                                     u16 family, u8 dir)
{
        int err;
        struct xfrm_policy *pol, *ret;
        xfrm_address_t *daddr, *saddr;
        struct hlist_node *entry;
        struct hlist_head *chain;
        u32 priority = ~0U;

        daddr = xfrm_flowi_daddr(fl, family);
        saddr = xfrm_flowi_saddr(fl, family);
        if (unlikely(!daddr || !saddr))
                return NULL;

        read_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_direct(daddr, saddr, family, dir);
        ret = NULL;
        hlist_for_each_entry(pol, entry, chain, bydst) {
                err = xfrm_policy_match(pol, fl, type, family, dir);
                if (err) {
                        if (err == -ESRCH)
                                continue;
                        else {
                                ret = ERR_PTR(err);
                                goto fail;
                        }
                } else {
                        ret = pol;
                        priority = ret->priority;
                        break;
                }
        }
        chain = &xfrm_policy_inexact[dir];
        hlist_for_each_entry(pol, entry, chain, bydst) {
                err = xfrm_policy_match(pol, fl, type, family, dir);
                if (err) {
                        if (err == -ESRCH)
                                continue;
                        else {
                                ret = ERR_PTR(err);
                                goto fail;
                        }
                } else if (pol->priority < priority) {
                        ret = pol;
                        break;
                }
        }
        if (ret)
                xfrm_pol_hold(ret);
fail:
        read_unlock_bh(&xfrm_policy_lock);

        return ret;
}

static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
                              void **objp, atomic_t **obj_refp)
{
        struct xfrm_policy *pol;
        int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
        pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
        if (IS_ERR(pol)) {
                err = PTR_ERR(pol);
                pol = NULL;
        }
        if (pol || err)
                goto end;
#endif
        pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
        if (IS_ERR(pol)) {
                err = PTR_ERR(pol);
                pol = NULL;
        }
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
        if ((*objp = (void *) pol) != NULL)
                *obj_refp = &pol->refcnt;
        return err;
}

static inline int policy_to_flow_dir(int dir)
{
        if (XFRM_POLICY_IN == FLOW_DIR_IN &&
            XFRM_POLICY_OUT == FLOW_DIR_OUT &&
            XFRM_POLICY_FWD == FLOW_DIR_FWD)
                return dir;
        switch (dir) {
        default:
        case XFRM_POLICY_IN:
                return FLOW_DIR_IN;
        case XFRM_POLICY_OUT:
                return FLOW_DIR_OUT;
        case XFRM_POLICY_FWD:
                return FLOW_DIR_FWD;
        }
}
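/*
 * xfrm_policy_lookup() above is the resolver handed to
 * flow_cache_lookup(): it fills *objp with the matching policy
 * (sub-policies take precedence over the main policy when
 * CONFIG_XFRM_SUB_POLICY is set) and *obj_refp with the refcount the
 * flow cache should hold.  Within one type, an exact bydst hash match
 * is preferred; an inexact-chain entry only wins with a strictly
 * smaller (stronger) priority value.
 */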
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
        struct xfrm_policy *pol;

        read_lock_bh(&xfrm_policy_lock);
        if ((pol = sk->sk_policy[dir]) != NULL) {
                int match = xfrm_selector_match(&pol->selector, fl,
                                                sk->sk_family);
                int err = 0;

                if (match) {
                        err = security_xfrm_policy_lookup(pol->security,
                                                          fl->secid,
                                                          policy_to_flow_dir(dir));
                        if (!err)
                                xfrm_pol_hold(pol);
                        else if (err == -ESRCH)
                                pol = NULL;
                        else
                                pol = ERR_PTR(err);
                } else
                        pol = NULL;
        }
        read_unlock_bh(&xfrm_policy_lock);
        return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
        struct hlist_head *chain = policy_hash_bysel(&pol->selector,
                                                     pol->family, dir);

        hlist_add_head(&pol->bydst, chain);
        hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
        xfrm_policy_count[dir]++;
        xfrm_pol_hold(pol);

        if (xfrm_bydst_should_resize(dir, NULL))
                schedule_work(&xfrm_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
                                                int dir)
{
        if (hlist_unhashed(&pol->bydst))
                return NULL;

        hlist_del(&pol->bydst);
        hlist_del(&pol->byidx);
        xfrm_policy_count[dir]--;

        return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
        write_lock_bh(&xfrm_policy_lock);
        pol = __xfrm_policy_unlink(pol, dir);
        write_unlock_bh(&xfrm_policy_lock);
        if (pol) {
                if (dir < XFRM_POLICY_MAX)
                        atomic_inc(&flow_cache_genid);
                xfrm_policy_kill(pol);
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
        struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
        if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
                return -EINVAL;
#endif

        write_lock_bh(&xfrm_policy_lock);
        old_pol = sk->sk_policy[dir];
        sk->sk_policy[dir] = pol;
        if (pol) {
                pol->curlft.add_time = get_seconds();
                pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
                __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
        }
        if (old_pol)
                __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
        write_unlock_bh(&xfrm_policy_lock);

        if (old_pol) {
                xfrm_policy_kill(old_pol);
        }
        return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
        struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

        if (newp) {
                newp->selector = old->selector;
                if (security_xfrm_policy_clone(old->security,
                                               &newp->security)) {
                        kfree(newp);
                        return NULL;  /* ENOMEM */
                }
                newp->lft = old->lft;
                newp->curlft = old->curlft;
                newp->action = old->action;
                newp->flags = old->flags;
                newp->xfrm_nr = old->xfrm_nr;
                newp->index = old->index;
                newp->type = old->type;
                memcpy(newp->xfrm_vec, old->xfrm_vec,
                       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
                write_lock_bh(&xfrm_policy_lock);
                __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
                write_unlock_bh(&xfrm_policy_lock);
                xfrm_pol_put(newp);
        }
        return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
        struct xfrm_policy *p0 = sk->sk_policy[0],
                           *p1 = sk->sk_policy[1];

        sk->sk_policy[0] = sk->sk_policy[1] = NULL;
        if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
                return -ENOMEM;
        if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
                return -ENOMEM;
        return 0;
}
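/*
 * Per-socket policies are linked into the upper half of the direction
 * arrays: a socket policy for direction dir lives at index
 * XFRM_POLICY_MAX + dir, so it gets an index and hash chains without
 * ever being visible to the global flow lookup, which only scans the
 * ordinary IN/OUT/FWD directions.  clone_policy() preserves this when
 * __xfrm_sk_clone_policy() duplicates a parent socket's policies for
 * a newly cloned socket.
 */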
static int
xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
               unsigned short family)
{
        int err;
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

        if (unlikely(afinfo == NULL))
                return -EINVAL;
        err = afinfo->get_saddr(local, remote);
        xfrm_policy_put_afinfo(afinfo);
        return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
                      struct xfrm_state **xfrm,
                      unsigned short family)
{
        int nx;
        int i, error;
        xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
        xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
        xfrm_address_t tmp;

        for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
                struct xfrm_state *x;
                xfrm_address_t *remote = daddr;
                xfrm_address_t *local  = saddr;
                struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

                if (tmpl->mode == XFRM_MODE_TUNNEL ||
                    tmpl->mode == XFRM_MODE_BEET) {
                        remote = &tmpl->id.daddr;
                        local = &tmpl->saddr;
                        family = tmpl->encap_family;
                        if (xfrm_addr_any(local, family)) {
                                error = xfrm_get_saddr(&tmp, remote, family);
                                if (error)
                                        goto fail;
                                local = &tmp;
                        }
                }

                x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

                if (x && x->km.state == XFRM_STATE_VALID) {
                        xfrm[nx++] = x;
                        daddr = remote;
                        saddr = local;
                        continue;
                }
                if (x) {
                        error = (x->km.state == XFRM_STATE_ERROR ?
                                 -EINVAL : -EAGAIN);
                        xfrm_state_put(x);
                }

                if (!tmpl->optional)
                        goto fail;
        }
        return nx;

fail:
        for (nx--; nx >= 0; nx--)
                xfrm_state_put(xfrm[nx]);
        return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
                  struct xfrm_state **xfrm,
                  unsigned short family)
{
        struct xfrm_state *tp[XFRM_MAX_DEPTH];
        struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
        int cnx = 0;
        int error;
        int ret;
        int i;

        for (i = 0; i < npols; i++) {
                if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
                        error = -ENOBUFS;
                        goto fail;
                }

                ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
                if (ret < 0) {
                        error = ret;
                        goto fail;
                } else
                        cnx += ret;
        }

        /* found states are sorted for outbound processing */
        if (npols > 1)
                xfrm_state_sort(xfrm, tpp, cnx, family);

        return cnx;

fail:
        for (cnx--; cnx >= 0; cnx--)
                xfrm_state_put(tpp[cnx]);
        return error;

}
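/*
 * Template resolution walks the policy's xfrm_vec in order and asks
 * xfrm_state_find() for a matching SA per template.  For tunnel and
 * BEET templates the endpoint addresses come from the template itself
 * (an unspecified source is filled in via xfrm_get_saddr()), and those
 * endpoints become the flow addresses seen by the next template, so
 * nested tunnels chain naturally.  The -EAGAIN from a still-larval SA
 * is what later triggers the km_waitq wait or the larval-drop path in
 * __xfrm_lookup().
 */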
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
        struct dst_entry *x;
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return ERR_PTR(-EINVAL);
        x = afinfo->find_bundle(fl, policy);
        xfrm_policy_put_afinfo(afinfo);
        return x;
}

static inline int xfrm_get_tos(struct flowi *fl, int family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        int tos;

        if (!afinfo)
                return -EINVAL;

        tos = afinfo->get_tos(fl);

        xfrm_policy_put_afinfo(afinfo);

        return tos;
}

static inline struct xfrm_dst *xfrm_alloc_dst(int family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        struct xfrm_dst *xdst;

        if (!afinfo)
                return ERR_PTR(-EINVAL);

        xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS);

        xfrm_policy_put_afinfo(afinfo);

        return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
                                 int nfheader_len)
{
        struct xfrm_policy_afinfo *afinfo =
                xfrm_policy_get_afinfo(dst->ops->family);
        int err;

        if (!afinfo)
                return -EINVAL;

        err = afinfo->init_path(path, dst, nfheader_len);

        xfrm_policy_put_afinfo(afinfo);

        return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
{
        struct xfrm_policy_afinfo *afinfo =
                xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
        int err;

        if (!afinfo)
                return -EINVAL;

        err = afinfo->fill_dst(xdst, dev);

        xfrm_policy_put_afinfo(afinfo);

        return err;
}
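/*
 * xfrm_bundle_create() below strings one xfrm_dst per state into a
 * chain linked by ->child, with the plain route at the bottom:
 *
 *      dst0 (xfrm[0]) -> dst1 (xfrm[1]) -> ... -> route
 *
 * dst0->path points at that bottom route, each xdst->route remembers
 * the route the level was built over, and every ->obsolete is forced
 * to -1 so that xfrm_dst_check()/stale_bundle() revalidate the bundle
 * on each use.
 */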
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                                            struct xfrm_state **xfrm, int nx,
                                            struct flowi *fl,
                                            struct dst_entry *dst)
{
        unsigned long now = jiffies;
        struct net_device *dev;
        struct dst_entry *dst_prev = NULL;
        struct dst_entry *dst0 = NULL;
        int i = 0;
        int err;
        int header_len = 0;
        int nfheader_len = 0;
        int trailer_len = 0;
        int tos;
        int family = policy->selector.family;
        xfrm_address_t saddr, daddr;

        xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

        tos = xfrm_get_tos(fl, family);
        err = tos;
        if (tos < 0)
                goto put_states;

        dst_hold(dst);

        for (; i < nx; i++) {
                struct xfrm_dst *xdst = xfrm_alloc_dst(family);
                struct dst_entry *dst1 = &xdst->u.dst;

                err = PTR_ERR(xdst);
                if (IS_ERR(xdst)) {
                        dst_release(dst);
                        goto put_states;
                }

                if (!dst_prev)
                        dst0 = dst1;
                else {
                        dst_prev->child = dst_clone(dst1);
                        dst1->flags |= DST_NOHASH;
                }

                xdst->route = dst;
                memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));

                if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
                        family = xfrm[i]->props.family;
                        dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
                                              family);
                        err = PTR_ERR(dst);
                        if (IS_ERR(dst))
                                goto put_states;
                } else
                        dst_hold(dst);

                dst1->xfrm = xfrm[i];
                xdst->genid = xfrm[i]->genid;

                dst1->obsolete = -1;
                dst1->flags |= DST_HOST;
                dst1->lastuse = now;

                dst1->input = dst_discard;
                dst1->output = xfrm[i]->outer_mode->afinfo->output;

                dst1->next = dst_prev;
                dst_prev = dst1;

                header_len += xfrm[i]->props.header_len;
                if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
                        nfheader_len += xfrm[i]->props.header_len;
                trailer_len += xfrm[i]->props.trailer_len;
        }

        dst_prev->child = dst;
        dst0->path = dst;

        err = -ENODEV;
        dev = dst->dev;
        if (!dev)
                goto free_dst;

        /* Copy neighbour for reachability confirmation */
        dst0->neighbour = neigh_clone(dst->neighbour);

        xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
        xfrm_init_pmtu(dst_prev);

        for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
                struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

                err = xfrm_fill_dst(xdst, dev);
                if (err)
                        goto free_dst;

                dst_prev->header_len = header_len;
                dst_prev->trailer_len = trailer_len;
                header_len -= xdst->u.dst.xfrm->props.header_len;
                trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
        }

out:
        return dst0;

put_states:
        for (; i < nx; i++)
                xfrm_state_put(xfrm[i]);
free_dst:
        if (dst0)
                dst_free(dst0);
        dst0 = ERR_PTR(err);
        goto out;
}
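/*
 * The header/trailer budgets accumulate across all transforms and are
 * then written top-down: each level's header_len advertises the space
 * it and everything below it will add.  With two transforms whose
 * header lengths are, say, 24 and 20 bytes, the top xfrm_dst reports
 * header_len 44 and the inner one 20, so a caller can reserve all of
 * the headroom in one go.
 */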
static inline int
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
        if (!*target) {
                *target = kmalloc(size, GFP_ATOMIC);
                if (!*target)
                        return -ENOMEM;
        }
        memcpy(*target, src, size);
        return 0;
}

static inline int
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        return xfrm_dst_alloc_copy((void **)&(xdst->partner),
                                   sel, sizeof(*sel));
#else
        return 0;
#endif
}

static inline int
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
        return 0;
#endif
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for a given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
                  struct sock *sk, int flags)
{
        struct xfrm_policy *policy;
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        int npols;
        int pol_dead;
        int xfrm_nr;
        int pi;
        struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
        struct dst_entry *dst, *dst_orig = *dst_p;
        int nx = 0;
        int err;
        u32 genid;
        u16 family;
        u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
        genid = atomic_read(&flow_cache_genid);
        policy = NULL;
        for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
                pols[pi] = NULL;
        npols = 0;
        pol_dead = 0;
        xfrm_nr = 0;

        if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
                policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
                err = PTR_ERR(policy);
                if (IS_ERR(policy)) {
                        XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
                        goto dropdst;
                }
        }

        if (!policy) {
                /* To accelerate a bit... */
                if ((dst_orig->flags & DST_NOXFRM) ||
                    !xfrm_policy_count[XFRM_POLICY_OUT])
                        goto nopol;

                policy = flow_cache_lookup(fl, dst_orig->ops->family,
                                           dir, xfrm_policy_lookup);
                err = PTR_ERR(policy);
                if (IS_ERR(policy)) {
                        XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
                        goto dropdst;
                }
        }

        if (!policy)
                goto nopol;

        family = dst_orig->ops->family;
        pols[0] = policy;
        npols++;
        xfrm_nr += pols[0]->xfrm_nr;

        err = -ENOENT;
        if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
                goto error;

        policy->curlft.use_time = get_seconds();

        switch (policy->action) {
        default:
        case XFRM_POLICY_BLOCK:
                /* Prohibit the flow */
                XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLBLOCK);
                err = -EPERM;
                goto error;

        case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
                if (policy->xfrm_nr == 0) {
                        /* Flow passes not transformed. */
                        xfrm_pol_put(policy);
                        return 0;
                }
#endif

                /* Try to find matching bundle.
                 *
                 * LATER: help from flow cache. It is optional, this
                 * is required only for output policy.
                 */
                dst = xfrm_find_bundle(fl, policy, family);
                if (IS_ERR(dst)) {
                        XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
                        err = PTR_ERR(dst);
                        goto error;
                }

                if (dst)
                        break;

#ifdef CONFIG_XFRM_SUB_POLICY
                if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
                        pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
                                                            fl, family,
                                                            XFRM_POLICY_OUT);
                        if (pols[1]) {
                                if (IS_ERR(pols[1])) {
                                        XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
                                        err = PTR_ERR(pols[1]);
                                        goto error;
                                }
                                if (pols[1]->action == XFRM_POLICY_BLOCK) {
                                        XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLBLOCK);
                                        err = -EPERM;
                                        goto error;
                                }
                                npols++;
                                xfrm_nr += pols[1]->xfrm_nr;
                        }
                }

                /*
                 * Neither the flowi nor the bundle carries the
                 * transformation template count, so when more than one
                 * policy is in play we only know whether all of them
                 * are bypass after they have been resolved.  Note that
                 * the not-transformed bypass above is likewise guarded
                 * by the non-sub-policy configuration.
                 */
                if (xfrm_nr == 0) {
                        /* Flow passes not transformed. */
                        xfrm_pols_put(pols, npols);
                        return 0;
                }

#endif
                nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

                if (unlikely(nx < 0)) {
                        err = nx;
                        if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
                                /* EREMOTE tells the caller to generate
                                 * a one-shot blackhole route.
                                 */
                                XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
                                xfrm_pol_put(policy);
                                return -EREMOTE;
                        }
                        if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
                                DECLARE_WAITQUEUE(wait, current);

                                add_wait_queue(&km_waitq, &wait);
                                set_current_state(TASK_INTERRUPTIBLE);
                                schedule();
                                set_current_state(TASK_RUNNING);
                                remove_wait_queue(&km_waitq, &wait);

                                nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

                                if (nx == -EAGAIN && signal_pending(current)) {
                                        XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
                                        err = -ERESTART;
                                        goto error;
                                }
                                if (nx == -EAGAIN ||
                                    genid != atomic_read(&flow_cache_genid)) {
                                        xfrm_pols_put(pols, npols);
                                        goto restart;
                                }
                                err = nx;
                        }
                        if (err < 0) {
                                XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
                                goto error;
                        }
                }
                if (nx == 0) {
                        /* Flow passes not transformed. */
                        xfrm_pols_put(pols, npols);
                        return 0;
                }

                dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
                err = PTR_ERR(dst);
                if (IS_ERR(dst)) {
                        XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLEGENERROR);
                        goto error;
                }

                for (pi = 0; pi < npols; pi++) {
                        read_lock_bh(&pols[pi]->lock);
                        pol_dead |= pols[pi]->dead;
                        read_unlock_bh(&pols[pi]->lock);
                }

                write_lock_bh(&policy->lock);
                if (unlikely(pol_dead || stale_bundle(dst))) {
                        /* Wow! While we worked on resolving, this
                         * policy has gone. Retry. It is not paranoia,
                         * we just cannot enlist new bundle to dead object.
                         * We can't enlist stale bundles either.
                         */
                        write_unlock_bh(&policy->lock);
                        if (dst)
                                dst_free(dst);

                        if (pol_dead)
                                XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLDEAD);
                        else
                                XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
                        err = -EHOSTUNREACH;
                        goto error;
                }

                if (npols > 1)
                        err = xfrm_dst_update_parent(dst, &pols[1]->selector);
                else
                        err = xfrm_dst_update_origin(dst, fl);
                if (unlikely(err)) {
                        write_unlock_bh(&policy->lock);
                        if (dst)
                                dst_free(dst);
                        XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
                        goto error;
                }

                dst->next = policy->bundles;
                policy->bundles = dst;
                dst_hold(dst);
                write_unlock_bh(&policy->lock);
        }
        *dst_p = dst;
        dst_release(dst_orig);
        xfrm_pols_put(pols, npols);
        return 0;

error:
        xfrm_pols_put(pols, npols);
dropdst:
        dst_release(dst_orig);
        *dst_p = NULL;
        return err;

nopol:
        err = -ENOENT;
        if (flags & XFRM_LOOKUP_ICMP)
                goto dropdst;
        return 0;
}
EXPORT_SYMBOL(__xfrm_lookup);

int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
                struct sock *sk, int flags)
{
        int err = __xfrm_lookup(dst_p, fl, sk, flags);

        if (err == -EREMOTE) {
                dst_release(*dst_p);
                *dst_p = NULL;
                err = -EAGAIN;
        }

        return err;
}
EXPORT_SYMBOL(xfrm_lookup);
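/*
 * A minimal sketch of an output-path caller (names are illustrative):
 *
 *      struct dst_entry *dst = &rt->u.dst;     // plain routing entry
 *      int err = xfrm_lookup(&dst, &fl, sk, 0);
 *
 *      if (err)
 *              return err;     // e.g. -EPERM from a blocking policy
 *      // On success dst is either the original route (no transforms
 *      // applied) or the head of a freshly found/created bundle.
 */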
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
        struct xfrm_state *x;

        if (!skb->sp || idx < 0 || idx >= skb->sp->len)
                return 0;
        x = skb->sp->xvec[idx];
        if (!x->type->reject)
                return 0;
        return x->type->reject(x, skb, fl);
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
              unsigned short family)
{
        if (xfrm_state_kern(x))
                return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
        return  x->id.proto == tmpl->id.proto &&
                (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
                (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
                x->props.mode == tmpl->mode &&
                (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
                 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
                !(x->props.mode != XFRM_MODE_TRANSPORT &&
                  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * Zero or a positive value is returned when validation succeeds (either
 * bypass because of an optional transport-mode template, or the next
 * index of the secpath state matched against the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
               unsigned short family)
{
        int idx = start;

        if (tmpl->optional) {
                if (tmpl->mode == XFRM_MODE_TRANSPORT)
                        return start;
        } else
                start = -1;
        for (; idx < sp->len; idx++) {
                if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
                        return ++idx;
                if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
                        if (start == -1)
                                start = -2-idx;
                        break;
                }
        }
        return start;
}

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
                          unsigned int family, int reverse)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        int err;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        afinfo->decode_session(skb, fl, reverse);
        err = security_xfrm_decode_session(skb, &fl->secid);
        xfrm_policy_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
        for (; k < sp->len; k++) {
                if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
                        *idxp = k;
                        return 1;
                }
        }

        return 0;
}
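/*
 * __xfrm_policy_check() is the input-side verdict: decode the flow
 * (optionally in the reverse direction), check every SA on the
 * secpath against its own selector, locate the applicable policy
 * (socket policy first, then the flow cache), and finally walk the
 * policies' template vectors against the secpath with xfrm_policy_ok()
 * above.
 */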
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                        unsigned short family)
{
        struct xfrm_policy *pol;
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        int npols = 0;
        int xfrm_nr;
        int pi;
        int reverse;
        struct flowi fl;
        u8 fl_dir;
        int xerr_idx = -1;

        reverse = dir & ~XFRM_POLICY_MASK;
        dir &= XFRM_POLICY_MASK;
        fl_dir = policy_to_flow_dir(dir);

        if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
                XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
                return 0;
        }

        nf_nat_decode_session(skb, &fl, family);

        /* First, check the SAs that were used against their selectors. */
        if (skb->sp) {
                int i;

                for (i = skb->sp->len-1; i >= 0; i--) {
                        struct xfrm_state *x = skb->sp->xvec[i];
                        if (!xfrm_selector_match(&x->sel, &fl, family)) {
                                XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEMISMATCH);
                                return 0;
                        }
                }
        }

        pol = NULL;
        if (sk && sk->sk_policy[dir]) {
                pol = xfrm_sk_policy_lookup(sk, dir, &fl);
                if (IS_ERR(pol)) {
                        XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
                        return 0;
                }
        }

        if (!pol)
                pol = flow_cache_lookup(&fl, family, fl_dir,
                                        xfrm_policy_lookup);

        if (IS_ERR(pol)) {
                XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
                return 0;
        }

        if (!pol) {
                if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
                        xfrm_secpath_reject(xerr_idx, skb, &fl);
                        XFRM_INC_STATS(LINUX_MIB_XFRMINNOPOLS);
                        return 0;
                }
                return 1;
        }

        pol->curlft.use_time = get_seconds();

        pols[0] = pol;
        npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
        if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
                pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
                                                    &fl, family,
                                                    XFRM_POLICY_IN);
                if (pols[1]) {
                        if (IS_ERR(pols[1])) {
                                XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
                                return 0;
                        }
                        pols[1]->curlft.use_time = get_seconds();
                        npols++;
                }
        }
#endif

        if (pol->action == XFRM_POLICY_ALLOW) {
                struct sec_path *sp;
                static struct sec_path dummy;
                struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
                struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
                struct xfrm_tmpl **tpp = tp;
                int ti = 0;
                int i, k;

                if ((sp = skb->sp) == NULL)
                        sp = &dummy;

                for (pi = 0; pi < npols; pi++) {
                        if (pols[pi] != pol &&
                            pols[pi]->action != XFRM_POLICY_ALLOW) {
                                XFRM_INC_STATS(LINUX_MIB_XFRMINPOLBLOCK);
                                goto reject;
                        }
                        if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
                                XFRM_INC_STATS(LINUX_MIB_XFRMINBUFFERERROR);
                                goto reject_error;
                        }
                        for (i = 0; i < pols[pi]->xfrm_nr; i++)
                                tpp[ti++] = &pols[pi]->xfrm_vec[i];
                }
                xfrm_nr = ti;
                if (npols > 1) {
                        xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
                        tpp = stp;
                }

                /* For each tunnel xfrm, find the first matching tmpl.
                 * For each tmpl before that, find corresponding xfrm.
                 * Order is _important_. Later we will implement
                 * some barriers, but at the moment barriers
                 * are implied between each two transformations.
                 */
                for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
                        k = xfrm_policy_ok(tpp[i], sp, k, family);
                        if (k < 0) {
                                if (k < -1)
                                        /* "-2 - errored_index" returned */
                                        xerr_idx = -(2+k);
                                XFRM_INC_STATS(LINUX_MIB_XFRMINTMPLMISMATCH);
                                goto reject;
                        }
                }

                if (secpath_has_nontransport(sp, k, &xerr_idx)) {
                        XFRM_INC_STATS(LINUX_MIB_XFRMINTMPLMISMATCH);
                        goto reject;
                }

                xfrm_pols_put(pols, npols);
                return 1;
        }
        XFRM_INC_STATS(LINUX_MIB_XFRMINPOLBLOCK);

reject:
        xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
        xfrm_pols_put(pols, npols);
        return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
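/*
 * Note the verdict convention: __xfrm_policy_check() returns 1 when
 * the packet may pass (no policy applied, or every template was
 * satisfied) and 0 when it must be dropped, so callers can use it
 * directly as a boolean guard.
 */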
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
        struct flowi fl;

        if (xfrm_decode_session(skb, &fl, family) < 0) {
                /* XXX: we should have something like FWDHDRERROR here. */
                XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
                return 0;
        }

        return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
        /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
         * to "-1" to force all XFRM destinations to get validated by
         * dst_ops->check on every use.  We do this because when a
         * normal route referenced by an XFRM dst is obsoleted we do
         * not go looking around for all parent referencing XFRM dsts
         * so that we can invalidate them.  It is just too much work.
         * Instead we make the checks here on every use.  For example:
         *
         *      XFRM dst A --> IPv4 dst X
         *
         * X is the "xdst->route" of A (X is also the "dst->path" of A
         * in this example).  If X is marked obsolete, "A" will not
         * notice.  That's what we are validating here via the
         * stale_bundle() check.
         *
         * When a policy's bundle is pruned, we dst_free() the XFRM
         * dst which causes its ->obsolete field to be set to a
         * positive non-zero integer.  If an XFRM dst has been pruned
         * like this, we want to force a new route lookup.
         */
        if (dst->obsolete < 0 && !stale_bundle(dst))
                return dst;

        return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
        return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
        while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
                dst->dev = dev_net(dev)->loopback_dev;
                dev_hold(dst->dev);
                dev_put(dev);
        }
}
EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
        /* Impossible. Such dst must be popped before it reaches the
         * point of failure. */
        return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
        if (dst) {
                if (dst->obsolete) {
                        dst_release(dst);
                        dst = NULL;
                }
        }
        return dst;
}

static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
        struct dst_entry *dst, **dstp;

        write_lock(&pol->lock);
        dstp = &pol->bundles;
        while ((dst = *dstp) != NULL) {
                if (func(dst)) {
                        *dstp = dst->next;
                        dst->next = *gc_list_p;
                        *gc_list_p = dst;
                } else {
                        dstp = &dst->next;
                }
        }
        write_unlock(&pol->lock);
}

static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
        struct dst_entry *gc_list = NULL;
        int dir;

        read_lock_bh(&xfrm_policy_lock);
        for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
                struct xfrm_policy *pol;
                struct hlist_node *entry;
                struct hlist_head *table;
                int i;

                hlist_for_each_entry(pol, entry,
                                     &xfrm_policy_inexact[dir], bydst)
                        prune_one_bundle(pol, func, &gc_list);

                table = xfrm_policy_bydst[dir].table;
                for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
                        hlist_for_each_entry(pol, entry, table + i, bydst)
                                prune_one_bundle(pol, func, &gc_list);
                }
        }
        read_unlock_bh(&xfrm_policy_lock);

        while (gc_list) {
                struct dst_entry *dst = gc_list;
                gc_list = dst->next;
                dst_free(dst);
        }
}

static int unused_bundle(struct dst_entry *dst)
{
        return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
        xfrm_prune_bundles(unused_bundle);
}

static int xfrm_flush_bundles(void)
{
        xfrm_prune_bundles(stale_bundle);
        return 0;
}

static void xfrm_init_pmtu(struct dst_entry *dst)
{
        do {
                struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
                u32 pmtu, route_mtu_cached;

                pmtu = dst_mtu(dst->child);
                xdst->child_mtu_cached = pmtu;

                pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

                route_mtu_cached = dst_mtu(xdst->route);
                xdst->route_mtu_cached = route_mtu_cached;

                if (pmtu > route_mtu_cached)
                        pmtu = route_mtu_cached;

                dst->metrics[RTAX_MTU-1] = pmtu;
        } while ((dst = dst->next));
}
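/*
 * Each level's MTU is the minimum of what the inner (child) dst can
 * carry after this state's overhead (xfrm_state_mtu()) and the cached
 * MTU of the route the level rides on.  xfrm_bundle_ok() below redoes
 * this computation bottom-up whenever a cached child or route MTU is
 * seen to have changed.
 */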

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl &&
		    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;

	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
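
/* Illustrative sketch (fields abbreviated; see struct
 * xfrm_policy_afinfo): each address family registers itself at init
 * time, roughly the way net/ipv4/xfrm4_policy.c does:
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &xfrm4_dst_ops,
 *		.dst_lookup	= xfrm4_dst_lookup,
 *		...
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 *
 * Any dst_ops hooks the family leaves NULL are filled in above with
 * the generic XFRM implementations.
 */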
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;

	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;

			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

/* On success this returns with xfrm_policy_afinfo_lock held for
 * reading; the matching xfrm_policy_put_afinfo() call drops it, so the
 * entry cannot be unregistered while a caller is using it.
 */
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __init xfrm_statistics_init(void)
{
	if (snmp_mib_init((void **)xfrm_statistics,
			  sizeof(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	return 0;
}
#endif

static void __init xfrm_policy_init(void)
{
	unsigned int hmask, sz;
	int dir;

	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;		/* start with eight hash buckets */
	sz = (hmask+1) * sizeof(struct hlist_head);

	xfrm_policy_byidx = xfrm_hash_alloc(sz);
	xfrm_idx_hmask = hmask;
	if (!xfrm_policy_byidx)
		panic("XFRM: failed to allocate byidx hash\n");

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);

		htab = &xfrm_policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		htab->hmask = hmask;
		if (!htab->table)
			panic("XFRM: failed to allocate bydst hash\n");
	}

	for (dir = 0; dir < XFRM_POLICY_TYPE_MAX; dir++)
		INIT_LIST_HEAD(&xfrm_policy_bytype[dir]);

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
#ifdef CONFIG_XFRM_STATISTICS
	xfrm_statistics_init();
#endif
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
#ifdef CONFIG_XFRM_STATISTICS
	xfrm_proc_init();
#endif
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=" NIPQUAD_FMT,
				 NIPQUAD(sel->saddr.a4));
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=" NIPQUAD_FMT,
				 NIPQUAD(sel->daddr.a4));
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)sel->saddr.a6));
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)sel->daddr.a6));
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}
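
/* Example of the resulting record (addresses illustrative): an IPv4
 * SPD-add for 10.0.0.0/8 -> 192.168.1.1/32 would append roughly
 *
 *	res=1 src=10.0.0.0 src_prefixlen=8 dst=192.168.1.1
 *
 * to the audit buffer; note that host-length prefixes (32 for IPv4,
 * 128 for IPv6) are omitted by the checks above.
 */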

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}

static struct xfrm_policy *xfrm_migrate_policy_find(struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		/* An inexact policy only wins over the exact-hash match
		 * above if it has strictly higher precedence, i.e. a
		 * numerically lower ->priority. */
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
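
/* Illustrative sketch: a single MOBIKE-style address update is
 * described by one struct xfrm_migrate entry (values hypothetical):
 *
 *	struct xfrm_migrate m = {
 *		.old_daddr	= ...,	current tunnel endpoint
 *		.new_daddr	= ...,	endpoint after movement
 *		.old_saddr	= ...,
 *		.new_saddr	= ...,
 *		.proto		= IPPROTO_ESP,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.reqid		= 0,	0 acts as a wildcard below
 *		.old_family	= AF_INET,
 *		.new_family	= AF_INET,
 *	};
 *
 * migrate_tmpl_match() below decides whether such an entry applies to
 * a given policy template.
 */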
static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* In transport mode the template does not store
			 * IP addresses, so matching on mode and protocol
			 * is sufficient. */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* reject duplicated entries */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
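
/* For example, an entry whose new addresses are identical to its old
 * ones, whose new source or destination is the unspecified (any)
 * address, or that duplicates another entry's old key (addresses,
 * proto, mode, reqid, family) is rejected with -EINVAL.
 */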
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	/* Undo: drop the references taken above and remove any states
	 * that were already cloned. */
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif
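
/* Illustrative call (arguments hypothetical): the in-kernel users are
 * the key managers, e.g. the PF_KEY MIGRATE handler, which after
 * parsing userspace input ends up doing roughly
 *
 *	err = xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, num);
 *
 * where m/num describe the address rewrites validated by
 * xfrm_migrate_check() above.
 */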