1 /* 2 * xfrm_policy.c 3 * 4 * Changes: 5 * Mitsuru KANDA @USAGI 6 * Kazunori MIYAZAWA @USAGI 7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 8 * IPv6 support 9 * Kazunori MIYAZAWA @USAGI 10 * YOSHIFUJI Hideaki 11 * Split up af-specific portion 12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor 13 * 14 */ 15 16 #include <linux/err.h> 17 #include <linux/slab.h> 18 #include <linux/kmod.h> 19 #include <linux/list.h> 20 #include <linux/spinlock.h> 21 #include <linux/workqueue.h> 22 #include <linux/notifier.h> 23 #include <linux/netdevice.h> 24 #include <linux/netfilter.h> 25 #include <linux/module.h> 26 #include <linux/cache.h> 27 #include <linux/audit.h> 28 #include <net/dst.h> 29 #include <net/flow.h> 30 #include <net/xfrm.h> 31 #include <net/ip.h> 32 #ifdef CONFIG_XFRM_STATISTICS 33 #include <net/snmp.h> 34 #endif 35 36 #include "xfrm_hash.h" 37 38 DEFINE_MUTEX(xfrm_cfg_mutex); 39 EXPORT_SYMBOL(xfrm_cfg_mutex); 40 41 static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock); 42 static struct dst_entry *xfrm_policy_sk_bundles; 43 static DEFINE_RWLOCK(xfrm_policy_lock); 44 45 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); 46 static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] 47 __read_mostly; 48 49 static struct kmem_cache *xfrm_dst_cache __read_mostly; 50 51 static void xfrm_init_pmtu(struct dst_entry *dst); 52 static int stale_bundle(struct dst_entry *dst); 53 static int xfrm_bundle_ok(struct xfrm_dst *xdst); 54 55 56 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 57 int dir); 58 59 static inline bool 60 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) 61 { 62 const struct flowi4 *fl4 = &fl->u.ip4; 63 64 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) && 65 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) && 66 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) && 67 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) && 68 (fl4->flowi4_proto == sel->proto || !sel->proto) && 69 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex); 70 } 71 72 static inline bool 73 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) 74 { 75 const struct flowi6 *fl6 = &fl->u.ip6; 76 77 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) && 78 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) && 79 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) && 80 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) && 81 (fl6->flowi6_proto == sel->proto || !sel->proto) && 82 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex); 83 } 84 85 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, 86 unsigned short family) 87 { 88 switch (family) { 89 case AF_INET: 90 return __xfrm4_selector_match(sel, fl); 91 case AF_INET6: 92 return __xfrm6_selector_match(sel, fl); 93 } 94 return false; 95 } 96 97 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) 98 { 99 struct xfrm_policy_afinfo *afinfo; 100 101 if (unlikely(family >= NPROTO)) 102 return NULL; 103 rcu_read_lock(); 104 afinfo = rcu_dereference(xfrm_policy_afinfo[family]); 105 if (unlikely(!afinfo)) 106 rcu_read_unlock(); 107 return afinfo; 108 } 109 110 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) 111 { 112 rcu_read_unlock(); 113 } 114 115 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, 116 const xfrm_address_t *saddr, 117 const xfrm_address_t *daddr, 118 int family) 119 { 120 struct xfrm_policy_afinfo *afinfo; 121 struct dst_entry *dst; 122 123 afinfo = xfrm_policy_get_afinfo(family); 124 if (unlikely(afinfo == NULL)) 125 return ERR_PTR(-EAFNOSUPPORT); 126 127 dst = afinfo->dst_lookup(net, tos, saddr, daddr); 128 129 xfrm_policy_put_afinfo(afinfo); 130 131 return dst; 132 } 133 134 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, 135 xfrm_address_t *prev_saddr, 136 xfrm_address_t *prev_daddr, 137 int family) 138 { 139 struct net *net = xs_net(x); 140 xfrm_address_t *saddr = &x->props.saddr; 141 xfrm_address_t *daddr = &x->id.daddr; 142 struct dst_entry *dst; 143 144 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) { 145 saddr = x->coaddr; 146 daddr = prev_daddr; 147 } 148 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) { 149 saddr = prev_saddr; 150 daddr = x->coaddr; 151 } 152 153 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family); 154 155 if (!IS_ERR(dst)) { 156 if (prev_saddr != saddr) 157 memcpy(prev_saddr, saddr, sizeof(*prev_saddr)); 158 if (prev_daddr != daddr) 159 memcpy(prev_daddr, daddr, sizeof(*prev_daddr)); 160 } 161 162 return dst; 163 } 164 165 static inline unsigned long make_jiffies(long secs) 166 { 167 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ) 168 return MAX_SCHEDULE_TIMEOUT-1; 169 else 170 return secs*HZ; 171 } 172 173 static void xfrm_policy_timer(unsigned long data) 174 { 175 struct xfrm_policy *xp = (struct xfrm_policy*)data; 176 unsigned long now = get_seconds(); 177 long next = LONG_MAX; 178 int warn = 0; 179 int dir; 180 181 read_lock(&xp->lock); 182 183 if (unlikely(xp->walk.dead)) 184 goto out; 185 186 dir = xfrm_policy_id2dir(xp->index); 187 188 if (xp->lft.hard_add_expires_seconds) { 189 long tmo = xp->lft.hard_add_expires_seconds + 190 xp->curlft.add_time - now; 191 if (tmo <= 0) 192 goto expired; 193 if (tmo < next) 194 next = tmo; 195 } 196 if (xp->lft.hard_use_expires_seconds) { 197 long tmo = xp->lft.hard_use_expires_seconds + 198 (xp->curlft.use_time ? : xp->curlft.add_time) - now; 199 if (tmo <= 0) 200 goto expired; 201 if (tmo < next) 202 next = tmo; 203 } 204 if (xp->lft.soft_add_expires_seconds) { 205 long tmo = xp->lft.soft_add_expires_seconds + 206 xp->curlft.add_time - now; 207 if (tmo <= 0) { 208 warn = 1; 209 tmo = XFRM_KM_TIMEOUT; 210 } 211 if (tmo < next) 212 next = tmo; 213 } 214 if (xp->lft.soft_use_expires_seconds) { 215 long tmo = xp->lft.soft_use_expires_seconds + 216 (xp->curlft.use_time ? : xp->curlft.add_time) - now; 217 if (tmo <= 0) { 218 warn = 1; 219 tmo = XFRM_KM_TIMEOUT; 220 } 221 if (tmo < next) 222 next = tmo; 223 } 224 225 if (warn) 226 km_policy_expired(xp, dir, 0, 0); 227 if (next != LONG_MAX && 228 !mod_timer(&xp->timer, jiffies + make_jiffies(next))) 229 xfrm_pol_hold(xp); 230 231 out: 232 read_unlock(&xp->lock); 233 xfrm_pol_put(xp); 234 return; 235 236 expired: 237 read_unlock(&xp->lock); 238 if (!xfrm_policy_delete(xp, dir)) 239 km_policy_expired(xp, dir, 1, 0); 240 xfrm_pol_put(xp); 241 } 242 243 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo) 244 { 245 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); 246 247 if (unlikely(pol->walk.dead)) 248 flo = NULL; 249 else 250 xfrm_pol_hold(pol); 251 252 return flo; 253 } 254 255 static int xfrm_policy_flo_check(struct flow_cache_object *flo) 256 { 257 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); 258 259 return !pol->walk.dead; 260 } 261 262 static void xfrm_policy_flo_delete(struct flow_cache_object *flo) 263 { 264 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo)); 265 } 266 267 static const struct flow_cache_ops xfrm_policy_fc_ops = { 268 .get = xfrm_policy_flo_get, 269 .check = xfrm_policy_flo_check, 270 .delete = xfrm_policy_flo_delete, 271 }; 272 273 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 274 * SPD calls. 275 */ 276 277 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) 278 { 279 struct xfrm_policy *policy; 280 281 policy = kzalloc(sizeof(struct xfrm_policy), gfp); 282 283 if (policy) { 284 write_pnet(&policy->xp_net, net); 285 INIT_LIST_HEAD(&policy->walk.all); 286 INIT_HLIST_NODE(&policy->bydst); 287 INIT_HLIST_NODE(&policy->byidx); 288 rwlock_init(&policy->lock); 289 atomic_set(&policy->refcnt, 1); 290 setup_timer(&policy->timer, xfrm_policy_timer, 291 (unsigned long)policy); 292 policy->flo.ops = &xfrm_policy_fc_ops; 293 } 294 return policy; 295 } 296 EXPORT_SYMBOL(xfrm_policy_alloc); 297 298 /* Destroy xfrm_policy: descendant resources must be released to this moment. */ 299 300 void xfrm_policy_destroy(struct xfrm_policy *policy) 301 { 302 BUG_ON(!policy->walk.dead); 303 304 if (del_timer(&policy->timer)) 305 BUG(); 306 307 security_xfrm_policy_free(policy->security); 308 kfree(policy); 309 } 310 EXPORT_SYMBOL(xfrm_policy_destroy); 311 312 /* Rule must be locked. Release descentant resources, announce 313 * entry dead. The rule must be unlinked from lists to the moment. 314 */ 315 316 static void xfrm_policy_kill(struct xfrm_policy *policy) 317 { 318 policy->walk.dead = 1; 319 320 atomic_inc(&policy->genid); 321 322 if (del_timer(&policy->timer)) 323 xfrm_pol_put(policy); 324 325 xfrm_pol_put(policy); 326 } 327 328 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; 329 330 static inline unsigned int idx_hash(struct net *net, u32 index) 331 { 332 return __idx_hash(index, net->xfrm.policy_idx_hmask); 333 } 334 335 static struct hlist_head *policy_hash_bysel(struct net *net, 336 const struct xfrm_selector *sel, 337 unsigned short family, int dir) 338 { 339 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 340 unsigned int hash = __sel_hash(sel, family, hmask); 341 342 return (hash == hmask + 1 ? 343 &net->xfrm.policy_inexact[dir] : 344 net->xfrm.policy_bydst[dir].table + hash); 345 } 346 347 static struct hlist_head *policy_hash_direct(struct net *net, 348 const xfrm_address_t *daddr, 349 const xfrm_address_t *saddr, 350 unsigned short family, int dir) 351 { 352 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 353 unsigned int hash = __addr_hash(daddr, saddr, family, hmask); 354 355 return net->xfrm.policy_bydst[dir].table + hash; 356 } 357 358 static void xfrm_dst_hash_transfer(struct hlist_head *list, 359 struct hlist_head *ndsttable, 360 unsigned int nhashmask) 361 { 362 struct hlist_node *entry, *tmp, *entry0 = NULL; 363 struct xfrm_policy *pol; 364 unsigned int h0 = 0; 365 366 redo: 367 hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) { 368 unsigned int h; 369 370 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, 371 pol->family, nhashmask); 372 if (!entry0) { 373 hlist_del(entry); 374 hlist_add_head(&pol->bydst, ndsttable+h); 375 h0 = h; 376 } else { 377 if (h != h0) 378 continue; 379 hlist_del(entry); 380 hlist_add_after(entry0, &pol->bydst); 381 } 382 entry0 = entry; 383 } 384 if (!hlist_empty(list)) { 385 entry0 = NULL; 386 goto redo; 387 } 388 } 389 390 static void xfrm_idx_hash_transfer(struct hlist_head *list, 391 struct hlist_head *nidxtable, 392 unsigned int nhashmask) 393 { 394 struct hlist_node *entry, *tmp; 395 struct xfrm_policy *pol; 396 397 hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) { 398 unsigned int h; 399 400 h = __idx_hash(pol->index, nhashmask); 401 hlist_add_head(&pol->byidx, nidxtable+h); 402 } 403 } 404 405 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask) 406 { 407 return ((old_hmask + 1) << 1) - 1; 408 } 409 410 static void xfrm_bydst_resize(struct net *net, int dir) 411 { 412 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 413 unsigned int nhashmask = xfrm_new_hash_mask(hmask); 414 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); 415 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table; 416 struct hlist_head *ndst = xfrm_hash_alloc(nsize); 417 int i; 418 419 if (!ndst) 420 return; 421 422 write_lock_bh(&xfrm_policy_lock); 423 424 for (i = hmask; i >= 0; i--) 425 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask); 426 427 net->xfrm.policy_bydst[dir].table = ndst; 428 net->xfrm.policy_bydst[dir].hmask = nhashmask; 429 430 write_unlock_bh(&xfrm_policy_lock); 431 432 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head)); 433 } 434 435 static void xfrm_byidx_resize(struct net *net, int total) 436 { 437 unsigned int hmask = net->xfrm.policy_idx_hmask; 438 unsigned int nhashmask = xfrm_new_hash_mask(hmask); 439 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); 440 struct hlist_head *oidx = net->xfrm.policy_byidx; 441 struct hlist_head *nidx = xfrm_hash_alloc(nsize); 442 int i; 443 444 if (!nidx) 445 return; 446 447 write_lock_bh(&xfrm_policy_lock); 448 449 for (i = hmask; i >= 0; i--) 450 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask); 451 452 net->xfrm.policy_byidx = nidx; 453 net->xfrm.policy_idx_hmask = nhashmask; 454 455 write_unlock_bh(&xfrm_policy_lock); 456 457 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head)); 458 } 459 460 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total) 461 { 462 unsigned int cnt = net->xfrm.policy_count[dir]; 463 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 464 465 if (total) 466 *total += cnt; 467 468 if ((hmask + 1) < xfrm_policy_hashmax && 469 cnt > hmask) 470 return 1; 471 472 return 0; 473 } 474 475 static inline int xfrm_byidx_should_resize(struct net *net, int total) 476 { 477 unsigned int hmask = net->xfrm.policy_idx_hmask; 478 479 if ((hmask + 1) < xfrm_policy_hashmax && 480 total > hmask) 481 return 1; 482 483 return 0; 484 } 485 486 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) 487 { 488 read_lock_bh(&xfrm_policy_lock); 489 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; 490 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; 491 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; 492 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; 493 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; 494 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; 495 si->spdhcnt = net->xfrm.policy_idx_hmask; 496 si->spdhmcnt = xfrm_policy_hashmax; 497 read_unlock_bh(&xfrm_policy_lock); 498 } 499 EXPORT_SYMBOL(xfrm_spd_getinfo); 500 501 static DEFINE_MUTEX(hash_resize_mutex); 502 static void xfrm_hash_resize(struct work_struct *work) 503 { 504 struct net *net = container_of(work, struct net, xfrm.policy_hash_work); 505 int dir, total; 506 507 mutex_lock(&hash_resize_mutex); 508 509 total = 0; 510 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 511 if (xfrm_bydst_should_resize(net, dir, &total)) 512 xfrm_bydst_resize(net, dir); 513 } 514 if (xfrm_byidx_should_resize(net, total)) 515 xfrm_byidx_resize(net, total); 516 517 mutex_unlock(&hash_resize_mutex); 518 } 519 520 /* Generate new index... KAME seems to generate them ordered by cost 521 * of an absolute inpredictability of ordering of rules. This will not pass. */ 522 static u32 xfrm_gen_index(struct net *net, int dir) 523 { 524 static u32 idx_generator; 525 526 for (;;) { 527 struct hlist_node *entry; 528 struct hlist_head *list; 529 struct xfrm_policy *p; 530 u32 idx; 531 int found; 532 533 idx = (idx_generator | dir); 534 idx_generator += 8; 535 if (idx == 0) 536 idx = 8; 537 list = net->xfrm.policy_byidx + idx_hash(net, idx); 538 found = 0; 539 hlist_for_each_entry(p, entry, list, byidx) { 540 if (p->index == idx) { 541 found = 1; 542 break; 543 } 544 } 545 if (!found) 546 return idx; 547 } 548 } 549 550 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2) 551 { 552 u32 *p1 = (u32 *) s1; 553 u32 *p2 = (u32 *) s2; 554 int len = sizeof(struct xfrm_selector) / sizeof(u32); 555 int i; 556 557 for (i = 0; i < len; i++) { 558 if (p1[i] != p2[i]) 559 return 1; 560 } 561 562 return 0; 563 } 564 565 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) 566 { 567 struct net *net = xp_net(policy); 568 struct xfrm_policy *pol; 569 struct xfrm_policy *delpol; 570 struct hlist_head *chain; 571 struct hlist_node *entry, *newpos; 572 u32 mark = policy->mark.v & policy->mark.m; 573 574 write_lock_bh(&xfrm_policy_lock); 575 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 576 delpol = NULL; 577 newpos = NULL; 578 hlist_for_each_entry(pol, entry, chain, bydst) { 579 if (pol->type == policy->type && 580 !selector_cmp(&pol->selector, &policy->selector) && 581 (mark & pol->mark.m) == pol->mark.v && 582 xfrm_sec_ctx_match(pol->security, policy->security) && 583 !WARN_ON(delpol)) { 584 if (excl) { 585 write_unlock_bh(&xfrm_policy_lock); 586 return -EEXIST; 587 } 588 delpol = pol; 589 if (policy->priority > pol->priority) 590 continue; 591 } else if (policy->priority >= pol->priority) { 592 newpos = &pol->bydst; 593 continue; 594 } 595 if (delpol) 596 break; 597 } 598 if (newpos) 599 hlist_add_after(newpos, &policy->bydst); 600 else 601 hlist_add_head(&policy->bydst, chain); 602 xfrm_pol_hold(policy); 603 net->xfrm.policy_count[dir]++; 604 atomic_inc(&flow_cache_genid); 605 rt_genid_bump(net); 606 if (delpol) 607 __xfrm_policy_unlink(delpol, dir); 608 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); 609 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); 610 policy->curlft.add_time = get_seconds(); 611 policy->curlft.use_time = 0; 612 if (!mod_timer(&policy->timer, jiffies + HZ)) 613 xfrm_pol_hold(policy); 614 list_add(&policy->walk.all, &net->xfrm.policy_all); 615 write_unlock_bh(&xfrm_policy_lock); 616 617 if (delpol) 618 xfrm_policy_kill(delpol); 619 else if (xfrm_bydst_should_resize(net, dir, NULL)) 620 schedule_work(&net->xfrm.policy_hash_work); 621 622 return 0; 623 } 624 EXPORT_SYMBOL(xfrm_policy_insert); 625 626 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, 627 int dir, struct xfrm_selector *sel, 628 struct xfrm_sec_ctx *ctx, int delete, 629 int *err) 630 { 631 struct xfrm_policy *pol, *ret; 632 struct hlist_head *chain; 633 struct hlist_node *entry; 634 635 *err = 0; 636 write_lock_bh(&xfrm_policy_lock); 637 chain = policy_hash_bysel(net, sel, sel->family, dir); 638 ret = NULL; 639 hlist_for_each_entry(pol, entry, chain, bydst) { 640 if (pol->type == type && 641 (mark & pol->mark.m) == pol->mark.v && 642 !selector_cmp(sel, &pol->selector) && 643 xfrm_sec_ctx_match(ctx, pol->security)) { 644 xfrm_pol_hold(pol); 645 if (delete) { 646 *err = security_xfrm_policy_delete( 647 pol->security); 648 if (*err) { 649 write_unlock_bh(&xfrm_policy_lock); 650 return pol; 651 } 652 __xfrm_policy_unlink(pol, dir); 653 } 654 ret = pol; 655 break; 656 } 657 } 658 write_unlock_bh(&xfrm_policy_lock); 659 660 if (ret && delete) 661 xfrm_policy_kill(ret); 662 return ret; 663 } 664 EXPORT_SYMBOL(xfrm_policy_bysel_ctx); 665 666 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, 667 int dir, u32 id, int delete, int *err) 668 { 669 struct xfrm_policy *pol, *ret; 670 struct hlist_head *chain; 671 struct hlist_node *entry; 672 673 *err = -ENOENT; 674 if (xfrm_policy_id2dir(id) != dir) 675 return NULL; 676 677 *err = 0; 678 write_lock_bh(&xfrm_policy_lock); 679 chain = net->xfrm.policy_byidx + idx_hash(net, id); 680 ret = NULL; 681 hlist_for_each_entry(pol, entry, chain, byidx) { 682 if (pol->type == type && pol->index == id && 683 (mark & pol->mark.m) == pol->mark.v) { 684 xfrm_pol_hold(pol); 685 if (delete) { 686 *err = security_xfrm_policy_delete( 687 pol->security); 688 if (*err) { 689 write_unlock_bh(&xfrm_policy_lock); 690 return pol; 691 } 692 __xfrm_policy_unlink(pol, dir); 693 } 694 ret = pol; 695 break; 696 } 697 } 698 write_unlock_bh(&xfrm_policy_lock); 699 700 if (ret && delete) 701 xfrm_policy_kill(ret); 702 return ret; 703 } 704 EXPORT_SYMBOL(xfrm_policy_byid); 705 706 #ifdef CONFIG_SECURITY_NETWORK_XFRM 707 static inline int 708 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 709 { 710 int dir, err = 0; 711 712 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 713 struct xfrm_policy *pol; 714 struct hlist_node *entry; 715 int i; 716 717 hlist_for_each_entry(pol, entry, 718 &net->xfrm.policy_inexact[dir], bydst) { 719 if (pol->type != type) 720 continue; 721 err = security_xfrm_policy_delete(pol->security); 722 if (err) { 723 xfrm_audit_policy_delete(pol, 0, 724 audit_info->loginuid, 725 audit_info->sessionid, 726 audit_info->secid); 727 return err; 728 } 729 } 730 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 731 hlist_for_each_entry(pol, entry, 732 net->xfrm.policy_bydst[dir].table + i, 733 bydst) { 734 if (pol->type != type) 735 continue; 736 err = security_xfrm_policy_delete( 737 pol->security); 738 if (err) { 739 xfrm_audit_policy_delete(pol, 0, 740 audit_info->loginuid, 741 audit_info->sessionid, 742 audit_info->secid); 743 return err; 744 } 745 } 746 } 747 } 748 return err; 749 } 750 #else 751 static inline int 752 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 753 { 754 return 0; 755 } 756 #endif 757 758 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 759 { 760 int dir, err = 0, cnt = 0; 761 762 write_lock_bh(&xfrm_policy_lock); 763 764 err = xfrm_policy_flush_secctx_check(net, type, audit_info); 765 if (err) 766 goto out; 767 768 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 769 struct xfrm_policy *pol; 770 struct hlist_node *entry; 771 int i; 772 773 again1: 774 hlist_for_each_entry(pol, entry, 775 &net->xfrm.policy_inexact[dir], bydst) { 776 if (pol->type != type) 777 continue; 778 __xfrm_policy_unlink(pol, dir); 779 write_unlock_bh(&xfrm_policy_lock); 780 cnt++; 781 782 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 783 audit_info->sessionid, 784 audit_info->secid); 785 786 xfrm_policy_kill(pol); 787 788 write_lock_bh(&xfrm_policy_lock); 789 goto again1; 790 } 791 792 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 793 again2: 794 hlist_for_each_entry(pol, entry, 795 net->xfrm.policy_bydst[dir].table + i, 796 bydst) { 797 if (pol->type != type) 798 continue; 799 __xfrm_policy_unlink(pol, dir); 800 write_unlock_bh(&xfrm_policy_lock); 801 cnt++; 802 803 xfrm_audit_policy_delete(pol, 1, 804 audit_info->loginuid, 805 audit_info->sessionid, 806 audit_info->secid); 807 xfrm_policy_kill(pol); 808 809 write_lock_bh(&xfrm_policy_lock); 810 goto again2; 811 } 812 } 813 814 } 815 if (!cnt) 816 err = -ESRCH; 817 out: 818 write_unlock_bh(&xfrm_policy_lock); 819 return err; 820 } 821 EXPORT_SYMBOL(xfrm_policy_flush); 822 823 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, 824 int (*func)(struct xfrm_policy *, int, int, void*), 825 void *data) 826 { 827 struct xfrm_policy *pol; 828 struct xfrm_policy_walk_entry *x; 829 int error = 0; 830 831 if (walk->type >= XFRM_POLICY_TYPE_MAX && 832 walk->type != XFRM_POLICY_TYPE_ANY) 833 return -EINVAL; 834 835 if (list_empty(&walk->walk.all) && walk->seq != 0) 836 return 0; 837 838 write_lock_bh(&xfrm_policy_lock); 839 if (list_empty(&walk->walk.all)) 840 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all); 841 else 842 x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all); 843 list_for_each_entry_from(x, &net->xfrm.policy_all, all) { 844 if (x->dead) 845 continue; 846 pol = container_of(x, struct xfrm_policy, walk); 847 if (walk->type != XFRM_POLICY_TYPE_ANY && 848 walk->type != pol->type) 849 continue; 850 error = func(pol, xfrm_policy_id2dir(pol->index), 851 walk->seq, data); 852 if (error) { 853 list_move_tail(&walk->walk.all, &x->all); 854 goto out; 855 } 856 walk->seq++; 857 } 858 if (walk->seq == 0) { 859 error = -ENOENT; 860 goto out; 861 } 862 list_del_init(&walk->walk.all); 863 out: 864 write_unlock_bh(&xfrm_policy_lock); 865 return error; 866 } 867 EXPORT_SYMBOL(xfrm_policy_walk); 868 869 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type) 870 { 871 INIT_LIST_HEAD(&walk->walk.all); 872 walk->walk.dead = 1; 873 walk->type = type; 874 walk->seq = 0; 875 } 876 EXPORT_SYMBOL(xfrm_policy_walk_init); 877 878 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk) 879 { 880 if (list_empty(&walk->walk.all)) 881 return; 882 883 write_lock_bh(&xfrm_policy_lock); 884 list_del(&walk->walk.all); 885 write_unlock_bh(&xfrm_policy_lock); 886 } 887 EXPORT_SYMBOL(xfrm_policy_walk_done); 888 889 /* 890 * Find policy to apply to this flow. 891 * 892 * Returns 0 if policy found, else an -errno. 893 */ 894 static int xfrm_policy_match(const struct xfrm_policy *pol, 895 const struct flowi *fl, 896 u8 type, u16 family, int dir) 897 { 898 const struct xfrm_selector *sel = &pol->selector; 899 int ret = -ESRCH; 900 bool match; 901 902 if (pol->family != family || 903 (fl->flowi_mark & pol->mark.m) != pol->mark.v || 904 pol->type != type) 905 return ret; 906 907 match = xfrm_selector_match(sel, fl, family); 908 if (match) 909 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid, 910 dir); 911 912 return ret; 913 } 914 915 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, 916 const struct flowi *fl, 917 u16 family, u8 dir) 918 { 919 int err; 920 struct xfrm_policy *pol, *ret; 921 const xfrm_address_t *daddr, *saddr; 922 struct hlist_node *entry; 923 struct hlist_head *chain; 924 u32 priority = ~0U; 925 926 daddr = xfrm_flowi_daddr(fl, family); 927 saddr = xfrm_flowi_saddr(fl, family); 928 if (unlikely(!daddr || !saddr)) 929 return NULL; 930 931 read_lock_bh(&xfrm_policy_lock); 932 chain = policy_hash_direct(net, daddr, saddr, family, dir); 933 ret = NULL; 934 hlist_for_each_entry(pol, entry, chain, bydst) { 935 err = xfrm_policy_match(pol, fl, type, family, dir); 936 if (err) { 937 if (err == -ESRCH) 938 continue; 939 else { 940 ret = ERR_PTR(err); 941 goto fail; 942 } 943 } else { 944 ret = pol; 945 priority = ret->priority; 946 break; 947 } 948 } 949 chain = &net->xfrm.policy_inexact[dir]; 950 hlist_for_each_entry(pol, entry, chain, bydst) { 951 err = xfrm_policy_match(pol, fl, type, family, dir); 952 if (err) { 953 if (err == -ESRCH) 954 continue; 955 else { 956 ret = ERR_PTR(err); 957 goto fail; 958 } 959 } else if (pol->priority < priority) { 960 ret = pol; 961 break; 962 } 963 } 964 if (ret) 965 xfrm_pol_hold(ret); 966 fail: 967 read_unlock_bh(&xfrm_policy_lock); 968 969 return ret; 970 } 971 972 static struct xfrm_policy * 973 __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir) 974 { 975 #ifdef CONFIG_XFRM_SUB_POLICY 976 struct xfrm_policy *pol; 977 978 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); 979 if (pol != NULL) 980 return pol; 981 #endif 982 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); 983 } 984 985 static struct flow_cache_object * 986 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, 987 u8 dir, struct flow_cache_object *old_obj, void *ctx) 988 { 989 struct xfrm_policy *pol; 990 991 if (old_obj) 992 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); 993 994 pol = __xfrm_policy_lookup(net, fl, family, dir); 995 if (IS_ERR_OR_NULL(pol)) 996 return ERR_CAST(pol); 997 998 /* Resolver returns two references: 999 * one for cache and one for caller of flow_cache_lookup() */ 1000 xfrm_pol_hold(pol); 1001 1002 return &pol->flo; 1003 } 1004 1005 static inline int policy_to_flow_dir(int dir) 1006 { 1007 if (XFRM_POLICY_IN == FLOW_DIR_IN && 1008 XFRM_POLICY_OUT == FLOW_DIR_OUT && 1009 XFRM_POLICY_FWD == FLOW_DIR_FWD) 1010 return dir; 1011 switch (dir) { 1012 default: 1013 case XFRM_POLICY_IN: 1014 return FLOW_DIR_IN; 1015 case XFRM_POLICY_OUT: 1016 return FLOW_DIR_OUT; 1017 case XFRM_POLICY_FWD: 1018 return FLOW_DIR_FWD; 1019 } 1020 } 1021 1022 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, 1023 const struct flowi *fl) 1024 { 1025 struct xfrm_policy *pol; 1026 1027 read_lock_bh(&xfrm_policy_lock); 1028 if ((pol = sk->sk_policy[dir]) != NULL) { 1029 bool match = xfrm_selector_match(&pol->selector, fl, 1030 sk->sk_family); 1031 int err = 0; 1032 1033 if (match) { 1034 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { 1035 pol = NULL; 1036 goto out; 1037 } 1038 err = security_xfrm_policy_lookup(pol->security, 1039 fl->flowi_secid, 1040 policy_to_flow_dir(dir)); 1041 if (!err) 1042 xfrm_pol_hold(pol); 1043 else if (err == -ESRCH) 1044 pol = NULL; 1045 else 1046 pol = ERR_PTR(err); 1047 } else 1048 pol = NULL; 1049 } 1050 out: 1051 read_unlock_bh(&xfrm_policy_lock); 1052 return pol; 1053 } 1054 1055 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) 1056 { 1057 struct net *net = xp_net(pol); 1058 struct hlist_head *chain = policy_hash_bysel(net, &pol->selector, 1059 pol->family, dir); 1060 1061 list_add(&pol->walk.all, &net->xfrm.policy_all); 1062 hlist_add_head(&pol->bydst, chain); 1063 hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index)); 1064 net->xfrm.policy_count[dir]++; 1065 xfrm_pol_hold(pol); 1066 1067 if (xfrm_bydst_should_resize(net, dir, NULL)) 1068 schedule_work(&net->xfrm.policy_hash_work); 1069 } 1070 1071 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 1072 int dir) 1073 { 1074 struct net *net = xp_net(pol); 1075 1076 if (hlist_unhashed(&pol->bydst)) 1077 return NULL; 1078 1079 hlist_del(&pol->bydst); 1080 hlist_del(&pol->byidx); 1081 list_del(&pol->walk.all); 1082 net->xfrm.policy_count[dir]--; 1083 1084 return pol; 1085 } 1086 1087 int xfrm_policy_delete(struct xfrm_policy *pol, int dir) 1088 { 1089 write_lock_bh(&xfrm_policy_lock); 1090 pol = __xfrm_policy_unlink(pol, dir); 1091 write_unlock_bh(&xfrm_policy_lock); 1092 if (pol) { 1093 xfrm_policy_kill(pol); 1094 return 0; 1095 } 1096 return -ENOENT; 1097 } 1098 EXPORT_SYMBOL(xfrm_policy_delete); 1099 1100 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) 1101 { 1102 struct net *net = xp_net(pol); 1103 struct xfrm_policy *old_pol; 1104 1105 #ifdef CONFIG_XFRM_SUB_POLICY 1106 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN) 1107 return -EINVAL; 1108 #endif 1109 1110 write_lock_bh(&xfrm_policy_lock); 1111 old_pol = sk->sk_policy[dir]; 1112 sk->sk_policy[dir] = pol; 1113 if (pol) { 1114 pol->curlft.add_time = get_seconds(); 1115 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir); 1116 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); 1117 } 1118 if (old_pol) 1119 /* Unlinking succeeds always. This is the only function 1120 * allowed to delete or replace socket policy. 1121 */ 1122 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); 1123 write_unlock_bh(&xfrm_policy_lock); 1124 1125 if (old_pol) { 1126 xfrm_policy_kill(old_pol); 1127 } 1128 return 0; 1129 } 1130 1131 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) 1132 { 1133 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); 1134 1135 if (newp) { 1136 newp->selector = old->selector; 1137 if (security_xfrm_policy_clone(old->security, 1138 &newp->security)) { 1139 kfree(newp); 1140 return NULL; /* ENOMEM */ 1141 } 1142 newp->lft = old->lft; 1143 newp->curlft = old->curlft; 1144 newp->mark = old->mark; 1145 newp->action = old->action; 1146 newp->flags = old->flags; 1147 newp->xfrm_nr = old->xfrm_nr; 1148 newp->index = old->index; 1149 newp->type = old->type; 1150 memcpy(newp->xfrm_vec, old->xfrm_vec, 1151 newp->xfrm_nr*sizeof(struct xfrm_tmpl)); 1152 write_lock_bh(&xfrm_policy_lock); 1153 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir); 1154 write_unlock_bh(&xfrm_policy_lock); 1155 xfrm_pol_put(newp); 1156 } 1157 return newp; 1158 } 1159 1160 int __xfrm_sk_clone_policy(struct sock *sk) 1161 { 1162 struct xfrm_policy *p0 = sk->sk_policy[0], 1163 *p1 = sk->sk_policy[1]; 1164 1165 sk->sk_policy[0] = sk->sk_policy[1] = NULL; 1166 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) 1167 return -ENOMEM; 1168 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) 1169 return -ENOMEM; 1170 return 0; 1171 } 1172 1173 static int 1174 xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote, 1175 unsigned short family) 1176 { 1177 int err; 1178 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1179 1180 if (unlikely(afinfo == NULL)) 1181 return -EINVAL; 1182 err = afinfo->get_saddr(net, local, remote); 1183 xfrm_policy_put_afinfo(afinfo); 1184 return err; 1185 } 1186 1187 /* Resolve list of templates for the flow, given policy. */ 1188 1189 static int 1190 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, 1191 struct xfrm_state **xfrm, unsigned short family) 1192 { 1193 struct net *net = xp_net(policy); 1194 int nx; 1195 int i, error; 1196 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 1197 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 1198 xfrm_address_t tmp; 1199 1200 for (nx=0, i = 0; i < policy->xfrm_nr; i++) { 1201 struct xfrm_state *x; 1202 xfrm_address_t *remote = daddr; 1203 xfrm_address_t *local = saddr; 1204 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 1205 1206 if (tmpl->mode == XFRM_MODE_TUNNEL || 1207 tmpl->mode == XFRM_MODE_BEET) { 1208 remote = &tmpl->id.daddr; 1209 local = &tmpl->saddr; 1210 if (xfrm_addr_any(local, tmpl->encap_family)) { 1211 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family); 1212 if (error) 1213 goto fail; 1214 local = &tmp; 1215 } 1216 } 1217 1218 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family); 1219 1220 if (x && x->km.state == XFRM_STATE_VALID) { 1221 xfrm[nx++] = x; 1222 daddr = remote; 1223 saddr = local; 1224 continue; 1225 } 1226 if (x) { 1227 error = (x->km.state == XFRM_STATE_ERROR ? 1228 -EINVAL : -EAGAIN); 1229 xfrm_state_put(x); 1230 } 1231 else if (error == -ESRCH) 1232 error = -EAGAIN; 1233 1234 if (!tmpl->optional) 1235 goto fail; 1236 } 1237 return nx; 1238 1239 fail: 1240 for (nx--; nx>=0; nx--) 1241 xfrm_state_put(xfrm[nx]); 1242 return error; 1243 } 1244 1245 static int 1246 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, 1247 struct xfrm_state **xfrm, unsigned short family) 1248 { 1249 struct xfrm_state *tp[XFRM_MAX_DEPTH]; 1250 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm; 1251 int cnx = 0; 1252 int error; 1253 int ret; 1254 int i; 1255 1256 for (i = 0; i < npols; i++) { 1257 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) { 1258 error = -ENOBUFS; 1259 goto fail; 1260 } 1261 1262 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family); 1263 if (ret < 0) { 1264 error = ret; 1265 goto fail; 1266 } else 1267 cnx += ret; 1268 } 1269 1270 /* found states are sorted for outbound processing */ 1271 if (npols > 1) 1272 xfrm_state_sort(xfrm, tpp, cnx, family); 1273 1274 return cnx; 1275 1276 fail: 1277 for (cnx--; cnx>=0; cnx--) 1278 xfrm_state_put(tpp[cnx]); 1279 return error; 1280 1281 } 1282 1283 /* Check that the bundle accepts the flow and its components are 1284 * still valid. 1285 */ 1286 1287 static inline int xfrm_get_tos(const struct flowi *fl, int family) 1288 { 1289 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1290 int tos; 1291 1292 if (!afinfo) 1293 return -EINVAL; 1294 1295 tos = afinfo->get_tos(fl); 1296 1297 xfrm_policy_put_afinfo(afinfo); 1298 1299 return tos; 1300 } 1301 1302 static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo) 1303 { 1304 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1305 struct dst_entry *dst = &xdst->u.dst; 1306 1307 if (xdst->route == NULL) { 1308 /* Dummy bundle - if it has xfrms we were not 1309 * able to build bundle as template resolution failed. 1310 * It means we need to try again resolving. */ 1311 if (xdst->num_xfrms > 0) 1312 return NULL; 1313 } else { 1314 /* Real bundle */ 1315 if (stale_bundle(dst)) 1316 return NULL; 1317 } 1318 1319 dst_hold(dst); 1320 return flo; 1321 } 1322 1323 static int xfrm_bundle_flo_check(struct flow_cache_object *flo) 1324 { 1325 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1326 struct dst_entry *dst = &xdst->u.dst; 1327 1328 if (!xdst->route) 1329 return 0; 1330 if (stale_bundle(dst)) 1331 return 0; 1332 1333 return 1; 1334 } 1335 1336 static void xfrm_bundle_flo_delete(struct flow_cache_object *flo) 1337 { 1338 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1339 struct dst_entry *dst = &xdst->u.dst; 1340 1341 dst_free(dst); 1342 } 1343 1344 static const struct flow_cache_ops xfrm_bundle_fc_ops = { 1345 .get = xfrm_bundle_flo_get, 1346 .check = xfrm_bundle_flo_check, 1347 .delete = xfrm_bundle_flo_delete, 1348 }; 1349 1350 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 1351 { 1352 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1353 struct dst_ops *dst_ops; 1354 struct xfrm_dst *xdst; 1355 1356 if (!afinfo) 1357 return ERR_PTR(-EINVAL); 1358 1359 switch (family) { 1360 case AF_INET: 1361 dst_ops = &net->xfrm.xfrm4_dst_ops; 1362 break; 1363 #if IS_ENABLED(CONFIG_IPV6) 1364 case AF_INET6: 1365 dst_ops = &net->xfrm.xfrm6_dst_ops; 1366 break; 1367 #endif 1368 default: 1369 BUG(); 1370 } 1371 xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0); 1372 1373 if (likely(xdst)) { 1374 struct dst_entry *dst = &xdst->u.dst; 1375 1376 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)); 1377 xdst->flo.ops = &xfrm_bundle_fc_ops; 1378 if (afinfo->init_dst) 1379 afinfo->init_dst(net, xdst); 1380 } else 1381 xdst = ERR_PTR(-ENOBUFS); 1382 1383 xfrm_policy_put_afinfo(afinfo); 1384 1385 return xdst; 1386 } 1387 1388 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, 1389 int nfheader_len) 1390 { 1391 struct xfrm_policy_afinfo *afinfo = 1392 xfrm_policy_get_afinfo(dst->ops->family); 1393 int err; 1394 1395 if (!afinfo) 1396 return -EINVAL; 1397 1398 err = afinfo->init_path(path, dst, nfheader_len); 1399 1400 xfrm_policy_put_afinfo(afinfo); 1401 1402 return err; 1403 } 1404 1405 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, 1406 const struct flowi *fl) 1407 { 1408 struct xfrm_policy_afinfo *afinfo = 1409 xfrm_policy_get_afinfo(xdst->u.dst.ops->family); 1410 int err; 1411 1412 if (!afinfo) 1413 return -EINVAL; 1414 1415 err = afinfo->fill_dst(xdst, dev, fl); 1416 1417 xfrm_policy_put_afinfo(afinfo); 1418 1419 return err; 1420 } 1421 1422 1423 /* Allocate chain of dst_entry's, attach known xfrm's, calculate 1424 * all the metrics... Shortly, bundle a bundle. 1425 */ 1426 1427 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, 1428 struct xfrm_state **xfrm, int nx, 1429 const struct flowi *fl, 1430 struct dst_entry *dst) 1431 { 1432 struct net *net = xp_net(policy); 1433 unsigned long now = jiffies; 1434 struct net_device *dev; 1435 struct xfrm_mode *inner_mode; 1436 struct dst_entry *dst_prev = NULL; 1437 struct dst_entry *dst0 = NULL; 1438 int i = 0; 1439 int err; 1440 int header_len = 0; 1441 int nfheader_len = 0; 1442 int trailer_len = 0; 1443 int tos; 1444 int family = policy->selector.family; 1445 xfrm_address_t saddr, daddr; 1446 1447 xfrm_flowi_addr_get(fl, &saddr, &daddr, family); 1448 1449 tos = xfrm_get_tos(fl, family); 1450 err = tos; 1451 if (tos < 0) 1452 goto put_states; 1453 1454 dst_hold(dst); 1455 1456 for (; i < nx; i++) { 1457 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); 1458 struct dst_entry *dst1 = &xdst->u.dst; 1459 1460 err = PTR_ERR(xdst); 1461 if (IS_ERR(xdst)) { 1462 dst_release(dst); 1463 goto put_states; 1464 } 1465 1466 if (xfrm[i]->sel.family == AF_UNSPEC) { 1467 inner_mode = xfrm_ip2inner_mode(xfrm[i], 1468 xfrm_af2proto(family)); 1469 if (!inner_mode) { 1470 err = -EAFNOSUPPORT; 1471 dst_release(dst); 1472 goto put_states; 1473 } 1474 } else 1475 inner_mode = xfrm[i]->inner_mode; 1476 1477 if (!dst_prev) 1478 dst0 = dst1; 1479 else { 1480 dst_prev->child = dst_clone(dst1); 1481 dst1->flags |= DST_NOHASH; 1482 } 1483 1484 xdst->route = dst; 1485 dst_copy_metrics(dst1, dst); 1486 1487 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 1488 family = xfrm[i]->props.family; 1489 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr, 1490 family); 1491 err = PTR_ERR(dst); 1492 if (IS_ERR(dst)) 1493 goto put_states; 1494 } else 1495 dst_hold(dst); 1496 1497 dst1->xfrm = xfrm[i]; 1498 xdst->xfrm_genid = xfrm[i]->genid; 1499 1500 dst1->obsolete = DST_OBSOLETE_FORCE_CHK; 1501 dst1->flags |= DST_HOST; 1502 dst1->lastuse = now; 1503 1504 dst1->input = dst_discard; 1505 dst1->output = inner_mode->afinfo->output; 1506 1507 dst1->next = dst_prev; 1508 dst_prev = dst1; 1509 1510 header_len += xfrm[i]->props.header_len; 1511 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT) 1512 nfheader_len += xfrm[i]->props.header_len; 1513 trailer_len += xfrm[i]->props.trailer_len; 1514 } 1515 1516 dst_prev->child = dst; 1517 dst0->path = dst; 1518 1519 err = -ENODEV; 1520 dev = dst->dev; 1521 if (!dev) 1522 goto free_dst; 1523 1524 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1525 xfrm_init_pmtu(dst_prev); 1526 1527 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { 1528 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; 1529 1530 err = xfrm_fill_dst(xdst, dev, fl); 1531 if (err) 1532 goto free_dst; 1533 1534 dst_prev->header_len = header_len; 1535 dst_prev->trailer_len = trailer_len; 1536 header_len -= xdst->u.dst.xfrm->props.header_len; 1537 trailer_len -= xdst->u.dst.xfrm->props.trailer_len; 1538 } 1539 1540 out: 1541 return dst0; 1542 1543 put_states: 1544 for (; i < nx; i++) 1545 xfrm_state_put(xfrm[i]); 1546 free_dst: 1547 if (dst0) 1548 dst_free(dst0); 1549 dst0 = ERR_PTR(err); 1550 goto out; 1551 } 1552 1553 static int inline 1554 xfrm_dst_alloc_copy(void **target, const void *src, int size) 1555 { 1556 if (!*target) { 1557 *target = kmalloc(size, GFP_ATOMIC); 1558 if (!*target) 1559 return -ENOMEM; 1560 } 1561 memcpy(*target, src, size); 1562 return 0; 1563 } 1564 1565 static int inline 1566 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel) 1567 { 1568 #ifdef CONFIG_XFRM_SUB_POLICY 1569 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1570 return xfrm_dst_alloc_copy((void **)&(xdst->partner), 1571 sel, sizeof(*sel)); 1572 #else 1573 return 0; 1574 #endif 1575 } 1576 1577 static int inline 1578 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl) 1579 { 1580 #ifdef CONFIG_XFRM_SUB_POLICY 1581 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1582 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); 1583 #else 1584 return 0; 1585 #endif 1586 } 1587 1588 static int xfrm_expand_policies(const struct flowi *fl, u16 family, 1589 struct xfrm_policy **pols, 1590 int *num_pols, int *num_xfrms) 1591 { 1592 int i; 1593 1594 if (*num_pols == 0 || !pols[0]) { 1595 *num_pols = 0; 1596 *num_xfrms = 0; 1597 return 0; 1598 } 1599 if (IS_ERR(pols[0])) 1600 return PTR_ERR(pols[0]); 1601 1602 *num_xfrms = pols[0]->xfrm_nr; 1603 1604 #ifdef CONFIG_XFRM_SUB_POLICY 1605 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW && 1606 pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 1607 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), 1608 XFRM_POLICY_TYPE_MAIN, 1609 fl, family, 1610 XFRM_POLICY_OUT); 1611 if (pols[1]) { 1612 if (IS_ERR(pols[1])) { 1613 xfrm_pols_put(pols, *num_pols); 1614 return PTR_ERR(pols[1]); 1615 } 1616 (*num_pols) ++; 1617 (*num_xfrms) += pols[1]->xfrm_nr; 1618 } 1619 } 1620 #endif 1621 for (i = 0; i < *num_pols; i++) { 1622 if (pols[i]->action != XFRM_POLICY_ALLOW) { 1623 *num_xfrms = -1; 1624 break; 1625 } 1626 } 1627 1628 return 0; 1629 1630 } 1631 1632 static struct xfrm_dst * 1633 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, 1634 const struct flowi *fl, u16 family, 1635 struct dst_entry *dst_orig) 1636 { 1637 struct net *net = xp_net(pols[0]); 1638 struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; 1639 struct dst_entry *dst; 1640 struct xfrm_dst *xdst; 1641 int err; 1642 1643 /* Try to instantiate a bundle */ 1644 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 1645 if (err <= 0) { 1646 if (err != 0 && err != -EAGAIN) 1647 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1648 return ERR_PTR(err); 1649 } 1650 1651 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig); 1652 if (IS_ERR(dst)) { 1653 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); 1654 return ERR_CAST(dst); 1655 } 1656 1657 xdst = (struct xfrm_dst *)dst; 1658 xdst->num_xfrms = err; 1659 if (num_pols > 1) 1660 err = xfrm_dst_update_parent(dst, &pols[1]->selector); 1661 else 1662 err = xfrm_dst_update_origin(dst, fl); 1663 if (unlikely(err)) { 1664 dst_free(dst); 1665 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); 1666 return ERR_PTR(err); 1667 } 1668 1669 xdst->num_pols = num_pols; 1670 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); 1671 xdst->policy_genid = atomic_read(&pols[0]->genid); 1672 1673 return xdst; 1674 } 1675 1676 static struct flow_cache_object * 1677 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, 1678 struct flow_cache_object *oldflo, void *ctx) 1679 { 1680 struct dst_entry *dst_orig = (struct dst_entry *)ctx; 1681 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1682 struct xfrm_dst *xdst, *new_xdst; 1683 int num_pols = 0, num_xfrms = 0, i, err, pol_dead; 1684 1685 /* Check if the policies from old bundle are usable */ 1686 xdst = NULL; 1687 if (oldflo) { 1688 xdst = container_of(oldflo, struct xfrm_dst, flo); 1689 num_pols = xdst->num_pols; 1690 num_xfrms = xdst->num_xfrms; 1691 pol_dead = 0; 1692 for (i = 0; i < num_pols; i++) { 1693 pols[i] = xdst->pols[i]; 1694 pol_dead |= pols[i]->walk.dead; 1695 } 1696 if (pol_dead) { 1697 dst_free(&xdst->u.dst); 1698 xdst = NULL; 1699 num_pols = 0; 1700 num_xfrms = 0; 1701 oldflo = NULL; 1702 } 1703 } 1704 1705 /* Resolve policies to use if we couldn't get them from 1706 * previous cache entry */ 1707 if (xdst == NULL) { 1708 num_pols = 1; 1709 pols[0] = __xfrm_policy_lookup(net, fl, family, dir); 1710 err = xfrm_expand_policies(fl, family, pols, 1711 &num_pols, &num_xfrms); 1712 if (err < 0) 1713 goto inc_error; 1714 if (num_pols == 0) 1715 return NULL; 1716 if (num_xfrms <= 0) 1717 goto make_dummy_bundle; 1718 } 1719 1720 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); 1721 if (IS_ERR(new_xdst)) { 1722 err = PTR_ERR(new_xdst); 1723 if (err != -EAGAIN) 1724 goto error; 1725 if (oldflo == NULL) 1726 goto make_dummy_bundle; 1727 dst_hold(&xdst->u.dst); 1728 return oldflo; 1729 } else if (new_xdst == NULL) { 1730 num_xfrms = 0; 1731 if (oldflo == NULL) 1732 goto make_dummy_bundle; 1733 xdst->num_xfrms = 0; 1734 dst_hold(&xdst->u.dst); 1735 return oldflo; 1736 } 1737 1738 /* Kill the previous bundle */ 1739 if (xdst) { 1740 /* The policies were stolen for newly generated bundle */ 1741 xdst->num_pols = 0; 1742 dst_free(&xdst->u.dst); 1743 } 1744 1745 /* Flow cache does not have reference, it dst_free()'s, 1746 * but we do need to return one reference for original caller */ 1747 dst_hold(&new_xdst->u.dst); 1748 return &new_xdst->flo; 1749 1750 make_dummy_bundle: 1751 /* We found policies, but there's no bundles to instantiate: 1752 * either because the policy blocks, has no transformations or 1753 * we could not build template (no xfrm_states).*/ 1754 xdst = xfrm_alloc_dst(net, family); 1755 if (IS_ERR(xdst)) { 1756 xfrm_pols_put(pols, num_pols); 1757 return ERR_CAST(xdst); 1758 } 1759 xdst->num_pols = num_pols; 1760 xdst->num_xfrms = num_xfrms; 1761 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); 1762 1763 dst_hold(&xdst->u.dst); 1764 return &xdst->flo; 1765 1766 inc_error: 1767 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1768 error: 1769 if (xdst != NULL) 1770 dst_free(&xdst->u.dst); 1771 else 1772 xfrm_pols_put(pols, num_pols); 1773 return ERR_PTR(err); 1774 } 1775 1776 static struct dst_entry *make_blackhole(struct net *net, u16 family, 1777 struct dst_entry *dst_orig) 1778 { 1779 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1780 struct dst_entry *ret; 1781 1782 if (!afinfo) { 1783 dst_release(dst_orig); 1784 return ERR_PTR(-EINVAL); 1785 } else { 1786 ret = afinfo->blackhole_route(net, dst_orig); 1787 } 1788 xfrm_policy_put_afinfo(afinfo); 1789 1790 return ret; 1791 } 1792 1793 /* Main function: finds/creates a bundle for given flow. 1794 * 1795 * At the moment we eat a raw IP route. Mostly to speed up lookups 1796 * on interfaces with disabled IPsec. 1797 */ 1798 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 1799 const struct flowi *fl, 1800 struct sock *sk, int flags) 1801 { 1802 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1803 struct flow_cache_object *flo; 1804 struct xfrm_dst *xdst; 1805 struct dst_entry *dst, *route; 1806 u16 family = dst_orig->ops->family; 1807 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); 1808 int i, err, num_pols, num_xfrms = 0, drop_pols = 0; 1809 1810 restart: 1811 dst = NULL; 1812 xdst = NULL; 1813 route = NULL; 1814 1815 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 1816 num_pols = 1; 1817 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1818 err = xfrm_expand_policies(fl, family, pols, 1819 &num_pols, &num_xfrms); 1820 if (err < 0) 1821 goto dropdst; 1822 1823 if (num_pols) { 1824 if (num_xfrms <= 0) { 1825 drop_pols = num_pols; 1826 goto no_transform; 1827 } 1828 1829 xdst = xfrm_resolve_and_create_bundle( 1830 pols, num_pols, fl, 1831 family, dst_orig); 1832 if (IS_ERR(xdst)) { 1833 xfrm_pols_put(pols, num_pols); 1834 err = PTR_ERR(xdst); 1835 goto dropdst; 1836 } else if (xdst == NULL) { 1837 num_xfrms = 0; 1838 drop_pols = num_pols; 1839 goto no_transform; 1840 } 1841 1842 dst_hold(&xdst->u.dst); 1843 1844 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 1845 xdst->u.dst.next = xfrm_policy_sk_bundles; 1846 xfrm_policy_sk_bundles = &xdst->u.dst; 1847 spin_unlock_bh(&xfrm_policy_sk_bundle_lock); 1848 1849 route = xdst->route; 1850 } 1851 } 1852 1853 if (xdst == NULL) { 1854 /* To accelerate a bit... */ 1855 if ((dst_orig->flags & DST_NOXFRM) || 1856 !net->xfrm.policy_count[XFRM_POLICY_OUT]) 1857 goto nopol; 1858 1859 flo = flow_cache_lookup(net, fl, family, dir, 1860 xfrm_bundle_lookup, dst_orig); 1861 if (flo == NULL) 1862 goto nopol; 1863 if (IS_ERR(flo)) { 1864 err = PTR_ERR(flo); 1865 goto dropdst; 1866 } 1867 xdst = container_of(flo, struct xfrm_dst, flo); 1868 1869 num_pols = xdst->num_pols; 1870 num_xfrms = xdst->num_xfrms; 1871 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols); 1872 route = xdst->route; 1873 } 1874 1875 dst = &xdst->u.dst; 1876 if (route == NULL && num_xfrms > 0) { 1877 /* The only case when xfrm_bundle_lookup() returns a 1878 * bundle with null route, is when the template could 1879 * not be resolved. It means policies are there, but 1880 * bundle could not be created, since we don't yet 1881 * have the xfrm_state's. We need to wait for KM to 1882 * negotiate new SA's or bail out with error.*/ 1883 if (net->xfrm.sysctl_larval_drop) { 1884 /* EREMOTE tells the caller to generate 1885 * a one-shot blackhole route. */ 1886 dst_release(dst); 1887 xfrm_pols_put(pols, drop_pols); 1888 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1889 1890 return make_blackhole(net, family, dst_orig); 1891 } 1892 if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) { 1893 DECLARE_WAITQUEUE(wait, current); 1894 1895 add_wait_queue(&net->xfrm.km_waitq, &wait); 1896 set_current_state(TASK_INTERRUPTIBLE); 1897 schedule(); 1898 set_current_state(TASK_RUNNING); 1899 remove_wait_queue(&net->xfrm.km_waitq, &wait); 1900 1901 if (!signal_pending(current)) { 1902 dst_release(dst); 1903 goto restart; 1904 } 1905 1906 err = -ERESTART; 1907 } else 1908 err = -EAGAIN; 1909 1910 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1911 goto error; 1912 } 1913 1914 no_transform: 1915 if (num_pols == 0) 1916 goto nopol; 1917 1918 if ((flags & XFRM_LOOKUP_ICMP) && 1919 !(pols[0]->flags & XFRM_POLICY_ICMP)) { 1920 err = -ENOENT; 1921 goto error; 1922 } 1923 1924 for (i = 0; i < num_pols; i++) 1925 pols[i]->curlft.use_time = get_seconds(); 1926 1927 if (num_xfrms < 0) { 1928 /* Prohibit the flow */ 1929 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); 1930 err = -EPERM; 1931 goto error; 1932 } else if (num_xfrms > 0) { 1933 /* Flow transformed */ 1934 dst_release(dst_orig); 1935 } else { 1936 /* Flow passes untransformed */ 1937 dst_release(dst); 1938 dst = dst_orig; 1939 } 1940 ok: 1941 xfrm_pols_put(pols, drop_pols); 1942 if (dst && dst->xfrm && 1943 dst->xfrm->props.mode == XFRM_MODE_TUNNEL) 1944 dst->flags |= DST_XFRM_TUNNEL; 1945 return dst; 1946 1947 nopol: 1948 if (!(flags & XFRM_LOOKUP_ICMP)) { 1949 dst = dst_orig; 1950 goto ok; 1951 } 1952 err = -ENOENT; 1953 error: 1954 dst_release(dst); 1955 dropdst: 1956 dst_release(dst_orig); 1957 xfrm_pols_put(pols, drop_pols); 1958 return ERR_PTR(err); 1959 } 1960 EXPORT_SYMBOL(xfrm_lookup); 1961 1962 static inline int 1963 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) 1964 { 1965 struct xfrm_state *x; 1966 1967 if (!skb->sp || idx < 0 || idx >= skb->sp->len) 1968 return 0; 1969 x = skb->sp->xvec[idx]; 1970 if (!x->type->reject) 1971 return 0; 1972 return x->type->reject(x, skb, fl); 1973 } 1974 1975 /* When skb is transformed back to its "native" form, we have to 1976 * check policy restrictions. At the moment we make this in maximally 1977 * stupid way. Shame on me. :-) Of course, connected sockets must 1978 * have policy cached at them. 1979 */ 1980 1981 static inline int 1982 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, 1983 unsigned short family) 1984 { 1985 if (xfrm_state_kern(x)) 1986 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family); 1987 return x->id.proto == tmpl->id.proto && 1988 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) && 1989 (x->props.reqid == tmpl->reqid || !tmpl->reqid) && 1990 x->props.mode == tmpl->mode && 1991 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) || 1992 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && 1993 !(x->props.mode != XFRM_MODE_TRANSPORT && 1994 xfrm_state_addr_cmp(tmpl, x, family)); 1995 } 1996 1997 /* 1998 * 0 or more than 0 is returned when validation is succeeded (either bypass 1999 * because of optional transport mode, or next index of the mathced secpath 2000 * state with the template. 2001 * -1 is returned when no matching template is found. 2002 * Otherwise "-2 - errored_index" is returned. 2003 */ 2004 static inline int 2005 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, 2006 unsigned short family) 2007 { 2008 int idx = start; 2009 2010 if (tmpl->optional) { 2011 if (tmpl->mode == XFRM_MODE_TRANSPORT) 2012 return start; 2013 } else 2014 start = -1; 2015 for (; idx < sp->len; idx++) { 2016 if (xfrm_state_ok(tmpl, sp->xvec[idx], family)) 2017 return ++idx; 2018 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) { 2019 if (start == -1) 2020 start = -2-idx; 2021 break; 2022 } 2023 } 2024 return start; 2025 } 2026 2027 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, 2028 unsigned int family, int reverse) 2029 { 2030 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2031 int err; 2032 2033 if (unlikely(afinfo == NULL)) 2034 return -EAFNOSUPPORT; 2035 2036 afinfo->decode_session(skb, fl, reverse); 2037 err = security_xfrm_decode_session(skb, &fl->flowi_secid); 2038 xfrm_policy_put_afinfo(afinfo); 2039 return err; 2040 } 2041 EXPORT_SYMBOL(__xfrm_decode_session); 2042 2043 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp) 2044 { 2045 for (; k < sp->len; k++) { 2046 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) { 2047 *idxp = k; 2048 return 1; 2049 } 2050 } 2051 2052 return 0; 2053 } 2054 2055 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, 2056 unsigned short family) 2057 { 2058 struct net *net = dev_net(skb->dev); 2059 struct xfrm_policy *pol; 2060 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 2061 int npols = 0; 2062 int xfrm_nr; 2063 int pi; 2064 int reverse; 2065 struct flowi fl; 2066 u8 fl_dir; 2067 int xerr_idx = -1; 2068 2069 reverse = dir & ~XFRM_POLICY_MASK; 2070 dir &= XFRM_POLICY_MASK; 2071 fl_dir = policy_to_flow_dir(dir); 2072 2073 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) { 2074 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 2075 return 0; 2076 } 2077 2078 nf_nat_decode_session(skb, &fl, family); 2079 2080 /* First, check used SA against their selectors. */ 2081 if (skb->sp) { 2082 int i; 2083 2084 for (i=skb->sp->len-1; i>=0; i--) { 2085 struct xfrm_state *x = skb->sp->xvec[i]; 2086 if (!xfrm_selector_match(&x->sel, &fl, family)) { 2087 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); 2088 return 0; 2089 } 2090 } 2091 } 2092 2093 pol = NULL; 2094 if (sk && sk->sk_policy[dir]) { 2095 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 2096 if (IS_ERR(pol)) { 2097 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2098 return 0; 2099 } 2100 } 2101 2102 if (!pol) { 2103 struct flow_cache_object *flo; 2104 2105 flo = flow_cache_lookup(net, &fl, family, fl_dir, 2106 xfrm_policy_lookup, NULL); 2107 if (IS_ERR_OR_NULL(flo)) 2108 pol = ERR_CAST(flo); 2109 else 2110 pol = container_of(flo, struct xfrm_policy, flo); 2111 } 2112 2113 if (IS_ERR(pol)) { 2114 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2115 return 0; 2116 } 2117 2118 if (!pol) { 2119 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) { 2120 xfrm_secpath_reject(xerr_idx, skb, &fl); 2121 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 2122 return 0; 2123 } 2124 return 1; 2125 } 2126 2127 pol->curlft.use_time = get_seconds(); 2128 2129 pols[0] = pol; 2130 npols ++; 2131 #ifdef CONFIG_XFRM_SUB_POLICY 2132 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 2133 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, 2134 &fl, family, 2135 XFRM_POLICY_IN); 2136 if (pols[1]) { 2137 if (IS_ERR(pols[1])) { 2138 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2139 return 0; 2140 } 2141 pols[1]->curlft.use_time = get_seconds(); 2142 npols ++; 2143 } 2144 } 2145 #endif 2146 2147 if (pol->action == XFRM_POLICY_ALLOW) { 2148 struct sec_path *sp; 2149 static struct sec_path dummy; 2150 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH]; 2151 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; 2152 struct xfrm_tmpl **tpp = tp; 2153 int ti = 0; 2154 int i, k; 2155 2156 if ((sp = skb->sp) == NULL) 2157 sp = &dummy; 2158 2159 for (pi = 0; pi < npols; pi++) { 2160 if (pols[pi] != pol && 2161 pols[pi]->action != XFRM_POLICY_ALLOW) { 2162 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 2163 goto reject; 2164 } 2165 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) { 2166 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); 2167 goto reject_error; 2168 } 2169 for (i = 0; i < pols[pi]->xfrm_nr; i++) 2170 tpp[ti++] = &pols[pi]->xfrm_vec[i]; 2171 } 2172 xfrm_nr = ti; 2173 if (npols > 1) { 2174 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); 2175 tpp = stp; 2176 } 2177 2178 /* For each tunnel xfrm, find the first matching tmpl. 2179 * For each tmpl before that, find corresponding xfrm. 2180 * Order is _important_. Later we will implement 2181 * some barriers, but at the moment barriers 2182 * are implied between each two transformations. 2183 */ 2184 for (i = xfrm_nr-1, k = 0; i >= 0; i--) { 2185 k = xfrm_policy_ok(tpp[i], sp, k, family); 2186 if (k < 0) { 2187 if (k < -1) 2188 /* "-2 - errored_index" returned */ 2189 xerr_idx = -(2+k); 2190 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 2191 goto reject; 2192 } 2193 } 2194 2195 if (secpath_has_nontransport(sp, k, &xerr_idx)) { 2196 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 2197 goto reject; 2198 } 2199 2200 xfrm_pols_put(pols, npols); 2201 return 1; 2202 } 2203 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 2204 2205 reject: 2206 xfrm_secpath_reject(xerr_idx, skb, &fl); 2207 reject_error: 2208 xfrm_pols_put(pols, npols); 2209 return 0; 2210 } 2211 EXPORT_SYMBOL(__xfrm_policy_check); 2212 2213 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) 2214 { 2215 struct net *net = dev_net(skb->dev); 2216 struct flowi fl; 2217 struct dst_entry *dst; 2218 int res = 1; 2219 2220 if (xfrm_decode_session(skb, &fl, family) < 0) { 2221 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 2222 return 0; 2223 } 2224 2225 skb_dst_force(skb); 2226 2227 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0); 2228 if (IS_ERR(dst)) { 2229 res = 0; 2230 dst = NULL; 2231 } 2232 skb_dst_set(skb, dst); 2233 return res; 2234 } 2235 EXPORT_SYMBOL(__xfrm_route_forward); 2236 2237 /* Optimize later using cookies and generation ids. */ 2238 2239 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 2240 { 2241 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete 2242 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to 2243 * get validated by dst_ops->check on every use. We do this 2244 * because when a normal route referenced by an XFRM dst is 2245 * obsoleted we do not go looking around for all parent 2246 * referencing XFRM dsts so that we can invalidate them. It 2247 * is just too much work. Instead we make the checks here on 2248 * every use. For example: 2249 * 2250 * XFRM dst A --> IPv4 dst X 2251 * 2252 * X is the "xdst->route" of A (X is also the "dst->path" of A 2253 * in this example). If X is marked obsolete, "A" will not 2254 * notice. That's what we are validating here via the 2255 * stale_bundle() check. 2256 * 2257 * When a policy's bundle is pruned, we dst_free() the XFRM 2258 * dst which causes it's ->obsolete field to be set to 2259 * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like 2260 * this, we want to force a new route lookup. 2261 */ 2262 if (dst->obsolete < 0 && !stale_bundle(dst)) 2263 return dst; 2264 2265 return NULL; 2266 } 2267 2268 static int stale_bundle(struct dst_entry *dst) 2269 { 2270 return !xfrm_bundle_ok((struct xfrm_dst *)dst); 2271 } 2272 2273 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) 2274 { 2275 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) { 2276 dst->dev = dev_net(dev)->loopback_dev; 2277 dev_hold(dst->dev); 2278 dev_put(dev); 2279 } 2280 } 2281 EXPORT_SYMBOL(xfrm_dst_ifdown); 2282 2283 static void xfrm_link_failure(struct sk_buff *skb) 2284 { 2285 /* Impossible. Such dst must be popped before reaches point of failure. */ 2286 } 2287 2288 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) 2289 { 2290 if (dst) { 2291 if (dst->obsolete) { 2292 dst_release(dst); 2293 dst = NULL; 2294 } 2295 } 2296 return dst; 2297 } 2298 2299 static void __xfrm_garbage_collect(struct net *net) 2300 { 2301 struct dst_entry *head, *next; 2302 2303 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 2304 head = xfrm_policy_sk_bundles; 2305 xfrm_policy_sk_bundles = NULL; 2306 spin_unlock_bh(&xfrm_policy_sk_bundle_lock); 2307 2308 while (head) { 2309 next = head->next; 2310 dst_free(head); 2311 head = next; 2312 } 2313 } 2314 2315 static void xfrm_garbage_collect(struct net *net) 2316 { 2317 flow_cache_flush(); 2318 __xfrm_garbage_collect(net); 2319 } 2320 2321 static void xfrm_garbage_collect_deferred(struct net *net) 2322 { 2323 flow_cache_flush_deferred(); 2324 __xfrm_garbage_collect(net); 2325 } 2326 2327 static void xfrm_init_pmtu(struct dst_entry *dst) 2328 { 2329 do { 2330 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 2331 u32 pmtu, route_mtu_cached; 2332 2333 pmtu = dst_mtu(dst->child); 2334 xdst->child_mtu_cached = pmtu; 2335 2336 pmtu = xfrm_state_mtu(dst->xfrm, pmtu); 2337 2338 route_mtu_cached = dst_mtu(xdst->route); 2339 xdst->route_mtu_cached = route_mtu_cached; 2340 2341 if (pmtu > route_mtu_cached) 2342 pmtu = route_mtu_cached; 2343 2344 dst_metric_set(dst, RTAX_MTU, pmtu); 2345 } while ((dst = dst->next)); 2346 } 2347 2348 /* Check that the bundle accepts the flow and its components are 2349 * still valid. 2350 */ 2351 2352 static int xfrm_bundle_ok(struct xfrm_dst *first) 2353 { 2354 struct dst_entry *dst = &first->u.dst; 2355 struct xfrm_dst *last; 2356 u32 mtu; 2357 2358 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) || 2359 (dst->dev && !netif_running(dst->dev))) 2360 return 0; 2361 2362 last = NULL; 2363 2364 do { 2365 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 2366 2367 if (dst->xfrm->km.state != XFRM_STATE_VALID) 2368 return 0; 2369 if (xdst->xfrm_genid != dst->xfrm->genid) 2370 return 0; 2371 if (xdst->num_pols > 0 && 2372 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) 2373 return 0; 2374 2375 mtu = dst_mtu(dst->child); 2376 if (xdst->child_mtu_cached != mtu) { 2377 last = xdst; 2378 xdst->child_mtu_cached = mtu; 2379 } 2380 2381 if (!dst_check(xdst->route, xdst->route_cookie)) 2382 return 0; 2383 mtu = dst_mtu(xdst->route); 2384 if (xdst->route_mtu_cached != mtu) { 2385 last = xdst; 2386 xdst->route_mtu_cached = mtu; 2387 } 2388 2389 dst = dst->child; 2390 } while (dst->xfrm); 2391 2392 if (likely(!last)) 2393 return 1; 2394 2395 mtu = last->child_mtu_cached; 2396 for (;;) { 2397 dst = &last->u.dst; 2398 2399 mtu = xfrm_state_mtu(dst->xfrm, mtu); 2400 if (mtu > last->route_mtu_cached) 2401 mtu = last->route_mtu_cached; 2402 dst_metric_set(dst, RTAX_MTU, mtu); 2403 2404 if (last == first) 2405 break; 2406 2407 last = (struct xfrm_dst *)last->u.dst.next; 2408 last->child_mtu_cached = mtu; 2409 } 2410 2411 return 1; 2412 } 2413 2414 static unsigned int xfrm_default_advmss(const struct dst_entry *dst) 2415 { 2416 return dst_metric_advmss(dst->path); 2417 } 2418 2419 static unsigned int xfrm_mtu(const struct dst_entry *dst) 2420 { 2421 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 2422 2423 return mtu ? : dst_mtu(dst->path); 2424 } 2425 2426 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, 2427 struct sk_buff *skb, 2428 const void *daddr) 2429 { 2430 return dst->path->ops->neigh_lookup(dst, skb, daddr); 2431 } 2432 2433 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2434 { 2435 struct net *net; 2436 int err = 0; 2437 if (unlikely(afinfo == NULL)) 2438 return -EINVAL; 2439 if (unlikely(afinfo->family >= NPROTO)) 2440 return -EAFNOSUPPORT; 2441 spin_lock(&xfrm_policy_afinfo_lock); 2442 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) 2443 err = -ENOBUFS; 2444 else { 2445 struct dst_ops *dst_ops = afinfo->dst_ops; 2446 if (likely(dst_ops->kmem_cachep == NULL)) 2447 dst_ops->kmem_cachep = xfrm_dst_cache; 2448 if (likely(dst_ops->check == NULL)) 2449 dst_ops->check = xfrm_dst_check; 2450 if (likely(dst_ops->default_advmss == NULL)) 2451 dst_ops->default_advmss = xfrm_default_advmss; 2452 if (likely(dst_ops->mtu == NULL)) 2453 dst_ops->mtu = xfrm_mtu; 2454 if (likely(dst_ops->negative_advice == NULL)) 2455 dst_ops->negative_advice = xfrm_negative_advice; 2456 if (likely(dst_ops->link_failure == NULL)) 2457 dst_ops->link_failure = xfrm_link_failure; 2458 if (likely(dst_ops->neigh_lookup == NULL)) 2459 dst_ops->neigh_lookup = xfrm_neigh_lookup; 2460 if (likely(afinfo->garbage_collect == NULL)) 2461 afinfo->garbage_collect = xfrm_garbage_collect_deferred; 2462 rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo); 2463 } 2464 spin_unlock(&xfrm_policy_afinfo_lock); 2465 2466 rtnl_lock(); 2467 for_each_net(net) { 2468 struct dst_ops *xfrm_dst_ops; 2469 2470 switch (afinfo->family) { 2471 case AF_INET: 2472 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops; 2473 break; 2474 #if IS_ENABLED(CONFIG_IPV6) 2475 case AF_INET6: 2476 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops; 2477 break; 2478 #endif 2479 default: 2480 BUG(); 2481 } 2482 *xfrm_dst_ops = *afinfo->dst_ops; 2483 } 2484 rtnl_unlock(); 2485 2486 return err; 2487 } 2488 EXPORT_SYMBOL(xfrm_policy_register_afinfo); 2489 2490 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) 2491 { 2492 int err = 0; 2493 if (unlikely(afinfo == NULL)) 2494 return -EINVAL; 2495 if (unlikely(afinfo->family >= NPROTO)) 2496 return -EAFNOSUPPORT; 2497 spin_lock(&xfrm_policy_afinfo_lock); 2498 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { 2499 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) 2500 err = -EINVAL; 2501 else 2502 RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family], 2503 NULL); 2504 } 2505 spin_unlock(&xfrm_policy_afinfo_lock); 2506 if (!err) { 2507 struct dst_ops *dst_ops = afinfo->dst_ops; 2508 2509 synchronize_rcu(); 2510 2511 dst_ops->kmem_cachep = NULL; 2512 dst_ops->check = NULL; 2513 dst_ops->negative_advice = NULL; 2514 dst_ops->link_failure = NULL; 2515 afinfo->garbage_collect = NULL; 2516 } 2517 return err; 2518 } 2519 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2520 2521 static void __net_init xfrm_dst_ops_init(struct net *net) 2522 { 2523 struct xfrm_policy_afinfo *afinfo; 2524 2525 rcu_read_lock(); 2526 afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]); 2527 if (afinfo) 2528 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; 2529 #if IS_ENABLED(CONFIG_IPV6) 2530 afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]); 2531 if (afinfo) 2532 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; 2533 #endif 2534 rcu_read_unlock(); 2535 } 2536 2537 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) 2538 { 2539 struct net_device *dev = ptr; 2540 2541 switch (event) { 2542 case NETDEV_DOWN: 2543 xfrm_garbage_collect(dev_net(dev)); 2544 } 2545 return NOTIFY_DONE; 2546 } 2547 2548 static struct notifier_block xfrm_dev_notifier = { 2549 .notifier_call = xfrm_dev_event, 2550 }; 2551 2552 #ifdef CONFIG_XFRM_STATISTICS 2553 static int __net_init xfrm_statistics_init(struct net *net) 2554 { 2555 int rv; 2556 2557 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics, 2558 sizeof(struct linux_xfrm_mib), 2559 __alignof__(struct linux_xfrm_mib)) < 0) 2560 return -ENOMEM; 2561 rv = xfrm_proc_init(net); 2562 if (rv < 0) 2563 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2564 return rv; 2565 } 2566 2567 static void xfrm_statistics_fini(struct net *net) 2568 { 2569 xfrm_proc_fini(net); 2570 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2571 } 2572 #else 2573 static int __net_init xfrm_statistics_init(struct net *net) 2574 { 2575 return 0; 2576 } 2577 2578 static void xfrm_statistics_fini(struct net *net) 2579 { 2580 } 2581 #endif 2582 2583 static int __net_init xfrm_policy_init(struct net *net) 2584 { 2585 unsigned int hmask, sz; 2586 int dir; 2587 2588 if (net_eq(net, &init_net)) 2589 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache", 2590 sizeof(struct xfrm_dst), 2591 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2592 NULL); 2593 2594 hmask = 8 - 1; 2595 sz = (hmask+1) * sizeof(struct hlist_head); 2596 2597 net->xfrm.policy_byidx = xfrm_hash_alloc(sz); 2598 if (!net->xfrm.policy_byidx) 2599 goto out_byidx; 2600 net->xfrm.policy_idx_hmask = hmask; 2601 2602 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 2603 struct xfrm_policy_hash *htab; 2604 2605 net->xfrm.policy_count[dir] = 0; 2606 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 2607 2608 htab = &net->xfrm.policy_bydst[dir]; 2609 htab->table = xfrm_hash_alloc(sz); 2610 if (!htab->table) 2611 goto out_bydst; 2612 htab->hmask = hmask; 2613 } 2614 2615 INIT_LIST_HEAD(&net->xfrm.policy_all); 2616 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); 2617 if (net_eq(net, &init_net)) 2618 register_netdevice_notifier(&xfrm_dev_notifier); 2619 return 0; 2620 2621 out_bydst: 2622 for (dir--; dir >= 0; dir--) { 2623 struct xfrm_policy_hash *htab; 2624 2625 htab = &net->xfrm.policy_bydst[dir]; 2626 xfrm_hash_free(htab->table, sz); 2627 } 2628 xfrm_hash_free(net->xfrm.policy_byidx, sz); 2629 out_byidx: 2630 return -ENOMEM; 2631 } 2632 2633 static void xfrm_policy_fini(struct net *net) 2634 { 2635 struct xfrm_audit audit_info; 2636 unsigned int sz; 2637 int dir; 2638 2639 flush_work(&net->xfrm.policy_hash_work); 2640 #ifdef CONFIG_XFRM_SUB_POLICY 2641 audit_info.loginuid = INVALID_UID; 2642 audit_info.sessionid = -1; 2643 audit_info.secid = 0; 2644 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info); 2645 #endif 2646 audit_info.loginuid = INVALID_UID; 2647 audit_info.sessionid = -1; 2648 audit_info.secid = 0; 2649 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); 2650 2651 WARN_ON(!list_empty(&net->xfrm.policy_all)); 2652 2653 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 2654 struct xfrm_policy_hash *htab; 2655 2656 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 2657 2658 htab = &net->xfrm.policy_bydst[dir]; 2659 sz = (htab->hmask + 1) * sizeof(struct hlist_head); 2660 WARN_ON(!hlist_empty(htab->table)); 2661 xfrm_hash_free(htab->table, sz); 2662 } 2663 2664 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head); 2665 WARN_ON(!hlist_empty(net->xfrm.policy_byidx)); 2666 xfrm_hash_free(net->xfrm.policy_byidx, sz); 2667 } 2668 2669 static int __net_init xfrm_net_init(struct net *net) 2670 { 2671 int rv; 2672 2673 rv = xfrm_statistics_init(net); 2674 if (rv < 0) 2675 goto out_statistics; 2676 rv = xfrm_state_init(net); 2677 if (rv < 0) 2678 goto out_state; 2679 rv = xfrm_policy_init(net); 2680 if (rv < 0) 2681 goto out_policy; 2682 xfrm_dst_ops_init(net); 2683 rv = xfrm_sysctl_init(net); 2684 if (rv < 0) 2685 goto out_sysctl; 2686 return 0; 2687 2688 out_sysctl: 2689 xfrm_policy_fini(net); 2690 out_policy: 2691 xfrm_state_fini(net); 2692 out_state: 2693 xfrm_statistics_fini(net); 2694 out_statistics: 2695 return rv; 2696 } 2697 2698 static void __net_exit xfrm_net_exit(struct net *net) 2699 { 2700 xfrm_sysctl_fini(net); 2701 xfrm_policy_fini(net); 2702 xfrm_state_fini(net); 2703 xfrm_statistics_fini(net); 2704 } 2705 2706 static struct pernet_operations __net_initdata xfrm_net_ops = { 2707 .init = xfrm_net_init, 2708 .exit = xfrm_net_exit, 2709 }; 2710 2711 void __init xfrm_init(void) 2712 { 2713 register_pernet_subsys(&xfrm_net_ops); 2714 xfrm_input_init(); 2715 } 2716 2717 #ifdef CONFIG_AUDITSYSCALL 2718 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp, 2719 struct audit_buffer *audit_buf) 2720 { 2721 struct xfrm_sec_ctx *ctx = xp->security; 2722 struct xfrm_selector *sel = &xp->selector; 2723 2724 if (ctx) 2725 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", 2726 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str); 2727 2728 switch(sel->family) { 2729 case AF_INET: 2730 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4); 2731 if (sel->prefixlen_s != 32) 2732 audit_log_format(audit_buf, " src_prefixlen=%d", 2733 sel->prefixlen_s); 2734 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4); 2735 if (sel->prefixlen_d != 32) 2736 audit_log_format(audit_buf, " dst_prefixlen=%d", 2737 sel->prefixlen_d); 2738 break; 2739 case AF_INET6: 2740 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6); 2741 if (sel->prefixlen_s != 128) 2742 audit_log_format(audit_buf, " src_prefixlen=%d", 2743 sel->prefixlen_s); 2744 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6); 2745 if (sel->prefixlen_d != 128) 2746 audit_log_format(audit_buf, " dst_prefixlen=%d", 2747 sel->prefixlen_d); 2748 break; 2749 } 2750 } 2751 2752 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, 2753 kuid_t auid, u32 sessionid, u32 secid) 2754 { 2755 struct audit_buffer *audit_buf; 2756 2757 audit_buf = xfrm_audit_start("SPD-add"); 2758 if (audit_buf == NULL) 2759 return; 2760 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2761 audit_log_format(audit_buf, " res=%u", result); 2762 xfrm_audit_common_policyinfo(xp, audit_buf); 2763 audit_log_end(audit_buf); 2764 } 2765 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); 2766 2767 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 2768 kuid_t auid, u32 sessionid, u32 secid) 2769 { 2770 struct audit_buffer *audit_buf; 2771 2772 audit_buf = xfrm_audit_start("SPD-delete"); 2773 if (audit_buf == NULL) 2774 return; 2775 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2776 audit_log_format(audit_buf, " res=%u", result); 2777 xfrm_audit_common_policyinfo(xp, audit_buf); 2778 audit_log_end(audit_buf); 2779 } 2780 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete); 2781 #endif 2782 2783 #ifdef CONFIG_XFRM_MIGRATE 2784 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, 2785 const struct xfrm_selector *sel_tgt) 2786 { 2787 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { 2788 if (sel_tgt->family == sel_cmp->family && 2789 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr, 2790 sel_cmp->family) == 0 && 2791 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr, 2792 sel_cmp->family) == 0 && 2793 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && 2794 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { 2795 return true; 2796 } 2797 } else { 2798 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) { 2799 return true; 2800 } 2801 } 2802 return false; 2803 } 2804 2805 static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel, 2806 u8 dir, u8 type) 2807 { 2808 struct xfrm_policy *pol, *ret = NULL; 2809 struct hlist_node *entry; 2810 struct hlist_head *chain; 2811 u32 priority = ~0U; 2812 2813 read_lock_bh(&xfrm_policy_lock); 2814 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir); 2815 hlist_for_each_entry(pol, entry, chain, bydst) { 2816 if (xfrm_migrate_selector_match(sel, &pol->selector) && 2817 pol->type == type) { 2818 ret = pol; 2819 priority = ret->priority; 2820 break; 2821 } 2822 } 2823 chain = &init_net.xfrm.policy_inexact[dir]; 2824 hlist_for_each_entry(pol, entry, chain, bydst) { 2825 if (xfrm_migrate_selector_match(sel, &pol->selector) && 2826 pol->type == type && 2827 pol->priority < priority) { 2828 ret = pol; 2829 break; 2830 } 2831 } 2832 2833 if (ret) 2834 xfrm_pol_hold(ret); 2835 2836 read_unlock_bh(&xfrm_policy_lock); 2837 2838 return ret; 2839 } 2840 2841 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t) 2842 { 2843 int match = 0; 2844 2845 if (t->mode == m->mode && t->id.proto == m->proto && 2846 (m->reqid == 0 || t->reqid == m->reqid)) { 2847 switch (t->mode) { 2848 case XFRM_MODE_TUNNEL: 2849 case XFRM_MODE_BEET: 2850 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr, 2851 m->old_family) == 0 && 2852 xfrm_addr_cmp(&t->saddr, &m->old_saddr, 2853 m->old_family) == 0) { 2854 match = 1; 2855 } 2856 break; 2857 case XFRM_MODE_TRANSPORT: 2858 /* in case of transport mode, template does not store 2859 any IP addresses, hence we just compare mode and 2860 protocol */ 2861 match = 1; 2862 break; 2863 default: 2864 break; 2865 } 2866 } 2867 return match; 2868 } 2869 2870 /* update endpoint address(es) of template(s) */ 2871 static int xfrm_policy_migrate(struct xfrm_policy *pol, 2872 struct xfrm_migrate *m, int num_migrate) 2873 { 2874 struct xfrm_migrate *mp; 2875 int i, j, n = 0; 2876 2877 write_lock_bh(&pol->lock); 2878 if (unlikely(pol->walk.dead)) { 2879 /* target policy has been deleted */ 2880 write_unlock_bh(&pol->lock); 2881 return -ENOENT; 2882 } 2883 2884 for (i = 0; i < pol->xfrm_nr; i++) { 2885 for (j = 0, mp = m; j < num_migrate; j++, mp++) { 2886 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i])) 2887 continue; 2888 n++; 2889 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL && 2890 pol->xfrm_vec[i].mode != XFRM_MODE_BEET) 2891 continue; 2892 /* update endpoints */ 2893 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr, 2894 sizeof(pol->xfrm_vec[i].id.daddr)); 2895 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr, 2896 sizeof(pol->xfrm_vec[i].saddr)); 2897 pol->xfrm_vec[i].encap_family = mp->new_family; 2898 /* flush bundles */ 2899 atomic_inc(&pol->genid); 2900 } 2901 } 2902 2903 write_unlock_bh(&pol->lock); 2904 2905 if (!n) 2906 return -ENODATA; 2907 2908 return 0; 2909 } 2910 2911 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) 2912 { 2913 int i, j; 2914 2915 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) 2916 return -EINVAL; 2917 2918 for (i = 0; i < num_migrate; i++) { 2919 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr, 2920 m[i].old_family) == 0) && 2921 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr, 2922 m[i].old_family) == 0)) 2923 return -EINVAL; 2924 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) || 2925 xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) 2926 return -EINVAL; 2927 2928 /* check if there is any duplicated entry */ 2929 for (j = i + 1; j < num_migrate; j++) { 2930 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr, 2931 sizeof(m[i].old_daddr)) && 2932 !memcmp(&m[i].old_saddr, &m[j].old_saddr, 2933 sizeof(m[i].old_saddr)) && 2934 m[i].proto == m[j].proto && 2935 m[i].mode == m[j].mode && 2936 m[i].reqid == m[j].reqid && 2937 m[i].old_family == m[j].old_family) 2938 return -EINVAL; 2939 } 2940 } 2941 2942 return 0; 2943 } 2944 2945 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2946 struct xfrm_migrate *m, int num_migrate, 2947 struct xfrm_kmaddress *k) 2948 { 2949 int i, err, nx_cur = 0, nx_new = 0; 2950 struct xfrm_policy *pol = NULL; 2951 struct xfrm_state *x, *xc; 2952 struct xfrm_state *x_cur[XFRM_MAX_DEPTH]; 2953 struct xfrm_state *x_new[XFRM_MAX_DEPTH]; 2954 struct xfrm_migrate *mp; 2955 2956 if ((err = xfrm_migrate_check(m, num_migrate)) < 0) 2957 goto out; 2958 2959 /* Stage 1 - find policy */ 2960 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) { 2961 err = -ENOENT; 2962 goto out; 2963 } 2964 2965 /* Stage 2 - find and update state(s) */ 2966 for (i = 0, mp = m; i < num_migrate; i++, mp++) { 2967 if ((x = xfrm_migrate_state_find(mp))) { 2968 x_cur[nx_cur] = x; 2969 nx_cur++; 2970 if ((xc = xfrm_state_migrate(x, mp))) { 2971 x_new[nx_new] = xc; 2972 nx_new++; 2973 } else { 2974 err = -ENODATA; 2975 goto restore_state; 2976 } 2977 } 2978 } 2979 2980 /* Stage 3 - update policy */ 2981 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0) 2982 goto restore_state; 2983 2984 /* Stage 4 - delete old state(s) */ 2985 if (nx_cur) { 2986 xfrm_states_put(x_cur, nx_cur); 2987 xfrm_states_delete(x_cur, nx_cur); 2988 } 2989 2990 /* Stage 5 - announce */ 2991 km_migrate(sel, dir, type, m, num_migrate, k); 2992 2993 xfrm_pol_put(pol); 2994 2995 return 0; 2996 out: 2997 return err; 2998 2999 restore_state: 3000 if (pol) 3001 xfrm_pol_put(pol); 3002 if (nx_cur) 3003 xfrm_states_put(x_cur, nx_cur); 3004 if (nx_new) 3005 xfrm_states_delete(x_new, nx_new); 3006 3007 return err; 3008 } 3009 EXPORT_SYMBOL(xfrm_migrate); 3010 #endif 3011