/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <asm/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

#include "xfrm_hash.h"

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */
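
/* Illustrative sketch (not a function of this file): the two tables serve
 * the two directions.  On input, the receiver resolves the SA from the SPI
 * carried in the packet; on output, the sender asks which SAs exist toward
 * a destination.  Roughly, assuming an IPv4 ESP flow:
 *
 *	// input path: SPI is authoritative
 *	x = xfrm_state_lookup(net, mark, &daddr, spi, IPPROTO_ESP, AF_INET);
 *
 *	// output path: destination/reqid select candidate SAs
 *	x = xfrm_state_find(&daddr, &saddr, fl, tmpl, pol, &err, AF_INET);
 *
 * Both return a held reference; callers drop it with xfrm_state_put().
 */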

static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int xfrm_dst_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}

static inline unsigned int xfrm_src_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}

static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
	      __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}

static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}

static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
	return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}

static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = net->xfrm.state_hmask; i >= 0; i--)
		xfrm_hash_transfer(net->xfrm.state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = net->xfrm.state_bydst;
	osrc = net->xfrm.state_bysrc;
	ospi = net->xfrm.state_byspi;
	ohashmask = net->xfrm.state_hmask;

	net->xfrm.state_bydst = ndst;
	net->xfrm.state_bysrc = nsrc;
	net->xfrm.state_byspi = nspi;
	net->xfrm.state_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
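
/* Worked example (illustrative): with the initial 8-bucket tables from
 * xfrm_state_init(), state_hmask is 7, so xfrm_hash_new_size() returns
 * (7 + 1) << 1 == 16 buckets' worth of hlist_heads and the new mask
 * becomes 15.  Each successful resize therefore doubles all three tables
 * in lock step.
 */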

static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];

static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);

static DEFINE_SPINLOCK(xfrm_type_lock);
int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	const struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;
	spin_lock_bh(&xfrm_type_lock);

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	spin_unlock_bh(&xfrm_type_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);
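
/* Minimal registration sketch (hypothetical module; the my_esp_* names are
 * illustrative only): a protocol implementation fills in a struct xfrm_type
 * and registers it per family.
 *
 *	static const struct xfrm_type my_esp_type = {
 *		.description	= "MY-ESP4",
 *		.owner		= THIS_MODULE,
 *		.proto		= IPPROTO_ESP,
 *		.init_state	= my_esp_init_state,
 *		.destructor	= my_esp_destroy,
 *		.input		= my_esp_input,
 *		.output		= my_esp_output,
 *	};
 *
 *	err = xfrm_register_type(&my_esp_type, AF_INET);
 *
 * A duplicate registration for the same proto/family fails with -EEXIST,
 * as enforced above under xfrm_type_lock.
 */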

int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	const struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;
	spin_lock_bh(&xfrm_type_lock);

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	spin_unlock_bh(&xfrm_type_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}

static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}

static DEFINE_SPINLOCK(xfrm_mode_lock);
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	spin_lock_bh(&xfrm_mode_lock);
	if (modemap[mode->encap])
		goto out;

	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	spin_unlock_bh(&xfrm_mode_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	spin_lock_bh(&xfrm_mode_lock);
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		module_put(mode->afinfo->owner);
		err = 0;
	}

	spin_unlock_bh(&xfrm_mode_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);

static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}

static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	tasklet_hrtimer_cancel(&x->mtimer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	kfree(x->replay_esn);
	kfree(x->preplay_esn);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->inner_mode_iaf)
		xfrm_put_mode(x->inner_mode_iaf);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}

static void xfrm_state_gc_task(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_gc_work);
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
		xfrm_state_gc_destroy(x);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
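
/* Worked example (illustrative) for the lifetime timer below: an SA
 * created at t=0 with soft_add_expires_seconds=3300 and
 * hard_add_expires_seconds=3600 marks km.dying and sends the soft expire
 * event at t~3300 so the key manager can rekey, rearms for the remaining
 * 300 seconds, and hard-expires (deletes) the SA at t~3600.
 */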

static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
	struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
	struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			if (x->xflags & XFRM_SOFT_EXPIRE) {
				/* enter hard expire without soft expire first?!
				 * setting a new date could trigger this.
				 * workaround: fix x->curlft.add_time as below:
				 */
				x->curlft.add_time = now - x->saved_tmo - 1;
				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
			} else
				goto expired;
		}
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			x->xflags &= ~XFRM_SOFT_EXPIRE;
		} else if (tmo < next) {
			next = tmo;
			x->xflags |= XFRM_SOFT_EXPIRE;
			x->saved_tmo = tmo;
		}
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX) {
		tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
	}

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
		x->km.state = XFRM_STATE_EXPIRED;

	err = __xfrm_state_delete(x);
	if (!err)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1, true);

out:
	spin_unlock(&x->lock);
	return HRTIMER_NORESTART;
}

static void xfrm_replay_timer_handler(unsigned long data);

struct xfrm_state *xfrm_state_alloc(struct net *net)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		write_pnet(&x->xs_net, net);
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->km.all);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
				     CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
			    (unsigned long)x);
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->inner_mode = NULL;
		x->inner_mode_iaf = NULL;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

void __xfrm_state_destroy(struct xfrm_state *x)
{
	struct net *net = xs_net(x);

	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->gclist, &net->xfrm.state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&net->xfrm.state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&net->xfrm.xfrm_state_lock);
		list_del(&x->km.all);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		net->xfrm.state_num--;
		spin_unlock(&net->xfrm.xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			    (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	return 0;
}
#endif

int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
{
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(net, proto, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
	if (cnt)
		err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);

void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	si->sadcnt = net->xfrm.state_num;
	si->sadhcnt = net->xfrm.state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);

static int
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
		    const struct xfrm_tmpl *tmpl,
		    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		    unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(&x->sel, fl);

	if (family != tmpl->encap_family) {
		xfrm_state_put_afinfo(afinfo);
		afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
		if (!afinfo)
			return -1;
	}
	afinfo->init_temprop(x, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}

static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
					      const xfrm_address_t *daddr,
					      __be32 spi, u8 proto,
					      unsigned short family)
{
	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
	struct xfrm_state *x;

	hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
						     const xfrm_address_t *daddr,
						     const xfrm_address_t *saddr,
						     u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
	struct xfrm_state *x;

	hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	struct net *net = xs_net(x);
	u32 mark = x->mark.v & x->mark.m;

	if (use_spi)
		return __xfrm_state_lookup(net, mark, &x->id.daddr,
					   x->id.spi, x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(net, mark,
						  &x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}

static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
	if (have_hash_collision &&
	    (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
	    net->xfrm.state_num > net->xfrm.state_hmask)
		schedule_work(&net->xfrm.state_hash_work);
}

static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
			       const struct flowi *fl, unsigned short family,
			       struct xfrm_state **best, int *acq_in_progress,
			       int *error)
{
	/* Resolution logic:
	 * 1. There is a valid state with matching selector. Done.
	 * 2. Valid state with inappropriate selector. Skip.
	 *
	 * Entering area of "sysdeps".
	 *
	 * 3. If state is not valid, selector is temporary, it selects
	 *    only session which triggered previous resolution. Key
	 *    manager will do something to install a state with proper
	 *    selector.
	 */
	if (x->km.state == XFRM_STATE_VALID) {
		if ((x->sel.family &&
		     !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
		    !security_xfrm_state_pol_flow_match(x, pol, fl))
			return;

		if (!*best ||
		    (*best)->km.dying > x->km.dying ||
		    ((*best)->km.dying == x->km.dying &&
		     (*best)->curlft.add_time < x->curlft.add_time))
			*best = x;
	} else if (x->km.state == XFRM_STATE_ACQ) {
		*acq_in_progress = 1;
	} else if (x->km.state == XFRM_STATE_ERROR ||
		   x->km.state == XFRM_STATE_EXPIRED) {
		if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
		    security_xfrm_state_pol_flow_match(x, pol, fl))
			*error = -ESRCH;
	}
}

struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		const struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	static xfrm_address_t saddr_wildcard = { };
	struct net *net = xp_net(pol);
	unsigned int h, h_wildcard;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	u32 mark = pol->mark.v & pol->mark.m;
	unsigned short encap_family = tmpl->encap_family;
	struct km_event c;

	to_put = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, encap_family,
					   &best, &acquire_in_progress, &error);
	}
	if (best || acquire_in_progress)
		goto found;

	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, encap_family,
					   &best, &acquire_in_progress, &error);
	}

found:
	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
					      tmpl->id.proto, encap_family)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}

		c.net = net;
		/* If the KMs have no listeners (yet...), avoid allocating an SA
		 * for each and every packet - garbage collection might not
		 * handle the flood.
		 */
		if (!km_is_alive(&c)) {
			error = -ESRCH;
			goto out;
		}

		x = xfrm_state_alloc(net);
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary state matching only
		 * to current session. */
		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
		memcpy(&x->mark, &pol->mark, sizeof(x->mark));

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			list_add(&x->km.all, &net->xfrm.state_all);
			hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
			h = xfrm_src_hash(net, daddr, saddr, encap_family);
			hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
				hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
			tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
			net->xfrm.state_num++;
			xfrm_hash_grow_check(net, x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	if (to_put)
		xfrm_state_put(to_put);
	return x;
}
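
/* Note on the ACQUIRE path above: when no usable SA exists, a larval
 * XFRM_STATE_ACQ entry is planted in the tables and the key managers are
 * asked via km_query() to negotiate a real SA; lookups that race with the
 * negotiation see acquire_in_progress and fail with -EAGAIN rather than
 * planting a second larval state.
 */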

struct xfrm_state *
xfrm_stateonly_find(struct net *net, u32 mark,
		    xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);

struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
					   unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_walk *w;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	list_for_each_entry(w, &net->xfrm.state_all, all) {
		x = container_of(w, struct xfrm_state, km);
		if (x->props.family != family ||
		    x->id.spi != spi)
			continue;

		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		xfrm_state_hold(x);
		return x;
	}
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_lookup_byspi);

static void __xfrm_state_insert(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	unsigned int h;

	list_add(&x->km.all, &net->xfrm.state_all);

	h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);

	h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
	}

	tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	net->xfrm.state_num++;

	xfrm_hash_grow_check(net, x->bydst.next != NULL);
}

/* net->xfrm.xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	struct net *net = xs_net(xnew);
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	unsigned int h;
	u32 mark = xnew->mark.v & xnew->mark.m;

	h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
		    xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
			x->genid++;
	}
}

void xfrm_state_insert(struct xfrm_state *x)
{
	struct net *net = xs_net(x);

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);

/* net->xfrm.xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(struct net *net,
					  const struct xfrm_mark *m,
					  unsigned short family, u8 mode,
					  u32 reqid, u8 proto,
					  const xfrm_address_t *daddr,
					  const xfrm_address_t *saddr,
					  int create)
{
	unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
	struct xfrm_state *x;
	u32 mark = m->v & m->m;

	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0 ||
		    x->id.proto != proto ||
		    (mark & x->mark.m) != x->mark.v ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc(net);
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			*(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr;
			*(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr;
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			*(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr;
			*(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr;
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->mark.v = m->v;
		x->mark.m = m->m;
		x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
		xfrm_state_hold(x);
		tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
		list_add(&x->km.all, &net->xfrm.state_all);
		hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
		h = xfrm_src_hash(net, daddr, saddr, family);
		hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);

		net->xfrm.state_num++;

		xfrm_hash_grow_check(net, x->bydst.next != NULL);
	}

	return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);

int xfrm_state_add(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	struct xfrm_state *x1, *to_put;
	int family;
	int err;
	u32 mark = x->mark.v & x->mark.m;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	to_put = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		to_put = x1;
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
			to_put = x1;
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
				     x->props.reqid, x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	if (to_put)
		xfrm_state_put(to_put);

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
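
/* Usage sketch (illustrative, not a caller from this file): a key manager
 * that has negotiated an SA typically allocates a state, fills in id/props
 * and algorithms, and hands it to xfrm_state_add(), which also resolves a
 * matching ACQUIRE placeholder created by xfrm_state_find():
 *
 *	struct xfrm_state *x = xfrm_state_alloc(net);
 *
 *	if (!x)
 *		return -ENOMEM;
 *	x->id.proto = IPPROTO_ESP;	// plus daddr, spi, ...
 *	...				// props, lifetimes, algorithms
 *	err = xfrm_state_add(x);	// -EEXIST if already present
 *	if (err)
 *		xfrm_state_put(x);	// drop the allocation reference
 */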

#ifdef CONFIG_XFRM_MIGRATE
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
{
	struct net *net = xs_net(orig);
	struct xfrm_state *x = xfrm_state_alloc(net);
	if (!x)
		goto out;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_auth_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->aead) {
		x->aead = xfrm_algo_aead_clone(orig->aead);
		if (!x->aead)
			goto error;
	}
	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	if (orig->replay_esn) {
		if (xfrm_replay_clone(x, orig))
			goto error;
	}

	memcpy(&x->mark, &orig->mark, sizeof(x->mark));

	if (xfrm_init_state(x) < 0)
		goto error;

	x->props.flags = orig->props.flags;
	x->props.extra_flags = orig->props.extra_flags;

	x->tfcpad = orig->tfcpad;
	x->replay_maxdiff = orig->replay_maxdiff;
	x->replay_maxage = orig->replay_maxage;
	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

error:
	xfrm_state_put(x);
out:
	return NULL;
}

struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
{
	unsigned int h;
	struct xfrm_state *x = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	if (m->reqid) {
		h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
					     m->old_family) ||
			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
					     m->old_family))
				continue;
			xfrm_state_hold(x);
			break;
		}
	} else {
		h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
					     m->old_family) ||
			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
					     m->old_family))
				continue;
			xfrm_state_hold(x);
			break;
		}
	}

	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);

struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
				      struct xfrm_migrate *m)
{
	struct xfrm_state *xc;

	xc = xfrm_state_clone(x);
	if (!xc)
		return NULL;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* add state */
	if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* care is needed when the destination address of the
		 * state is to be updated, since it is part of the SA
		 * lookup triplet */
		xfrm_state_insert(xc);
	} else {
		if (xfrm_state_add(xc) < 0)
			goto error;
	}

	return xc;
error:
	xfrm_state_put(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif

int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
	struct net *net = xs_net(x);

	to_put = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		to_put = x1;
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	if (to_put)
		xfrm_state_put(to_put);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
		x->km.state = XFRM_STATE_DEAD;
		__xfrm_state_put(x);
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);

int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = get_seconds();

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		tasklet_hrtimer_start(&x->mtimer, ktime_set(0, 0), HRTIMER_MODE_REL);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
		  u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(struct net *net, u32 mark,
			 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
	      u8 proto, const xfrm_address_t *daddr,
	      const xfrm_address_t *saddr, int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family, struct net *net)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	struct net *net = xs_net(*src);

	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif

/* Silly enough, but I'm lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
	int i;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    (mark & x->mark.m) == x->mark.v &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __xfrm_find_acq_byseq(net, mark, seq);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32 xfrm_get_acqseq(void)
{
	u32 res;
	static atomic_t acqseq;

	do {
		res = atomic_inc_return(&acqseq);
	} while (!res);

	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

int verify_spi_info(u8 proto, u32 min, u32 max)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
		break;

	case IPPROTO_COMP:
		/* IPCOMP spi is 16-bits. */
		if (max >= 0x10000)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	if (min > max)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(verify_spi_info);
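
/* Worked example (illustrative): AH and ESP accept any 32-bit range, so
 * verify_spi_info(IPPROTO_ESP, 0x100, 0xffffffff) returns 0, while IPCOMP
 * CPIs are carried in 16 bits, so verify_spi_info(IPPROTO_COMP, 0x100,
 * 0x10000) fails with -EINVAL; an inverted range (min > max) is rejected
 * for every protocol.
 */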

int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	struct net *net = xs_net(x);
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);
	u32 mark = x->mark.v & x->mark.m;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		for (h = 0; h < high-low+1; h++) {
			spi = low + prandom_u32()%(high-low+1);
			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
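
/* Usage sketch (illustrative): a key manager needing an SPI for a larval
 * state asks for one inside its negotiated range; the allocator probes
 * random candidates and links the state into the byspi table on success,
 * while an exhausted range leaves the SPI zero and returns -ENOENT:
 *
 *	err = xfrm_alloc_spi(x, 0x00000100, 0x00000fff);
 *	if (!err)
 *		;	// x->id.spi now holds a unique SPI (network order)
 */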

static bool __xfrm_state_filter_match(struct xfrm_state *x,
				      struct xfrm_address_filter *filter)
{
	if (filter) {
		if ((filter->family == AF_INET ||
		     filter->family == AF_INET6) &&
		    x->props.family != filter->family)
			return false;

		return addr_match(&x->props.saddr, &filter->saddr,
				  filter->splen) &&
		       addr_match(&x->id.daddr, &filter->daddr,
				  filter->dplen);
	}
	return true;
}

int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *state;
	struct xfrm_state_walk *x;
	int err = 0;

	if (walk->seq != 0 && list_empty(&walk->all))
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	if (list_empty(&walk->all))
		x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
	else
		x = list_entry(&walk->all, struct xfrm_state_walk, all);
	list_for_each_entry_from(x, &net->xfrm.state_all, all) {
		if (x->state == XFRM_STATE_DEAD)
			continue;
		state = container_of(x, struct xfrm_state, km);
		if (!xfrm_id_proto_match(state->id.proto, walk->proto))
			continue;
		if (!__xfrm_state_filter_match(state, walk->filter))
			continue;
		err = func(state, walk->seq, data);
		if (err) {
			list_move_tail(&walk->all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		err = -ENOENT;
		goto out;
	}
	list_del_init(&walk->all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
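
/* Usage sketch (illustrative; dump_one_sa is a hypothetical callback):
 * dumping the SAD is a three-step walk, and the walker keeps its position
 * on the state_all list so a dump can resume after the callback bails out:
 *
 *	static int dump_one_sa(struct xfrm_state *x, int count, void *ptr)
 *	{
 *		...	// emit one SA; return nonzero to pause the walk
 *		return 0;
 *	}
 *
 *	struct xfrm_state_walk walk;
 *
 *	xfrm_state_walk_init(&walk, IPSEC_PROTO_ANY, NULL);
 *	err = xfrm_state_walk(net, &walk, dump_one_sa, NULL);
 *	xfrm_state_walk_done(&walk, net);
 */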

void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
			  struct xfrm_address_filter *filter)
{
	INIT_LIST_HEAD(&walk->all);
	walk->proto = proto;
	walk->state = XFRM_STATE_DEAD;
	walk->seq = 0;
	walk->filter = filter;
}
EXPORT_SYMBOL(xfrm_state_walk_init);

void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
{
	kfree(walk->filter);

	if (list_empty(&walk->all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	list_del(&walk->all);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_walk_done);

static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state *)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on(xs_net(x)))
			x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}

static LIST_HEAD(xfrm_km_list);

void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	rcu_read_unlock();
}

void km_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_mgr *km;
	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	rcu_read_unlock();
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
{
	struct km_event c;

	c.data.hard = hard;
	c.portid = portid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);
}

EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure
 * We are happy with one success
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol);
		if (!acqret)
			err = acqret;
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_query);

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
{
	struct km_event c;

	c.data.hard = hard;
	c.portid = portid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);
}
EXPORT_SYMBOL(km_policy_expired);

#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
	       const struct xfrm_migrate *m, int num_migrate,
	       const struct xfrm_kmaddress *k)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate, k);
			if (!ret)
				err = ret;
		}
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif

int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(net, proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_report);

bool km_is_alive(const struct km_event *c)
{
	struct xfrm_mgr *km;
	bool is_alive = false;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->is_alive && km->is_alive(c)) {
			is_alive = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_alive;
}
EXPORT_SYMBOL(km_is_alive);

int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	rcu_read_unlock();

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

static DEFINE_SPINLOCK(xfrm_km_lock);

int xfrm_register_km(struct xfrm_mgr *km)
{
	spin_lock_bh(&xfrm_km_lock);
	list_add_tail_rcu(&km->list, &xfrm_km_list);
	spin_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	spin_lock_bh(&xfrm_km_lock);
	list_del_rcu(&km->list);
	spin_unlock_bh(&xfrm_km_lock);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else
		rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_state_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
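
/* xfrm_state_get_afinfo() returns with rcu_read_lock() held on success;
 * xfrm_state_put_afinfo() is what drops it.  Every successful get must
 * therefore be paired with a put on all return paths, as the callers
 * above do.
 */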

struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_state_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	rcu_read_unlock();
}

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_VALID &&
	    x->type && x->type->get_mtu)
		res = x->type->get_mtu(x, mtu);
	else
		res = mtu - x->props.header_len;
	spin_unlock_bh(&x->lock);
	return res;
}

int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *inner_mode;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;

	if (x->sel.family != AF_UNSPEC) {
		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    family != x->sel.family) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		x->inner_mode = inner_mode;
	} else {
		struct xfrm_mode *inner_mode_iaf;
		int iafamily = AF_INET;

		inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode);
			goto error;
		}
		x->inner_mode = inner_mode;

		if (x->props.family == AF_INET)
			iafamily = AF_INET6;

		inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
		if (inner_mode_iaf) {
			if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
				x->inner_mode_iaf = inner_mode_iaf;
			else
				xfrm_put_mode(inner_mode_iaf);
		}
	}

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL) {
		err = -EPROTONOSUPPORT;
		goto error;
	}

	if (init_replay) {
		err = xfrm_init_replay(x);
		if (err)
			goto error;
	}

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(__xfrm_init_state);

int xfrm_init_state(struct xfrm_state *x)
{
	return __xfrm_init_state(x, true);
}

EXPORT_SYMBOL(xfrm_init_state);

int __net_init xfrm_state_init(struct net *net)
{
	unsigned int sz;

	INIT_LIST_HEAD(&net->xfrm.state_all);

	sz = sizeof(struct hlist_head) * 8;

	net->xfrm.state_bydst = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bydst)
		goto out_bydst;
	net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bysrc)
		goto out_bysrc;
	net->xfrm.state_byspi = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_byspi)
		goto out_byspi;
	net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	net->xfrm.state_num = 0;
	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
	INIT_HLIST_HEAD(&net->xfrm.state_gc_list);
	INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task);
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	return 0;

out_byspi:
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
	xfrm_hash_free(net->xfrm.state_bydst, sz);
out_bydst:
	return -ENOMEM;
}

void xfrm_state_fini(struct net *net)
{
	unsigned int sz;

	flush_work(&net->xfrm.state_hash_work);
	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
	flush_work(&net->xfrm.state_gc_work);

	WARN_ON(!list_empty(&net->xfrm.state_all));

	sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.state_byspi));
	xfrm_hash_free(net->xfrm.state_byspi, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
	xfrm_hash_free(net->xfrm.state_bydst, sz);
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
				     struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &x->props.saddr.a4, &x->id.daddr.a4);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
				 x->props.saddr.a6, x->id.daddr.a6);
		break;
	}

	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}

static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
				      struct audit_buffer *audit_buf)
{
	const struct iphdr *iph4;
	const struct ipv6hdr *iph6;

	switch (family) {
	case AF_INET:
		iph4 = ip_hdr(skb);
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &iph4->saddr, &iph4->daddr);
		break;
	case AF_INET6:
		iph6 = ipv6_hdr(skb);
		audit_log_format(audit_buf,
				 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
				 &iph6->saddr, &iph6->daddr,
				 iph6->flow_lbl[0] & 0x0f,
				 iph6->flow_lbl[1],
				 iph6->flow_lbl[2]);
		break;
	}
}

void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);

void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);

void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
				      struct sk_buff *skb)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replay-overflow");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	/* don't record the sequence number because it's inherent in this kind
	 * of audit message */
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);

void xfrm_audit_state_replay(struct xfrm_state *x,
			     struct sk_buff *skb, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replayed-pkt");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);

void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);

void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
			       __be32 net_spi, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	spi = ntohl(net_spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);

void xfrm_audit_state_icvfail(struct xfrm_state *x,
			      struct sk_buff *skb, u8 proto)
{
	struct audit_buffer *audit_buf;
	__be32 net_spi;
	__be32 net_seq;

	audit_buf = xfrm_audit_start("SA-icv-failure");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
		u32 spi = ntohl(net_spi);
		audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
				 spi, spi, ntohl(net_seq));
	}
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
#endif /* CONFIG_AUDITSYSCALL */