1 /* xfrm_user.c: User interface to configure xfrm engine. 2 * 3 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 4 * 5 * Changes: 6 * Mitsuru KANDA @USAGI 7 * Kazunori MIYAZAWA @USAGI 8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 9 * IPv6 support 10 * 11 */ 12 13 #include <linux/crypto.h> 14 #include <linux/module.h> 15 #include <linux/kernel.h> 16 #include <linux/types.h> 17 #include <linux/slab.h> 18 #include <linux/socket.h> 19 #include <linux/string.h> 20 #include <linux/net.h> 21 #include <linux/skbuff.h> 22 #include <linux/rtnetlink.h> 23 #include <linux/pfkeyv2.h> 24 #include <linux/ipsec.h> 25 #include <linux/init.h> 26 #include <linux/security.h> 27 #include <net/sock.h> 28 #include <net/xfrm.h> 29 #include <net/netlink.h> 30 #include <asm/uaccess.h> 31 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 32 #include <linux/in6.h> 33 #endif 34 #include <linux/audit.h> 35 36 static inline int alg_len(struct xfrm_algo *alg) 37 { 38 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); 39 } 40 41 static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type) 42 { 43 struct rtattr *rt = xfrma[type - 1]; 44 struct xfrm_algo *algp; 45 int len; 46 47 if (!rt) 48 return 0; 49 50 len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp); 51 if (len < 0) 52 return -EINVAL; 53 54 algp = RTA_DATA(rt); 55 56 len -= (algp->alg_key_len + 7U) / 8; 57 if (len < 0) 58 return -EINVAL; 59 60 switch (type) { 61 case XFRMA_ALG_AUTH: 62 if (!algp->alg_key_len && 63 strcmp(algp->alg_name, "digest_null") != 0) 64 return -EINVAL; 65 break; 66 67 case XFRMA_ALG_CRYPT: 68 if (!algp->alg_key_len && 69 strcmp(algp->alg_name, "cipher_null") != 0) 70 return -EINVAL; 71 break; 72 73 case XFRMA_ALG_COMP: 74 /* Zero length keys are legal. */ 75 break; 76 77 default: 78 return -EINVAL; 79 } 80 81 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0'; 82 return 0; 83 } 84 85 static int verify_encap_tmpl(struct rtattr **xfrma) 86 { 87 struct rtattr *rt = xfrma[XFRMA_ENCAP - 1]; 88 struct xfrm_encap_tmpl *encap; 89 90 if (!rt) 91 return 0; 92 93 if ((rt->rta_len - sizeof(*rt)) < sizeof(*encap)) 94 return -EINVAL; 95 96 return 0; 97 } 98 99 static int verify_one_addr(struct rtattr **xfrma, enum xfrm_attr_type_t type, 100 xfrm_address_t **addrp) 101 { 102 struct rtattr *rt = xfrma[type - 1]; 103 104 if (!rt) 105 return 0; 106 107 if ((rt->rta_len - sizeof(*rt)) < sizeof(**addrp)) 108 return -EINVAL; 109 110 if (addrp) 111 *addrp = RTA_DATA(rt); 112 113 return 0; 114 } 115 116 static inline int verify_sec_ctx_len(struct rtattr **xfrma) 117 { 118 struct rtattr *rt = xfrma[XFRMA_SEC_CTX - 1]; 119 struct xfrm_user_sec_ctx *uctx; 120 int len = 0; 121 122 if (!rt) 123 return 0; 124 125 if (rt->rta_len < sizeof(*uctx)) 126 return -EINVAL; 127 128 uctx = RTA_DATA(rt); 129 130 len += sizeof(struct xfrm_user_sec_ctx); 131 len += uctx->ctx_len; 132 133 if (uctx->len != len) 134 return -EINVAL; 135 136 return 0; 137 } 138 139 140 static int verify_newsa_info(struct xfrm_usersa_info *p, 141 struct rtattr **xfrma) 142 { 143 int err; 144 145 err = -EINVAL; 146 switch (p->family) { 147 case AF_INET: 148 break; 149 150 case AF_INET6: 151 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 152 break; 153 #else 154 err = -EAFNOSUPPORT; 155 goto out; 156 #endif 157 158 default: 159 goto out; 160 } 161 162 err = -EINVAL; 163 switch (p->id.proto) { 164 case IPPROTO_AH: 165 if (!xfrma[XFRMA_ALG_AUTH-1] || 166 xfrma[XFRMA_ALG_CRYPT-1] || 167 xfrma[XFRMA_ALG_COMP-1]) 168 goto out; 169 break; 170 171 case IPPROTO_ESP: 172 if ((!xfrma[XFRMA_ALG_AUTH-1] && 173 !xfrma[XFRMA_ALG_CRYPT-1]) || 174 xfrma[XFRMA_ALG_COMP-1]) 175 goto out; 176 break; 177 178 case IPPROTO_COMP: 179 if (!xfrma[XFRMA_ALG_COMP-1] || 180 xfrma[XFRMA_ALG_AUTH-1] || 181 xfrma[XFRMA_ALG_CRYPT-1]) 182 goto out; 183 break; 184 185 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 186 case IPPROTO_DSTOPTS: 187 case IPPROTO_ROUTING: 188 if (xfrma[XFRMA_ALG_COMP-1] || 189 xfrma[XFRMA_ALG_AUTH-1] || 190 xfrma[XFRMA_ALG_CRYPT-1] || 191 xfrma[XFRMA_ENCAP-1] || 192 xfrma[XFRMA_SEC_CTX-1] || 193 !xfrma[XFRMA_COADDR-1]) 194 goto out; 195 break; 196 #endif 197 198 default: 199 goto out; 200 } 201 202 if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH))) 203 goto out; 204 if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT))) 205 goto out; 206 if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP))) 207 goto out; 208 if ((err = verify_encap_tmpl(xfrma))) 209 goto out; 210 if ((err = verify_sec_ctx_len(xfrma))) 211 goto out; 212 if ((err = verify_one_addr(xfrma, XFRMA_COADDR, NULL))) 213 goto out; 214 215 err = -EINVAL; 216 switch (p->mode) { 217 case XFRM_MODE_TRANSPORT: 218 case XFRM_MODE_TUNNEL: 219 case XFRM_MODE_ROUTEOPTIMIZATION: 220 case XFRM_MODE_BEET: 221 break; 222 223 default: 224 goto out; 225 } 226 227 err = 0; 228 229 out: 230 return err; 231 } 232 233 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, 234 struct xfrm_algo_desc *(*get_byname)(char *, int), 235 struct rtattr *u_arg) 236 { 237 struct rtattr *rta = u_arg; 238 struct xfrm_algo *p, *ualg; 239 struct xfrm_algo_desc *algo; 240 241 if (!rta) 242 return 0; 243 244 ualg = RTA_DATA(rta); 245 246 algo = get_byname(ualg->alg_name, 1); 247 if (!algo) 248 return -ENOSYS; 249 *props = algo->desc.sadb_alg_id; 250 251 p = kmemdup(ualg, alg_len(ualg), GFP_KERNEL); 252 if (!p) 253 return -ENOMEM; 254 255 strcpy(p->alg_name, algo->name); 256 *algpp = p; 257 return 0; 258 } 259 260 static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg) 261 { 262 struct rtattr *rta = u_arg; 263 struct xfrm_encap_tmpl *p, *uencap; 264 265 if (!rta) 266 return 0; 267 268 uencap = RTA_DATA(rta); 269 p = kmemdup(uencap, sizeof(*p), GFP_KERNEL); 270 if (!p) 271 return -ENOMEM; 272 273 *encapp = p; 274 return 0; 275 } 276 277 278 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx) 279 { 280 int len = 0; 281 282 if (xfrm_ctx) { 283 len += sizeof(struct xfrm_user_sec_ctx); 284 len += xfrm_ctx->ctx_len; 285 } 286 return len; 287 } 288 289 static int attach_sec_ctx(struct xfrm_state *x, struct rtattr *u_arg) 290 { 291 struct xfrm_user_sec_ctx *uctx; 292 293 if (!u_arg) 294 return 0; 295 296 uctx = RTA_DATA(u_arg); 297 return security_xfrm_state_alloc(x, uctx); 298 } 299 300 static int attach_one_addr(xfrm_address_t **addrpp, struct rtattr *u_arg) 301 { 302 struct rtattr *rta = u_arg; 303 xfrm_address_t *p, *uaddrp; 304 305 if (!rta) 306 return 0; 307 308 uaddrp = RTA_DATA(rta); 309 p = kmemdup(uaddrp, sizeof(*p), GFP_KERNEL); 310 if (!p) 311 return -ENOMEM; 312 313 *addrpp = p; 314 return 0; 315 } 316 317 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 318 { 319 memcpy(&x->id, &p->id, sizeof(x->id)); 320 memcpy(&x->sel, &p->sel, sizeof(x->sel)); 321 memcpy(&x->lft, &p->lft, sizeof(x->lft)); 322 x->props.mode = p->mode; 323 x->props.replay_window = p->replay_window; 324 x->props.reqid = p->reqid; 325 x->props.family = p->family; 326 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); 327 x->props.flags = p->flags; 328 329 /* 330 * Set inner address family if the KM left it as zero. 331 * See comment in validate_tmpl. 332 */ 333 if (!x->sel.family) 334 x->sel.family = p->family; 335 } 336 337 /* 338 * someday when pfkey also has support, we could have the code 339 * somehow made shareable and move it to xfrm_state.c - JHS 340 * 341 */ 342 static int xfrm_update_ae_params(struct xfrm_state *x, struct rtattr **xfrma) 343 { 344 int err = - EINVAL; 345 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1]; 346 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1]; 347 struct rtattr *et = xfrma[XFRMA_ETIMER_THRESH-1]; 348 struct rtattr *rt = xfrma[XFRMA_REPLAY_THRESH-1]; 349 350 if (rp) { 351 struct xfrm_replay_state *replay; 352 if (RTA_PAYLOAD(rp) < sizeof(*replay)) 353 goto error; 354 replay = RTA_DATA(rp); 355 memcpy(&x->replay, replay, sizeof(*replay)); 356 memcpy(&x->preplay, replay, sizeof(*replay)); 357 } 358 359 if (lt) { 360 struct xfrm_lifetime_cur *ltime; 361 if (RTA_PAYLOAD(lt) < sizeof(*ltime)) 362 goto error; 363 ltime = RTA_DATA(lt); 364 x->curlft.bytes = ltime->bytes; 365 x->curlft.packets = ltime->packets; 366 x->curlft.add_time = ltime->add_time; 367 x->curlft.use_time = ltime->use_time; 368 } 369 370 if (et) { 371 if (RTA_PAYLOAD(et) < sizeof(u32)) 372 goto error; 373 x->replay_maxage = *(u32*)RTA_DATA(et); 374 } 375 376 if (rt) { 377 if (RTA_PAYLOAD(rt) < sizeof(u32)) 378 goto error; 379 x->replay_maxdiff = *(u32*)RTA_DATA(rt); 380 } 381 382 return 0; 383 error: 384 return err; 385 } 386 387 static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p, 388 struct rtattr **xfrma, 389 int *errp) 390 { 391 struct xfrm_state *x = xfrm_state_alloc(); 392 int err = -ENOMEM; 393 394 if (!x) 395 goto error_no_put; 396 397 copy_from_user_state(x, p); 398 399 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo, 400 xfrm_aalg_get_byname, 401 xfrma[XFRMA_ALG_AUTH-1]))) 402 goto error; 403 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo, 404 xfrm_ealg_get_byname, 405 xfrma[XFRMA_ALG_CRYPT-1]))) 406 goto error; 407 if ((err = attach_one_algo(&x->calg, &x->props.calgo, 408 xfrm_calg_get_byname, 409 xfrma[XFRMA_ALG_COMP-1]))) 410 goto error; 411 if ((err = attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1]))) 412 goto error; 413 if ((err = attach_one_addr(&x->coaddr, xfrma[XFRMA_COADDR-1]))) 414 goto error; 415 err = xfrm_init_state(x); 416 if (err) 417 goto error; 418 419 if ((err = attach_sec_ctx(x, xfrma[XFRMA_SEC_CTX-1]))) 420 goto error; 421 422 x->km.seq = p->seq; 423 x->replay_maxdiff = sysctl_xfrm_aevent_rseqth; 424 /* sysctl_xfrm_aevent_etime is in 100ms units */ 425 x->replay_maxage = (sysctl_xfrm_aevent_etime*HZ)/XFRM_AE_ETH_M; 426 x->preplay.bitmap = 0; 427 x->preplay.seq = x->replay.seq+x->replay_maxdiff; 428 x->preplay.oseq = x->replay.oseq +x->replay_maxdiff; 429 430 /* override default values from above */ 431 432 err = xfrm_update_ae_params(x, (struct rtattr **)xfrma); 433 if (err < 0) 434 goto error; 435 436 return x; 437 438 error: 439 x->km.state = XFRM_STATE_DEAD; 440 xfrm_state_put(x); 441 error_no_put: 442 *errp = err; 443 return NULL; 444 } 445 446 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 447 struct rtattr **xfrma) 448 { 449 struct xfrm_usersa_info *p = nlmsg_data(nlh); 450 struct xfrm_state *x; 451 int err; 452 struct km_event c; 453 454 err = verify_newsa_info(p, xfrma); 455 if (err) 456 return err; 457 458 x = xfrm_state_construct(p, xfrma, &err); 459 if (!x) 460 return err; 461 462 xfrm_state_hold(x); 463 if (nlh->nlmsg_type == XFRM_MSG_NEWSA) 464 err = xfrm_state_add(x); 465 else 466 err = xfrm_state_update(x); 467 468 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 469 AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x); 470 471 if (err < 0) { 472 x->km.state = XFRM_STATE_DEAD; 473 __xfrm_state_put(x); 474 goto out; 475 } 476 477 c.seq = nlh->nlmsg_seq; 478 c.pid = nlh->nlmsg_pid; 479 c.event = nlh->nlmsg_type; 480 481 km_state_notify(x, &c); 482 out: 483 xfrm_state_put(x); 484 return err; 485 } 486 487 static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p, 488 struct rtattr **xfrma, 489 int *errp) 490 { 491 struct xfrm_state *x = NULL; 492 int err; 493 494 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) { 495 err = -ESRCH; 496 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family); 497 } else { 498 xfrm_address_t *saddr = NULL; 499 500 err = verify_one_addr(xfrma, XFRMA_SRCADDR, &saddr); 501 if (err) 502 goto out; 503 504 if (!saddr) { 505 err = -EINVAL; 506 goto out; 507 } 508 509 err = -ESRCH; 510 x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto, 511 p->family); 512 } 513 514 out: 515 if (!x && errp) 516 *errp = err; 517 return x; 518 } 519 520 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 521 struct rtattr **xfrma) 522 { 523 struct xfrm_state *x; 524 int err = -ESRCH; 525 struct km_event c; 526 struct xfrm_usersa_id *p = nlmsg_data(nlh); 527 528 x = xfrm_user_state_lookup(p, xfrma, &err); 529 if (x == NULL) 530 return err; 531 532 if ((err = security_xfrm_state_delete(x)) != 0) 533 goto out; 534 535 if (xfrm_state_kern(x)) { 536 err = -EPERM; 537 goto out; 538 } 539 540 err = xfrm_state_delete(x); 541 542 if (err < 0) 543 goto out; 544 545 c.seq = nlh->nlmsg_seq; 546 c.pid = nlh->nlmsg_pid; 547 c.event = nlh->nlmsg_type; 548 km_state_notify(x, &c); 549 550 out: 551 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 552 AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); 553 xfrm_state_put(x); 554 return err; 555 } 556 557 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 558 { 559 memcpy(&p->id, &x->id, sizeof(p->id)); 560 memcpy(&p->sel, &x->sel, sizeof(p->sel)); 561 memcpy(&p->lft, &x->lft, sizeof(p->lft)); 562 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft)); 563 memcpy(&p->stats, &x->stats, sizeof(p->stats)); 564 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr)); 565 p->mode = x->props.mode; 566 p->replay_window = x->props.replay_window; 567 p->reqid = x->props.reqid; 568 p->family = x->props.family; 569 p->flags = x->props.flags; 570 p->seq = x->km.seq; 571 } 572 573 struct xfrm_dump_info { 574 struct sk_buff *in_skb; 575 struct sk_buff *out_skb; 576 u32 nlmsg_seq; 577 u16 nlmsg_flags; 578 int start_idx; 579 int this_idx; 580 }; 581 582 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) 583 { 584 int ctx_size = sizeof(struct xfrm_sec_ctx) + s->ctx_len; 585 struct xfrm_user_sec_ctx *uctx; 586 struct nlattr *attr; 587 588 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size); 589 if (attr == NULL) 590 return -EMSGSIZE; 591 592 uctx = nla_data(attr); 593 uctx->exttype = XFRMA_SEC_CTX; 594 uctx->len = ctx_size; 595 uctx->ctx_doi = s->ctx_doi; 596 uctx->ctx_alg = s->ctx_alg; 597 uctx->ctx_len = s->ctx_len; 598 memcpy(uctx + 1, s->ctx_str, s->ctx_len); 599 600 return 0; 601 } 602 603 static int dump_one_state(struct xfrm_state *x, int count, void *ptr) 604 { 605 struct xfrm_dump_info *sp = ptr; 606 struct sk_buff *in_skb = sp->in_skb; 607 struct sk_buff *skb = sp->out_skb; 608 struct xfrm_usersa_info *p; 609 struct nlmsghdr *nlh; 610 611 if (sp->this_idx < sp->start_idx) 612 goto out; 613 614 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 615 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); 616 if (nlh == NULL) 617 return -EMSGSIZE; 618 619 p = nlmsg_data(nlh); 620 copy_to_user_state(x, p); 621 622 if (x->aalg) 623 NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg); 624 if (x->ealg) 625 NLA_PUT(skb, XFRMA_ALG_CRYPT, alg_len(x->ealg), x->ealg); 626 if (x->calg) 627 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); 628 629 if (x->encap) 630 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 631 632 if (x->security && copy_sec_ctx(x->security, skb) < 0) 633 goto nla_put_failure; 634 635 if (x->coaddr) 636 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); 637 638 if (x->lastused) 639 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused); 640 641 nlmsg_end(skb, nlh); 642 out: 643 sp->this_idx++; 644 return 0; 645 646 nla_put_failure: 647 nlmsg_cancel(skb, nlh); 648 return -EMSGSIZE; 649 } 650 651 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) 652 { 653 struct xfrm_dump_info info; 654 655 info.in_skb = cb->skb; 656 info.out_skb = skb; 657 info.nlmsg_seq = cb->nlh->nlmsg_seq; 658 info.nlmsg_flags = NLM_F_MULTI; 659 info.this_idx = 0; 660 info.start_idx = cb->args[0]; 661 (void) xfrm_state_walk(0, dump_one_state, &info); 662 cb->args[0] = info.this_idx; 663 664 return skb->len; 665 } 666 667 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, 668 struct xfrm_state *x, u32 seq) 669 { 670 struct xfrm_dump_info info; 671 struct sk_buff *skb; 672 673 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); 674 if (!skb) 675 return ERR_PTR(-ENOMEM); 676 677 info.in_skb = in_skb; 678 info.out_skb = skb; 679 info.nlmsg_seq = seq; 680 info.nlmsg_flags = 0; 681 info.this_idx = info.start_idx = 0; 682 683 if (dump_one_state(x, 0, &info)) { 684 kfree_skb(skb); 685 return NULL; 686 } 687 688 return skb; 689 } 690 691 static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 692 { 693 struct xfrmk_spdinfo si; 694 struct xfrmu_spdinfo spc; 695 struct xfrmu_spdhinfo sph; 696 struct nlmsghdr *nlh; 697 u32 *f; 698 699 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); 700 if (nlh == NULL) /* shouldnt really happen ... */ 701 return -EMSGSIZE; 702 703 f = nlmsg_data(nlh); 704 *f = flags; 705 xfrm_spd_getinfo(&si); 706 spc.incnt = si.incnt; 707 spc.outcnt = si.outcnt; 708 spc.fwdcnt = si.fwdcnt; 709 spc.inscnt = si.inscnt; 710 spc.outscnt = si.outscnt; 711 spc.fwdscnt = si.fwdscnt; 712 sph.spdhcnt = si.spdhcnt; 713 sph.spdhmcnt = si.spdhmcnt; 714 715 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); 716 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); 717 718 return nlmsg_end(skb, nlh); 719 720 nla_put_failure: 721 nlmsg_cancel(skb, nlh); 722 return -EMSGSIZE; 723 } 724 725 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, 726 struct rtattr **xfrma) 727 { 728 struct sk_buff *r_skb; 729 u32 *flags = nlmsg_data(nlh); 730 u32 spid = NETLINK_CB(skb).pid; 731 u32 seq = nlh->nlmsg_seq; 732 int len = NLMSG_LENGTH(sizeof(u32)); 733 734 len += RTA_SPACE(sizeof(struct xfrmu_spdinfo)); 735 len += RTA_SPACE(sizeof(struct xfrmu_spdhinfo)); 736 737 r_skb = alloc_skb(len, GFP_ATOMIC); 738 if (r_skb == NULL) 739 return -ENOMEM; 740 741 if (build_spdinfo(r_skb, spid, seq, *flags) < 0) 742 BUG(); 743 744 return nlmsg_unicast(xfrm_nl, r_skb, spid); 745 } 746 747 static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 748 { 749 struct xfrmk_sadinfo si; 750 struct xfrmu_sadhinfo sh; 751 struct nlmsghdr *nlh; 752 u32 *f; 753 754 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); 755 if (nlh == NULL) /* shouldnt really happen ... */ 756 return -EMSGSIZE; 757 758 f = nlmsg_data(nlh); 759 *f = flags; 760 xfrm_sad_getinfo(&si); 761 762 sh.sadhmcnt = si.sadhmcnt; 763 sh.sadhcnt = si.sadhcnt; 764 765 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt); 766 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh); 767 768 return nlmsg_end(skb, nlh); 769 770 nla_put_failure: 771 nlmsg_cancel(skb, nlh); 772 return -EMSGSIZE; 773 } 774 775 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, 776 struct rtattr **xfrma) 777 { 778 struct sk_buff *r_skb; 779 u32 *flags = nlmsg_data(nlh); 780 u32 spid = NETLINK_CB(skb).pid; 781 u32 seq = nlh->nlmsg_seq; 782 int len = NLMSG_LENGTH(sizeof(u32)); 783 784 len += RTA_SPACE(sizeof(struct xfrmu_sadhinfo)); 785 len += RTA_SPACE(sizeof(u32)); 786 787 r_skb = alloc_skb(len, GFP_ATOMIC); 788 789 if (r_skb == NULL) 790 return -ENOMEM; 791 792 if (build_sadinfo(r_skb, spid, seq, *flags) < 0) 793 BUG(); 794 795 return nlmsg_unicast(xfrm_nl, r_skb, spid); 796 } 797 798 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 799 struct rtattr **xfrma) 800 { 801 struct xfrm_usersa_id *p = nlmsg_data(nlh); 802 struct xfrm_state *x; 803 struct sk_buff *resp_skb; 804 int err = -ESRCH; 805 806 x = xfrm_user_state_lookup(p, xfrma, &err); 807 if (x == NULL) 808 goto out_noput; 809 810 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); 811 if (IS_ERR(resp_skb)) { 812 err = PTR_ERR(resp_skb); 813 } else { 814 err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid); 815 } 816 xfrm_state_put(x); 817 out_noput: 818 return err; 819 } 820 821 static int verify_userspi_info(struct xfrm_userspi_info *p) 822 { 823 switch (p->info.id.proto) { 824 case IPPROTO_AH: 825 case IPPROTO_ESP: 826 break; 827 828 case IPPROTO_COMP: 829 /* IPCOMP spi is 16-bits. */ 830 if (p->max >= 0x10000) 831 return -EINVAL; 832 break; 833 834 default: 835 return -EINVAL; 836 } 837 838 if (p->min > p->max) 839 return -EINVAL; 840 841 return 0; 842 } 843 844 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, 845 struct rtattr **xfrma) 846 { 847 struct xfrm_state *x; 848 struct xfrm_userspi_info *p; 849 struct sk_buff *resp_skb; 850 xfrm_address_t *daddr; 851 int family; 852 int err; 853 854 p = nlmsg_data(nlh); 855 err = verify_userspi_info(p); 856 if (err) 857 goto out_noput; 858 859 family = p->info.family; 860 daddr = &p->info.id.daddr; 861 862 x = NULL; 863 if (p->info.seq) { 864 x = xfrm_find_acq_byseq(p->info.seq); 865 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) { 866 xfrm_state_put(x); 867 x = NULL; 868 } 869 } 870 871 if (!x) 872 x = xfrm_find_acq(p->info.mode, p->info.reqid, 873 p->info.id.proto, daddr, 874 &p->info.saddr, 1, 875 family); 876 err = -ENOENT; 877 if (x == NULL) 878 goto out_noput; 879 880 resp_skb = ERR_PTR(-ENOENT); 881 882 spin_lock_bh(&x->lock); 883 if (x->km.state != XFRM_STATE_DEAD) { 884 xfrm_alloc_spi(x, htonl(p->min), htonl(p->max)); 885 if (x->id.spi) 886 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); 887 } 888 spin_unlock_bh(&x->lock); 889 890 if (IS_ERR(resp_skb)) { 891 err = PTR_ERR(resp_skb); 892 goto out; 893 } 894 895 err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid); 896 897 out: 898 xfrm_state_put(x); 899 out_noput: 900 return err; 901 } 902 903 static int verify_policy_dir(u8 dir) 904 { 905 switch (dir) { 906 case XFRM_POLICY_IN: 907 case XFRM_POLICY_OUT: 908 case XFRM_POLICY_FWD: 909 break; 910 911 default: 912 return -EINVAL; 913 } 914 915 return 0; 916 } 917 918 static int verify_policy_type(u8 type) 919 { 920 switch (type) { 921 case XFRM_POLICY_TYPE_MAIN: 922 #ifdef CONFIG_XFRM_SUB_POLICY 923 case XFRM_POLICY_TYPE_SUB: 924 #endif 925 break; 926 927 default: 928 return -EINVAL; 929 } 930 931 return 0; 932 } 933 934 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) 935 { 936 switch (p->share) { 937 case XFRM_SHARE_ANY: 938 case XFRM_SHARE_SESSION: 939 case XFRM_SHARE_USER: 940 case XFRM_SHARE_UNIQUE: 941 break; 942 943 default: 944 return -EINVAL; 945 } 946 947 switch (p->action) { 948 case XFRM_POLICY_ALLOW: 949 case XFRM_POLICY_BLOCK: 950 break; 951 952 default: 953 return -EINVAL; 954 } 955 956 switch (p->sel.family) { 957 case AF_INET: 958 break; 959 960 case AF_INET6: 961 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 962 break; 963 #else 964 return -EAFNOSUPPORT; 965 #endif 966 967 default: 968 return -EINVAL; 969 } 970 971 return verify_policy_dir(p->dir); 972 } 973 974 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct rtattr **xfrma) 975 { 976 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; 977 struct xfrm_user_sec_ctx *uctx; 978 979 if (!rt) 980 return 0; 981 982 uctx = RTA_DATA(rt); 983 return security_xfrm_policy_alloc(pol, uctx); 984 } 985 986 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, 987 int nr) 988 { 989 int i; 990 991 xp->xfrm_nr = nr; 992 for (i = 0; i < nr; i++, ut++) { 993 struct xfrm_tmpl *t = &xp->xfrm_vec[i]; 994 995 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id)); 996 memcpy(&t->saddr, &ut->saddr, 997 sizeof(xfrm_address_t)); 998 t->reqid = ut->reqid; 999 t->mode = ut->mode; 1000 t->share = ut->share; 1001 t->optional = ut->optional; 1002 t->aalgos = ut->aalgos; 1003 t->ealgos = ut->ealgos; 1004 t->calgos = ut->calgos; 1005 t->encap_family = ut->family; 1006 } 1007 } 1008 1009 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) 1010 { 1011 int i; 1012 1013 if (nr > XFRM_MAX_DEPTH) 1014 return -EINVAL; 1015 1016 for (i = 0; i < nr; i++) { 1017 /* We never validated the ut->family value, so many 1018 * applications simply leave it at zero. The check was 1019 * never made and ut->family was ignored because all 1020 * templates could be assumed to have the same family as 1021 * the policy itself. Now that we will have ipv4-in-ipv6 1022 * and ipv6-in-ipv4 tunnels, this is no longer true. 1023 */ 1024 if (!ut[i].family) 1025 ut[i].family = family; 1026 1027 switch (ut[i].family) { 1028 case AF_INET: 1029 break; 1030 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1031 case AF_INET6: 1032 break; 1033 #endif 1034 default: 1035 return -EINVAL; 1036 } 1037 } 1038 1039 return 0; 1040 } 1041 1042 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma) 1043 { 1044 struct rtattr *rt = xfrma[XFRMA_TMPL-1]; 1045 1046 if (!rt) { 1047 pol->xfrm_nr = 0; 1048 } else { 1049 struct xfrm_user_tmpl *utmpl = RTA_DATA(rt); 1050 int nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl); 1051 int err; 1052 1053 err = validate_tmpl(nr, utmpl, pol->family); 1054 if (err) 1055 return err; 1056 1057 copy_templates(pol, RTA_DATA(rt), nr); 1058 } 1059 return 0; 1060 } 1061 1062 static int copy_from_user_policy_type(u8 *tp, struct rtattr **xfrma) 1063 { 1064 struct rtattr *rt = xfrma[XFRMA_POLICY_TYPE-1]; 1065 struct xfrm_userpolicy_type *upt; 1066 u8 type = XFRM_POLICY_TYPE_MAIN; 1067 int err; 1068 1069 if (rt) { 1070 if (rt->rta_len < sizeof(*upt)) 1071 return -EINVAL; 1072 1073 upt = RTA_DATA(rt); 1074 type = upt->type; 1075 } 1076 1077 err = verify_policy_type(type); 1078 if (err) 1079 return err; 1080 1081 *tp = type; 1082 return 0; 1083 } 1084 1085 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p) 1086 { 1087 xp->priority = p->priority; 1088 xp->index = p->index; 1089 memcpy(&xp->selector, &p->sel, sizeof(xp->selector)); 1090 memcpy(&xp->lft, &p->lft, sizeof(xp->lft)); 1091 xp->action = p->action; 1092 xp->flags = p->flags; 1093 xp->family = p->sel.family; 1094 /* XXX xp->share = p->share; */ 1095 } 1096 1097 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) 1098 { 1099 memcpy(&p->sel, &xp->selector, sizeof(p->sel)); 1100 memcpy(&p->lft, &xp->lft, sizeof(p->lft)); 1101 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); 1102 p->priority = xp->priority; 1103 p->index = xp->index; 1104 p->sel.family = xp->family; 1105 p->dir = dir; 1106 p->action = xp->action; 1107 p->flags = xp->flags; 1108 p->share = XFRM_SHARE_ANY; /* XXX xp->share */ 1109 } 1110 1111 static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp) 1112 { 1113 struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL); 1114 int err; 1115 1116 if (!xp) { 1117 *errp = -ENOMEM; 1118 return NULL; 1119 } 1120 1121 copy_from_user_policy(xp, p); 1122 1123 err = copy_from_user_policy_type(&xp->type, xfrma); 1124 if (err) 1125 goto error; 1126 1127 if (!(err = copy_from_user_tmpl(xp, xfrma))) 1128 err = copy_from_user_sec_ctx(xp, xfrma); 1129 if (err) 1130 goto error; 1131 1132 return xp; 1133 error: 1134 *errp = err; 1135 kfree(xp); 1136 return NULL; 1137 } 1138 1139 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 1140 struct rtattr **xfrma) 1141 { 1142 struct xfrm_userpolicy_info *p = nlmsg_data(nlh); 1143 struct xfrm_policy *xp; 1144 struct km_event c; 1145 int err; 1146 int excl; 1147 1148 err = verify_newpolicy_info(p); 1149 if (err) 1150 return err; 1151 err = verify_sec_ctx_len(xfrma); 1152 if (err) 1153 return err; 1154 1155 xp = xfrm_policy_construct(p, xfrma, &err); 1156 if (!xp) 1157 return err; 1158 1159 /* shouldnt excl be based on nlh flags?? 1160 * Aha! this is anti-netlink really i.e more pfkey derived 1161 * in netlink excl is a flag and you wouldnt need 1162 * a type XFRM_MSG_UPDPOLICY - JHS */ 1163 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; 1164 err = xfrm_policy_insert(p->dir, xp, excl); 1165 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 1166 AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL); 1167 1168 if (err) { 1169 security_xfrm_policy_free(xp); 1170 kfree(xp); 1171 return err; 1172 } 1173 1174 c.event = nlh->nlmsg_type; 1175 c.seq = nlh->nlmsg_seq; 1176 c.pid = nlh->nlmsg_pid; 1177 km_policy_notify(xp, p->dir, &c); 1178 1179 xfrm_pol_put(xp); 1180 1181 return 0; 1182 } 1183 1184 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) 1185 { 1186 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH]; 1187 int i; 1188 1189 if (xp->xfrm_nr == 0) 1190 return 0; 1191 1192 for (i = 0; i < xp->xfrm_nr; i++) { 1193 struct xfrm_user_tmpl *up = &vec[i]; 1194 struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; 1195 1196 memcpy(&up->id, &kp->id, sizeof(up->id)); 1197 up->family = kp->encap_family; 1198 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); 1199 up->reqid = kp->reqid; 1200 up->mode = kp->mode; 1201 up->share = kp->share; 1202 up->optional = kp->optional; 1203 up->aalgos = kp->aalgos; 1204 up->ealgos = kp->ealgos; 1205 up->calgos = kp->calgos; 1206 } 1207 1208 return nla_put(skb, XFRMA_TMPL, 1209 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec); 1210 } 1211 1212 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb) 1213 { 1214 if (x->security) { 1215 return copy_sec_ctx(x->security, skb); 1216 } 1217 return 0; 1218 } 1219 1220 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb) 1221 { 1222 if (xp->security) { 1223 return copy_sec_ctx(xp->security, skb); 1224 } 1225 return 0; 1226 } 1227 static inline size_t userpolicy_type_attrsize(void) 1228 { 1229 #ifdef CONFIG_XFRM_SUB_POLICY 1230 return nla_total_size(sizeof(struct xfrm_userpolicy_type)); 1231 #else 1232 return 0; 1233 #endif 1234 } 1235 1236 #ifdef CONFIG_XFRM_SUB_POLICY 1237 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 1238 { 1239 struct xfrm_userpolicy_type upt = { 1240 .type = type, 1241 }; 1242 1243 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); 1244 } 1245 1246 #else 1247 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 1248 { 1249 return 0; 1250 } 1251 #endif 1252 1253 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr) 1254 { 1255 struct xfrm_dump_info *sp = ptr; 1256 struct xfrm_userpolicy_info *p; 1257 struct sk_buff *in_skb = sp->in_skb; 1258 struct sk_buff *skb = sp->out_skb; 1259 struct nlmsghdr *nlh; 1260 1261 if (sp->this_idx < sp->start_idx) 1262 goto out; 1263 1264 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 1265 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); 1266 if (nlh == NULL) 1267 return -EMSGSIZE; 1268 1269 p = nlmsg_data(nlh); 1270 copy_to_user_policy(xp, p, dir); 1271 if (copy_to_user_tmpl(xp, skb) < 0) 1272 goto nlmsg_failure; 1273 if (copy_to_user_sec_ctx(xp, skb)) 1274 goto nlmsg_failure; 1275 if (copy_to_user_policy_type(xp->type, skb) < 0) 1276 goto nlmsg_failure; 1277 1278 nlmsg_end(skb, nlh); 1279 out: 1280 sp->this_idx++; 1281 return 0; 1282 1283 nlmsg_failure: 1284 nlmsg_cancel(skb, nlh); 1285 return -EMSGSIZE; 1286 } 1287 1288 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) 1289 { 1290 struct xfrm_dump_info info; 1291 1292 info.in_skb = cb->skb; 1293 info.out_skb = skb; 1294 info.nlmsg_seq = cb->nlh->nlmsg_seq; 1295 info.nlmsg_flags = NLM_F_MULTI; 1296 info.this_idx = 0; 1297 info.start_idx = cb->args[0]; 1298 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_one_policy, &info); 1299 #ifdef CONFIG_XFRM_SUB_POLICY 1300 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_SUB, dump_one_policy, &info); 1301 #endif 1302 cb->args[0] = info.this_idx; 1303 1304 return skb->len; 1305 } 1306 1307 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, 1308 struct xfrm_policy *xp, 1309 int dir, u32 seq) 1310 { 1311 struct xfrm_dump_info info; 1312 struct sk_buff *skb; 1313 1314 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1315 if (!skb) 1316 return ERR_PTR(-ENOMEM); 1317 1318 info.in_skb = in_skb; 1319 info.out_skb = skb; 1320 info.nlmsg_seq = seq; 1321 info.nlmsg_flags = 0; 1322 info.this_idx = info.start_idx = 0; 1323 1324 if (dump_one_policy(xp, dir, 0, &info) < 0) { 1325 kfree_skb(skb); 1326 return NULL; 1327 } 1328 1329 return skb; 1330 } 1331 1332 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 1333 struct rtattr **xfrma) 1334 { 1335 struct xfrm_policy *xp; 1336 struct xfrm_userpolicy_id *p; 1337 u8 type = XFRM_POLICY_TYPE_MAIN; 1338 int err; 1339 struct km_event c; 1340 int delete; 1341 1342 p = nlmsg_data(nlh); 1343 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; 1344 1345 err = copy_from_user_policy_type(&type, xfrma); 1346 if (err) 1347 return err; 1348 1349 err = verify_policy_dir(p->dir); 1350 if (err) 1351 return err; 1352 1353 if (p->index) 1354 xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err); 1355 else { 1356 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; 1357 struct xfrm_policy tmp; 1358 1359 err = verify_sec_ctx_len(xfrma); 1360 if (err) 1361 return err; 1362 1363 memset(&tmp, 0, sizeof(struct xfrm_policy)); 1364 if (rt) { 1365 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt); 1366 1367 if ((err = security_xfrm_policy_alloc(&tmp, uctx))) 1368 return err; 1369 } 1370 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, 1371 delete, &err); 1372 security_xfrm_policy_free(&tmp); 1373 } 1374 if (xp == NULL) 1375 return -ENOENT; 1376 1377 if (!delete) { 1378 struct sk_buff *resp_skb; 1379 1380 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq); 1381 if (IS_ERR(resp_skb)) { 1382 err = PTR_ERR(resp_skb); 1383 } else { 1384 err = nlmsg_unicast(xfrm_nl, resp_skb, 1385 NETLINK_CB(skb).pid); 1386 } 1387 } else { 1388 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 1389 AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL); 1390 1391 if (err != 0) 1392 goto out; 1393 1394 c.data.byid = p->index; 1395 c.event = nlh->nlmsg_type; 1396 c.seq = nlh->nlmsg_seq; 1397 c.pid = nlh->nlmsg_pid; 1398 km_policy_notify(xp, p->dir, &c); 1399 } 1400 1401 out: 1402 xfrm_pol_put(xp); 1403 return err; 1404 } 1405 1406 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 1407 struct rtattr **xfrma) 1408 { 1409 struct km_event c; 1410 struct xfrm_usersa_flush *p = nlmsg_data(nlh); 1411 struct xfrm_audit audit_info; 1412 int err; 1413 1414 audit_info.loginuid = NETLINK_CB(skb).loginuid; 1415 audit_info.secid = NETLINK_CB(skb).sid; 1416 err = xfrm_state_flush(p->proto, &audit_info); 1417 if (err) 1418 return err; 1419 c.data.proto = p->proto; 1420 c.event = nlh->nlmsg_type; 1421 c.seq = nlh->nlmsg_seq; 1422 c.pid = nlh->nlmsg_pid; 1423 km_state_notify(NULL, &c); 1424 1425 return 0; 1426 } 1427 1428 1429 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c) 1430 { 1431 struct xfrm_aevent_id *id; 1432 struct nlmsghdr *nlh; 1433 1434 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); 1435 if (nlh == NULL) 1436 return -EMSGSIZE; 1437 1438 id = nlmsg_data(nlh); 1439 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr)); 1440 id->sa_id.spi = x->id.spi; 1441 id->sa_id.family = x->props.family; 1442 id->sa_id.proto = x->id.proto; 1443 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr)); 1444 id->reqid = x->props.reqid; 1445 id->flags = c->data.aevent; 1446 1447 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); 1448 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft); 1449 1450 if (id->flags & XFRM_AE_RTHR) 1451 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff); 1452 1453 if (id->flags & XFRM_AE_ETHR) 1454 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH, 1455 x->replay_maxage * 10 / HZ); 1456 1457 return nlmsg_end(skb, nlh); 1458 1459 nla_put_failure: 1460 nlmsg_cancel(skb, nlh); 1461 return -EMSGSIZE; 1462 } 1463 1464 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 1465 struct rtattr **xfrma) 1466 { 1467 struct xfrm_state *x; 1468 struct sk_buff *r_skb; 1469 int err; 1470 struct km_event c; 1471 struct xfrm_aevent_id *p = nlmsg_data(nlh); 1472 int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id)); 1473 struct xfrm_usersa_id *id = &p->sa_id; 1474 1475 len += RTA_SPACE(sizeof(struct xfrm_replay_state)); 1476 len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur)); 1477 1478 if (p->flags&XFRM_AE_RTHR) 1479 len+=RTA_SPACE(sizeof(u32)); 1480 1481 if (p->flags&XFRM_AE_ETHR) 1482 len+=RTA_SPACE(sizeof(u32)); 1483 1484 r_skb = alloc_skb(len, GFP_ATOMIC); 1485 if (r_skb == NULL) 1486 return -ENOMEM; 1487 1488 x = xfrm_state_lookup(&id->daddr, id->spi, id->proto, id->family); 1489 if (x == NULL) { 1490 kfree_skb(r_skb); 1491 return -ESRCH; 1492 } 1493 1494 /* 1495 * XXX: is this lock really needed - none of the other 1496 * gets lock (the concern is things getting updated 1497 * while we are still reading) - jhs 1498 */ 1499 spin_lock_bh(&x->lock); 1500 c.data.aevent = p->flags; 1501 c.seq = nlh->nlmsg_seq; 1502 c.pid = nlh->nlmsg_pid; 1503 1504 if (build_aevent(r_skb, x, &c) < 0) 1505 BUG(); 1506 err = nlmsg_unicast(xfrm_nl, r_skb, NETLINK_CB(skb).pid); 1507 spin_unlock_bh(&x->lock); 1508 xfrm_state_put(x); 1509 return err; 1510 } 1511 1512 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 1513 struct rtattr **xfrma) 1514 { 1515 struct xfrm_state *x; 1516 struct km_event c; 1517 int err = - EINVAL; 1518 struct xfrm_aevent_id *p = nlmsg_data(nlh); 1519 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1]; 1520 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1]; 1521 1522 if (!lt && !rp) 1523 return err; 1524 1525 /* pedantic mode - thou shalt sayeth replaceth */ 1526 if (!(nlh->nlmsg_flags&NLM_F_REPLACE)) 1527 return err; 1528 1529 x = xfrm_state_lookup(&p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family); 1530 if (x == NULL) 1531 return -ESRCH; 1532 1533 if (x->km.state != XFRM_STATE_VALID) 1534 goto out; 1535 1536 spin_lock_bh(&x->lock); 1537 err = xfrm_update_ae_params(x, xfrma); 1538 spin_unlock_bh(&x->lock); 1539 if (err < 0) 1540 goto out; 1541 1542 c.event = nlh->nlmsg_type; 1543 c.seq = nlh->nlmsg_seq; 1544 c.pid = nlh->nlmsg_pid; 1545 c.data.aevent = XFRM_AE_CU; 1546 km_state_notify(x, &c); 1547 err = 0; 1548 out: 1549 xfrm_state_put(x); 1550 return err; 1551 } 1552 1553 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 1554 struct rtattr **xfrma) 1555 { 1556 struct km_event c; 1557 u8 type = XFRM_POLICY_TYPE_MAIN; 1558 int err; 1559 struct xfrm_audit audit_info; 1560 1561 err = copy_from_user_policy_type(&type, xfrma); 1562 if (err) 1563 return err; 1564 1565 audit_info.loginuid = NETLINK_CB(skb).loginuid; 1566 audit_info.secid = NETLINK_CB(skb).sid; 1567 err = xfrm_policy_flush(type, &audit_info); 1568 if (err) 1569 return err; 1570 c.data.type = type; 1571 c.event = nlh->nlmsg_type; 1572 c.seq = nlh->nlmsg_seq; 1573 c.pid = nlh->nlmsg_pid; 1574 km_policy_notify(NULL, 0, &c); 1575 return 0; 1576 } 1577 1578 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, 1579 struct rtattr **xfrma) 1580 { 1581 struct xfrm_policy *xp; 1582 struct xfrm_user_polexpire *up = nlmsg_data(nlh); 1583 struct xfrm_userpolicy_info *p = &up->pol; 1584 u8 type = XFRM_POLICY_TYPE_MAIN; 1585 int err = -ENOENT; 1586 1587 err = copy_from_user_policy_type(&type, xfrma); 1588 if (err) 1589 return err; 1590 1591 if (p->index) 1592 xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err); 1593 else { 1594 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; 1595 struct xfrm_policy tmp; 1596 1597 err = verify_sec_ctx_len(xfrma); 1598 if (err) 1599 return err; 1600 1601 memset(&tmp, 0, sizeof(struct xfrm_policy)); 1602 if (rt) { 1603 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt); 1604 1605 if ((err = security_xfrm_policy_alloc(&tmp, uctx))) 1606 return err; 1607 } 1608 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, 1609 0, &err); 1610 security_xfrm_policy_free(&tmp); 1611 } 1612 1613 if (xp == NULL) 1614 return -ENOENT; 1615 read_lock(&xp->lock); 1616 if (xp->dead) { 1617 read_unlock(&xp->lock); 1618 goto out; 1619 } 1620 1621 read_unlock(&xp->lock); 1622 err = 0; 1623 if (up->hard) { 1624 xfrm_policy_delete(xp, p->dir); 1625 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 1626 AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL); 1627 1628 } else { 1629 // reset the timers here? 1630 printk("Dont know what to do with soft policy expire\n"); 1631 } 1632 km_policy_expired(xp, p->dir, up->hard, current->pid); 1633 1634 out: 1635 xfrm_pol_put(xp); 1636 return err; 1637 } 1638 1639 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, 1640 struct rtattr **xfrma) 1641 { 1642 struct xfrm_state *x; 1643 int err; 1644 struct xfrm_user_expire *ue = nlmsg_data(nlh); 1645 struct xfrm_usersa_info *p = &ue->state; 1646 1647 x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family); 1648 1649 err = -ENOENT; 1650 if (x == NULL) 1651 return err; 1652 1653 spin_lock_bh(&x->lock); 1654 err = -EINVAL; 1655 if (x->km.state != XFRM_STATE_VALID) 1656 goto out; 1657 km_state_expired(x, ue->hard, current->pid); 1658 1659 if (ue->hard) { 1660 __xfrm_state_delete(x); 1661 xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, 1662 AUDIT_MAC_IPSEC_DELSA, 1, NULL, x); 1663 } 1664 err = 0; 1665 out: 1666 spin_unlock_bh(&x->lock); 1667 xfrm_state_put(x); 1668 return err; 1669 } 1670 1671 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, 1672 struct rtattr **xfrma) 1673 { 1674 struct xfrm_policy *xp; 1675 struct xfrm_user_tmpl *ut; 1676 int i; 1677 struct rtattr *rt = xfrma[XFRMA_TMPL-1]; 1678 1679 struct xfrm_user_acquire *ua = nlmsg_data(nlh); 1680 struct xfrm_state *x = xfrm_state_alloc(); 1681 int err = -ENOMEM; 1682 1683 if (!x) 1684 return err; 1685 1686 err = verify_newpolicy_info(&ua->policy); 1687 if (err) { 1688 printk("BAD policy passed\n"); 1689 kfree(x); 1690 return err; 1691 } 1692 1693 /* build an XP */ 1694 xp = xfrm_policy_construct(&ua->policy, (struct rtattr **) xfrma, &err); 1695 if (!xp) { 1696 kfree(x); 1697 return err; 1698 } 1699 1700 memcpy(&x->id, &ua->id, sizeof(ua->id)); 1701 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr)); 1702 memcpy(&x->sel, &ua->sel, sizeof(ua->sel)); 1703 1704 ut = RTA_DATA(rt); 1705 /* extract the templates and for each call km_key */ 1706 for (i = 0; i < xp->xfrm_nr; i++, ut++) { 1707 struct xfrm_tmpl *t = &xp->xfrm_vec[i]; 1708 memcpy(&x->id, &t->id, sizeof(x->id)); 1709 x->props.mode = t->mode; 1710 x->props.reqid = t->reqid; 1711 x->props.family = ut->family; 1712 t->aalgos = ua->aalgos; 1713 t->ealgos = ua->ealgos; 1714 t->calgos = ua->calgos; 1715 err = km_query(x, t, xp); 1716 1717 } 1718 1719 kfree(x); 1720 kfree(xp); 1721 1722 return 0; 1723 } 1724 1725 #ifdef CONFIG_XFRM_MIGRATE 1726 static int verify_user_migrate(struct rtattr **xfrma) 1727 { 1728 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1]; 1729 struct xfrm_user_migrate *um; 1730 1731 if (!rt) 1732 return -EINVAL; 1733 1734 if ((rt->rta_len - sizeof(*rt)) < sizeof(*um)) 1735 return -EINVAL; 1736 1737 return 0; 1738 } 1739 1740 static int copy_from_user_migrate(struct xfrm_migrate *ma, 1741 struct rtattr **xfrma, int *num) 1742 { 1743 struct rtattr *rt = xfrma[XFRMA_MIGRATE-1]; 1744 struct xfrm_user_migrate *um; 1745 int i, num_migrate; 1746 1747 um = RTA_DATA(rt); 1748 num_migrate = (rt->rta_len - sizeof(*rt)) / sizeof(*um); 1749 1750 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) 1751 return -EINVAL; 1752 1753 for (i = 0; i < num_migrate; i++, um++, ma++) { 1754 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr)); 1755 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr)); 1756 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr)); 1757 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr)); 1758 1759 ma->proto = um->proto; 1760 ma->mode = um->mode; 1761 ma->reqid = um->reqid; 1762 1763 ma->old_family = um->old_family; 1764 ma->new_family = um->new_family; 1765 } 1766 1767 *num = i; 1768 return 0; 1769 } 1770 1771 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, 1772 struct rtattr **xfrma) 1773 { 1774 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh); 1775 struct xfrm_migrate m[XFRM_MAX_DEPTH]; 1776 u8 type; 1777 int err; 1778 int n = 0; 1779 1780 err = verify_user_migrate((struct rtattr **)xfrma); 1781 if (err) 1782 return err; 1783 1784 err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); 1785 if (err) 1786 return err; 1787 1788 err = copy_from_user_migrate((struct xfrm_migrate *)m, 1789 (struct rtattr **)xfrma, &n); 1790 if (err) 1791 return err; 1792 1793 if (!n) 1794 return 0; 1795 1796 xfrm_migrate(&pi->sel, pi->dir, type, m, n); 1797 1798 return 0; 1799 } 1800 #else 1801 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, 1802 struct rtattr **xfrma) 1803 { 1804 return -ENOPROTOOPT; 1805 } 1806 #endif 1807 1808 #ifdef CONFIG_XFRM_MIGRATE 1809 static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb) 1810 { 1811 struct xfrm_user_migrate um; 1812 1813 memset(&um, 0, sizeof(um)); 1814 um.proto = m->proto; 1815 um.mode = m->mode; 1816 um.reqid = m->reqid; 1817 um.old_family = m->old_family; 1818 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr)); 1819 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr)); 1820 um.new_family = m->new_family; 1821 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr)); 1822 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr)); 1823 1824 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um); 1825 } 1826 1827 static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m, 1828 int num_migrate, struct xfrm_selector *sel, 1829 u8 dir, u8 type) 1830 { 1831 struct xfrm_migrate *mp; 1832 struct xfrm_userpolicy_id *pol_id; 1833 struct nlmsghdr *nlh; 1834 int i; 1835 1836 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0); 1837 if (nlh == NULL) 1838 return -EMSGSIZE; 1839 1840 pol_id = nlmsg_data(nlh); 1841 /* copy data from selector, dir, and type to the pol_id */ 1842 memset(pol_id, 0, sizeof(*pol_id)); 1843 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); 1844 pol_id->dir = dir; 1845 1846 if (copy_to_user_policy_type(type, skb) < 0) 1847 goto nlmsg_failure; 1848 1849 for (i = 0, mp = m ; i < num_migrate; i++, mp++) { 1850 if (copy_to_user_migrate(mp, skb) < 0) 1851 goto nlmsg_failure; 1852 } 1853 1854 return nlmsg_end(skb, nlh); 1855 nlmsg_failure: 1856 nlmsg_cancel(skb, nlh); 1857 return -EMSGSIZE; 1858 } 1859 1860 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, 1861 struct xfrm_migrate *m, int num_migrate) 1862 { 1863 struct sk_buff *skb; 1864 size_t len; 1865 1866 len = RTA_SPACE(sizeof(struct xfrm_user_migrate) * num_migrate); 1867 len += NLMSG_SPACE(sizeof(struct xfrm_userpolicy_id)); 1868 len += userpolicy_type_attrsize(); 1869 skb = alloc_skb(len, GFP_ATOMIC); 1870 if (skb == NULL) 1871 return -ENOMEM; 1872 1873 /* build migrate */ 1874 if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0) 1875 BUG(); 1876 1877 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC); 1878 } 1879 #else 1880 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, 1881 struct xfrm_migrate *m, int num_migrate) 1882 { 1883 return -ENOPROTOOPT; 1884 } 1885 #endif 1886 1887 #define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type)) 1888 1889 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { 1890 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), 1891 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 1892 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 1893 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), 1894 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 1895 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 1896 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info), 1897 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), 1898 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), 1899 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), 1900 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), 1901 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), 1902 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), 1903 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = NLMSG_LENGTH(0), 1904 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), 1905 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), 1906 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), 1907 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 1908 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = NLMSG_LENGTH(sizeof(u32)), 1909 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = NLMSG_LENGTH(sizeof(u32)), 1910 }; 1911 1912 #undef XMSGSIZE 1913 1914 static struct xfrm_link { 1915 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct rtattr **); 1916 int (*dump)(struct sk_buff *, struct netlink_callback *); 1917 } xfrm_dispatch[XFRM_NR_MSGTYPES] = { 1918 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 1919 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, 1920 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, 1921 .dump = xfrm_dump_sa }, 1922 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 1923 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, 1924 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, 1925 .dump = xfrm_dump_policy }, 1926 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, 1927 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire }, 1928 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire }, 1929 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 1930 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 1931 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire}, 1932 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa }, 1933 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy }, 1934 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae }, 1935 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, 1936 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate }, 1937 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo }, 1938 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo }, 1939 }; 1940 1941 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1942 { 1943 struct rtattr *xfrma[XFRMA_MAX]; 1944 struct xfrm_link *link; 1945 int type, min_len; 1946 1947 type = nlh->nlmsg_type; 1948 if (type > XFRM_MSG_MAX) 1949 return -EINVAL; 1950 1951 type -= XFRM_MSG_BASE; 1952 link = &xfrm_dispatch[type]; 1953 1954 /* All operations require privileges, even GET */ 1955 if (security_netlink_recv(skb, CAP_NET_ADMIN)) 1956 return -EPERM; 1957 1958 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || 1959 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && 1960 (nlh->nlmsg_flags & NLM_F_DUMP)) { 1961 if (link->dump == NULL) 1962 return -EINVAL; 1963 1964 return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL); 1965 } 1966 1967 memset(xfrma, 0, sizeof(xfrma)); 1968 1969 if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type])) 1970 return -EINVAL; 1971 1972 if (nlh->nlmsg_len > min_len) { 1973 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); 1974 struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len); 1975 1976 while (RTA_OK(attr, attrlen)) { 1977 unsigned short flavor = attr->rta_type; 1978 if (flavor) { 1979 if (flavor > XFRMA_MAX) 1980 return -EINVAL; 1981 xfrma[flavor - 1] = attr; 1982 } 1983 attr = RTA_NEXT(attr, attrlen); 1984 } 1985 } 1986 1987 if (link->doit == NULL) 1988 return -EINVAL; 1989 1990 return link->doit(skb, nlh, xfrma); 1991 } 1992 1993 static void xfrm_netlink_rcv(struct sock *sk, int len) 1994 { 1995 unsigned int qlen = 0; 1996 1997 do { 1998 mutex_lock(&xfrm_cfg_mutex); 1999 netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg); 2000 mutex_unlock(&xfrm_cfg_mutex); 2001 2002 } while (qlen); 2003 } 2004 2005 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c) 2006 { 2007 struct xfrm_user_expire *ue; 2008 struct nlmsghdr *nlh; 2009 2010 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); 2011 if (nlh == NULL) 2012 return -EMSGSIZE; 2013 2014 ue = nlmsg_data(nlh); 2015 copy_to_user_state(x, &ue->state); 2016 ue->hard = (c->data.hard != 0) ? 1 : 0; 2017 2018 return nlmsg_end(skb, nlh); 2019 } 2020 2021 static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c) 2022 { 2023 struct sk_buff *skb; 2024 int len = NLMSG_LENGTH(sizeof(struct xfrm_user_expire)); 2025 2026 skb = alloc_skb(len, GFP_ATOMIC); 2027 if (skb == NULL) 2028 return -ENOMEM; 2029 2030 if (build_expire(skb, x, c) < 0) 2031 BUG(); 2032 2033 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); 2034 } 2035 2036 static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c) 2037 { 2038 struct sk_buff *skb; 2039 int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id)); 2040 2041 len += RTA_SPACE(sizeof(struct xfrm_replay_state)); 2042 len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur)); 2043 skb = alloc_skb(len, GFP_ATOMIC); 2044 if (skb == NULL) 2045 return -ENOMEM; 2046 2047 if (build_aevent(skb, x, c) < 0) 2048 BUG(); 2049 2050 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC); 2051 } 2052 2053 static int xfrm_notify_sa_flush(struct km_event *c) 2054 { 2055 struct xfrm_usersa_flush *p; 2056 struct nlmsghdr *nlh; 2057 struct sk_buff *skb; 2058 int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)); 2059 2060 skb = alloc_skb(len, GFP_ATOMIC); 2061 if (skb == NULL) 2062 return -ENOMEM; 2063 2064 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); 2065 if (nlh == NULL) { 2066 kfree_skb(skb); 2067 return -EMSGSIZE; 2068 } 2069 2070 p = nlmsg_data(nlh); 2071 p->proto = c->data.proto; 2072 2073 nlmsg_end(skb, nlh); 2074 2075 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); 2076 } 2077 2078 static inline int xfrm_sa_len(struct xfrm_state *x) 2079 { 2080 int l = 0; 2081 if (x->aalg) 2082 l += RTA_SPACE(alg_len(x->aalg)); 2083 if (x->ealg) 2084 l += RTA_SPACE(alg_len(x->ealg)); 2085 if (x->calg) 2086 l += RTA_SPACE(sizeof(*x->calg)); 2087 if (x->encap) 2088 l += RTA_SPACE(sizeof(*x->encap)); 2089 2090 return l; 2091 } 2092 2093 static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c) 2094 { 2095 struct xfrm_usersa_info *p; 2096 struct xfrm_usersa_id *id; 2097 struct nlmsghdr *nlh; 2098 struct sk_buff *skb; 2099 int len = xfrm_sa_len(x); 2100 int headlen; 2101 2102 headlen = sizeof(*p); 2103 if (c->event == XFRM_MSG_DELSA) { 2104 len += RTA_SPACE(headlen); 2105 headlen = sizeof(*id); 2106 } 2107 len += NLMSG_SPACE(headlen); 2108 2109 skb = alloc_skb(len, GFP_ATOMIC); 2110 if (skb == NULL) 2111 return -ENOMEM; 2112 2113 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); 2114 if (nlh == NULL) 2115 goto nla_put_failure; 2116 2117 p = nlmsg_data(nlh); 2118 if (c->event == XFRM_MSG_DELSA) { 2119 struct nlattr *attr; 2120 2121 id = nlmsg_data(nlh); 2122 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); 2123 id->spi = x->id.spi; 2124 id->family = x->props.family; 2125 id->proto = x->id.proto; 2126 2127 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p)); 2128 if (attr == NULL) 2129 goto nla_put_failure; 2130 2131 p = nla_data(attr); 2132 } 2133 2134 copy_to_user_state(x, p); 2135 2136 if (x->aalg) 2137 NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg); 2138 if (x->ealg) 2139 NLA_PUT(skb, XFRMA_ALG_CRYPT, alg_len(x->ealg), x->ealg); 2140 if (x->calg) 2141 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); 2142 2143 if (x->encap) 2144 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 2145 2146 nlmsg_end(skb, nlh); 2147 2148 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); 2149 2150 nla_put_failure: 2151 kfree_skb(skb); 2152 return -1; 2153 } 2154 2155 static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c) 2156 { 2157 2158 switch (c->event) { 2159 case XFRM_MSG_EXPIRE: 2160 return xfrm_exp_state_notify(x, c); 2161 case XFRM_MSG_NEWAE: 2162 return xfrm_aevent_state_notify(x, c); 2163 case XFRM_MSG_DELSA: 2164 case XFRM_MSG_UPDSA: 2165 case XFRM_MSG_NEWSA: 2166 return xfrm_notify_sa(x, c); 2167 case XFRM_MSG_FLUSHSA: 2168 return xfrm_notify_sa_flush(c); 2169 default: 2170 printk("xfrm_user: Unknown SA event %d\n", c->event); 2171 break; 2172 } 2173 2174 return 0; 2175 2176 } 2177 2178 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, 2179 struct xfrm_tmpl *xt, struct xfrm_policy *xp, 2180 int dir) 2181 { 2182 struct xfrm_user_acquire *ua; 2183 struct nlmsghdr *nlh; 2184 __u32 seq = xfrm_get_acqseq(); 2185 2186 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0); 2187 if (nlh == NULL) 2188 return -EMSGSIZE; 2189 2190 ua = nlmsg_data(nlh); 2191 memcpy(&ua->id, &x->id, sizeof(ua->id)); 2192 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); 2193 memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); 2194 copy_to_user_policy(xp, &ua->policy, dir); 2195 ua->aalgos = xt->aalgos; 2196 ua->ealgos = xt->ealgos; 2197 ua->calgos = xt->calgos; 2198 ua->seq = x->km.seq = seq; 2199 2200 if (copy_to_user_tmpl(xp, skb) < 0) 2201 goto nlmsg_failure; 2202 if (copy_to_user_state_sec_ctx(x, skb)) 2203 goto nlmsg_failure; 2204 if (copy_to_user_policy_type(xp->type, skb) < 0) 2205 goto nlmsg_failure; 2206 2207 return nlmsg_end(skb, nlh); 2208 2209 nlmsg_failure: 2210 nlmsg_cancel(skb, nlh); 2211 return -EMSGSIZE; 2212 } 2213 2214 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, 2215 struct xfrm_policy *xp, int dir) 2216 { 2217 struct sk_buff *skb; 2218 size_t len; 2219 2220 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 2221 len += NLMSG_SPACE(sizeof(struct xfrm_user_acquire)); 2222 len += RTA_SPACE(xfrm_user_sec_ctx_size(x->security)); 2223 len += userpolicy_type_attrsize(); 2224 skb = alloc_skb(len, GFP_ATOMIC); 2225 if (skb == NULL) 2226 return -ENOMEM; 2227 2228 if (build_acquire(skb, x, xt, xp, dir) < 0) 2229 BUG(); 2230 2231 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC); 2232 } 2233 2234 /* User gives us xfrm_user_policy_info followed by an array of 0 2235 * or more templates. 2236 */ 2237 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt, 2238 u8 *data, int len, int *dir) 2239 { 2240 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data; 2241 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1); 2242 struct xfrm_policy *xp; 2243 int nr; 2244 2245 switch (sk->sk_family) { 2246 case AF_INET: 2247 if (opt != IP_XFRM_POLICY) { 2248 *dir = -EOPNOTSUPP; 2249 return NULL; 2250 } 2251 break; 2252 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 2253 case AF_INET6: 2254 if (opt != IPV6_XFRM_POLICY) { 2255 *dir = -EOPNOTSUPP; 2256 return NULL; 2257 } 2258 break; 2259 #endif 2260 default: 2261 *dir = -EINVAL; 2262 return NULL; 2263 } 2264 2265 *dir = -EINVAL; 2266 2267 if (len < sizeof(*p) || 2268 verify_newpolicy_info(p)) 2269 return NULL; 2270 2271 nr = ((len - sizeof(*p)) / sizeof(*ut)); 2272 if (validate_tmpl(nr, ut, p->sel.family)) 2273 return NULL; 2274 2275 if (p->dir > XFRM_POLICY_OUT) 2276 return NULL; 2277 2278 xp = xfrm_policy_alloc(GFP_KERNEL); 2279 if (xp == NULL) { 2280 *dir = -ENOBUFS; 2281 return NULL; 2282 } 2283 2284 copy_from_user_policy(xp, p); 2285 xp->type = XFRM_POLICY_TYPE_MAIN; 2286 copy_templates(xp, ut, nr); 2287 2288 *dir = p->dir; 2289 2290 return xp; 2291 } 2292 2293 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, 2294 int dir, struct km_event *c) 2295 { 2296 struct xfrm_user_polexpire *upe; 2297 struct nlmsghdr *nlh; 2298 int hard = c->data.hard; 2299 2300 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); 2301 if (nlh == NULL) 2302 return -EMSGSIZE; 2303 2304 upe = nlmsg_data(nlh); 2305 copy_to_user_policy(xp, &upe->pol, dir); 2306 if (copy_to_user_tmpl(xp, skb) < 0) 2307 goto nlmsg_failure; 2308 if (copy_to_user_sec_ctx(xp, skb)) 2309 goto nlmsg_failure; 2310 if (copy_to_user_policy_type(xp->type, skb) < 0) 2311 goto nlmsg_failure; 2312 upe->hard = !!hard; 2313 2314 return nlmsg_end(skb, nlh); 2315 2316 nlmsg_failure: 2317 nlmsg_cancel(skb, nlh); 2318 return -EMSGSIZE; 2319 } 2320 2321 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c) 2322 { 2323 struct sk_buff *skb; 2324 size_t len; 2325 2326 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 2327 len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire)); 2328 len += RTA_SPACE(xfrm_user_sec_ctx_size(xp->security)); 2329 len += userpolicy_type_attrsize(); 2330 skb = alloc_skb(len, GFP_ATOMIC); 2331 if (skb == NULL) 2332 return -ENOMEM; 2333 2334 if (build_polexpire(skb, xp, dir, c) < 0) 2335 BUG(); 2336 2337 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); 2338 } 2339 2340 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c) 2341 { 2342 struct xfrm_userpolicy_info *p; 2343 struct xfrm_userpolicy_id *id; 2344 struct nlmsghdr *nlh; 2345 struct sk_buff *skb; 2346 int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 2347 int headlen; 2348 2349 headlen = sizeof(*p); 2350 if (c->event == XFRM_MSG_DELPOLICY) { 2351 len += RTA_SPACE(headlen); 2352 headlen = sizeof(*id); 2353 } 2354 len += userpolicy_type_attrsize(); 2355 len += NLMSG_SPACE(headlen); 2356 2357 skb = alloc_skb(len, GFP_ATOMIC); 2358 if (skb == NULL) 2359 return -ENOMEM; 2360 2361 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); 2362 if (nlh == NULL) 2363 goto nlmsg_failure; 2364 2365 p = nlmsg_data(nlh); 2366 if (c->event == XFRM_MSG_DELPOLICY) { 2367 struct nlattr *attr; 2368 2369 id = nlmsg_data(nlh); 2370 memset(id, 0, sizeof(*id)); 2371 id->dir = dir; 2372 if (c->data.byid) 2373 id->index = xp->index; 2374 else 2375 memcpy(&id->sel, &xp->selector, sizeof(id->sel)); 2376 2377 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p)); 2378 if (attr == NULL) 2379 goto nlmsg_failure; 2380 2381 p = nla_data(attr); 2382 } 2383 2384 copy_to_user_policy(xp, p, dir); 2385 if (copy_to_user_tmpl(xp, skb) < 0) 2386 goto nlmsg_failure; 2387 if (copy_to_user_policy_type(xp->type, skb) < 0) 2388 goto nlmsg_failure; 2389 2390 nlmsg_end(skb, nlh); 2391 2392 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); 2393 2394 nlmsg_failure: 2395 kfree_skb(skb); 2396 return -1; 2397 } 2398 2399 static int xfrm_notify_policy_flush(struct km_event *c) 2400 { 2401 struct nlmsghdr *nlh; 2402 struct sk_buff *skb; 2403 int len = 0; 2404 len += userpolicy_type_attrsize(); 2405 len += NLMSG_LENGTH(0); 2406 2407 skb = alloc_skb(len, GFP_ATOMIC); 2408 if (skb == NULL) 2409 return -ENOMEM; 2410 2411 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); 2412 if (nlh == NULL) 2413 goto nlmsg_failure; 2414 if (copy_to_user_policy_type(c->data.type, skb) < 0) 2415 goto nlmsg_failure; 2416 2417 nlmsg_end(skb, nlh); 2418 2419 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); 2420 2421 nlmsg_failure: 2422 kfree_skb(skb); 2423 return -1; 2424 } 2425 2426 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c) 2427 { 2428 2429 switch (c->event) { 2430 case XFRM_MSG_NEWPOLICY: 2431 case XFRM_MSG_UPDPOLICY: 2432 case XFRM_MSG_DELPOLICY: 2433 return xfrm_notify_policy(xp, dir, c); 2434 case XFRM_MSG_FLUSHPOLICY: 2435 return xfrm_notify_policy_flush(c); 2436 case XFRM_MSG_POLEXPIRE: 2437 return xfrm_exp_policy_notify(xp, dir, c); 2438 default: 2439 printk("xfrm_user: Unknown Policy event %d\n", c->event); 2440 } 2441 2442 return 0; 2443 2444 } 2445 2446 static int build_report(struct sk_buff *skb, u8 proto, 2447 struct xfrm_selector *sel, xfrm_address_t *addr) 2448 { 2449 struct xfrm_user_report *ur; 2450 struct nlmsghdr *nlh; 2451 2452 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0); 2453 if (nlh == NULL) 2454 return -EMSGSIZE; 2455 2456 ur = nlmsg_data(nlh); 2457 ur->proto = proto; 2458 memcpy(&ur->sel, sel, sizeof(ur->sel)); 2459 2460 if (addr) 2461 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr); 2462 2463 return nlmsg_end(skb, nlh); 2464 2465 nla_put_failure: 2466 nlmsg_cancel(skb, nlh); 2467 return -EMSGSIZE; 2468 } 2469 2470 static int xfrm_send_report(u8 proto, struct xfrm_selector *sel, 2471 xfrm_address_t *addr) 2472 { 2473 struct sk_buff *skb; 2474 size_t len; 2475 2476 len = NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct xfrm_user_report))); 2477 skb = alloc_skb(len, GFP_ATOMIC); 2478 if (skb == NULL) 2479 return -ENOMEM; 2480 2481 if (build_report(skb, proto, sel, addr) < 0) 2482 BUG(); 2483 2484 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC); 2485 } 2486 2487 static struct xfrm_mgr netlink_mgr = { 2488 .id = "netlink", 2489 .notify = xfrm_send_state_notify, 2490 .acquire = xfrm_send_acquire, 2491 .compile_policy = xfrm_compile_policy, 2492 .notify_policy = xfrm_send_policy_notify, 2493 .report = xfrm_send_report, 2494 .migrate = xfrm_send_migrate, 2495 }; 2496 2497 static int __init xfrm_user_init(void) 2498 { 2499 struct sock *nlsk; 2500 2501 printk(KERN_INFO "Initializing XFRM netlink socket\n"); 2502 2503 nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX, 2504 xfrm_netlink_rcv, NULL, THIS_MODULE); 2505 if (nlsk == NULL) 2506 return -ENOMEM; 2507 rcu_assign_pointer(xfrm_nl, nlsk); 2508 2509 xfrm_register_km(&netlink_mgr); 2510 2511 return 0; 2512 } 2513 2514 static void __exit xfrm_user_exit(void) 2515 { 2516 struct sock *nlsk = xfrm_nl; 2517 2518 xfrm_unregister_km(&netlink_mgr); 2519 rcu_assign_pointer(xfrm_nl, NULL); 2520 synchronize_rcu(); 2521 sock_release(nlsk->sk_socket); 2522 } 2523 2524 module_init(xfrm_user_init); 2525 module_exit(xfrm_user_exit); 2526 MODULE_LICENSE("GPL"); 2527 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM); 2528 2529