/* xfrm_user.c: User interface to configure xfrm engine.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *
 */

#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
#include <net/ah.h>
#include <asm/uaccess.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#endif

static inline int aead_len(struct xfrm_algo_aead *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
{
	struct nlattr *rt = attrs[type];
	struct xfrm_algo *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < xfrm_alg_len(algp))
		return -EINVAL;

	switch (type) {
	case XFRMA_ALG_AUTH:
	case XFRMA_ALG_CRYPT:
	case XFRMA_ALG_COMP:
		break;

	default:
		return -EINVAL;
	}

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static int verify_auth_trunc(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
	struct xfrm_algo_auth *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < xfrm_alg_auth_len(algp))
		return -EINVAL;

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static int verify_aead(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
	struct xfrm_algo_aead *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < aead_len(algp))
		return -EINVAL;

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
			    xfrm_address_t **addrp)
{
	struct nlattr *rt = attrs[type];

	if (rt && addrp)
		*addrp = nla_data(rt);
}

static inline int verify_sec_ctx_len(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
		return -EINVAL;

	return 0;
}

static inline int verify_replay(struct xfrm_usersa_info *p,
				struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];

	if ((p->flags & XFRM_STATE_ESN) && !rt)
		return -EINVAL;

	if (!rt)
		return 0;

	if (p->id.proto != IPPROTO_ESP)
		return -EINVAL;

	if (p->replay_window != 0)
		return -EINVAL;

	return 0;
}

static int verify_newsa_info(struct xfrm_usersa_info *p,
			     struct nlattr **attrs)
{
	int err;

	err = -EINVAL;
	switch (p->family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		err = -EAFNOSUPPORT;
		goto out;
#endif

	default:
		goto out;
	}

	err = -EINVAL;
	switch (p->id.proto) {
	case IPPROTO_AH:
		if ((!attrs[XFRMA_ALG_AUTH] &&
		     !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
attrs[XFRMA_ALG_AEAD] || 170 attrs[XFRMA_ALG_CRYPT] || 171 attrs[XFRMA_ALG_COMP] || 172 attrs[XFRMA_TFCPAD]) 173 goto out; 174 break; 175 176 case IPPROTO_ESP: 177 if (attrs[XFRMA_ALG_COMP]) 178 goto out; 179 if (!attrs[XFRMA_ALG_AUTH] && 180 !attrs[XFRMA_ALG_AUTH_TRUNC] && 181 !attrs[XFRMA_ALG_CRYPT] && 182 !attrs[XFRMA_ALG_AEAD]) 183 goto out; 184 if ((attrs[XFRMA_ALG_AUTH] || 185 attrs[XFRMA_ALG_AUTH_TRUNC] || 186 attrs[XFRMA_ALG_CRYPT]) && 187 attrs[XFRMA_ALG_AEAD]) 188 goto out; 189 if (attrs[XFRMA_TFCPAD] && 190 p->mode != XFRM_MODE_TUNNEL) 191 goto out; 192 break; 193 194 case IPPROTO_COMP: 195 if (!attrs[XFRMA_ALG_COMP] || 196 attrs[XFRMA_ALG_AEAD] || 197 attrs[XFRMA_ALG_AUTH] || 198 attrs[XFRMA_ALG_AUTH_TRUNC] || 199 attrs[XFRMA_ALG_CRYPT] || 200 attrs[XFRMA_TFCPAD]) 201 goto out; 202 break; 203 204 #if IS_ENABLED(CONFIG_IPV6) 205 case IPPROTO_DSTOPTS: 206 case IPPROTO_ROUTING: 207 if (attrs[XFRMA_ALG_COMP] || 208 attrs[XFRMA_ALG_AUTH] || 209 attrs[XFRMA_ALG_AUTH_TRUNC] || 210 attrs[XFRMA_ALG_AEAD] || 211 attrs[XFRMA_ALG_CRYPT] || 212 attrs[XFRMA_ENCAP] || 213 attrs[XFRMA_SEC_CTX] || 214 attrs[XFRMA_TFCPAD] || 215 !attrs[XFRMA_COADDR]) 216 goto out; 217 break; 218 #endif 219 220 default: 221 goto out; 222 } 223 224 if ((err = verify_aead(attrs))) 225 goto out; 226 if ((err = verify_auth_trunc(attrs))) 227 goto out; 228 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH))) 229 goto out; 230 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT))) 231 goto out; 232 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP))) 233 goto out; 234 if ((err = verify_sec_ctx_len(attrs))) 235 goto out; 236 if ((err = verify_replay(p, attrs))) 237 goto out; 238 239 err = -EINVAL; 240 switch (p->mode) { 241 case XFRM_MODE_TRANSPORT: 242 case XFRM_MODE_TUNNEL: 243 case XFRM_MODE_ROUTEOPTIMIZATION: 244 case XFRM_MODE_BEET: 245 break; 246 247 default: 248 goto out; 249 } 250 251 err = 0; 252 253 out: 254 return err; 255 } 256 257 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, 258 struct xfrm_algo_desc *(*get_byname)(const char *, int), 259 struct nlattr *rta) 260 { 261 struct xfrm_algo *p, *ualg; 262 struct xfrm_algo_desc *algo; 263 264 if (!rta) 265 return 0; 266 267 ualg = nla_data(rta); 268 269 algo = get_byname(ualg->alg_name, 1); 270 if (!algo) 271 return -ENOSYS; 272 *props = algo->desc.sadb_alg_id; 273 274 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); 275 if (!p) 276 return -ENOMEM; 277 278 strcpy(p->alg_name, algo->name); 279 *algpp = p; 280 return 0; 281 } 282 283 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props, 284 struct nlattr *rta) 285 { 286 struct xfrm_algo *ualg; 287 struct xfrm_algo_auth *p; 288 struct xfrm_algo_desc *algo; 289 290 if (!rta) 291 return 0; 292 293 ualg = nla_data(rta); 294 295 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 296 if (!algo) 297 return -ENOSYS; 298 *props = algo->desc.sadb_alg_id; 299 300 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL); 301 if (!p) 302 return -ENOMEM; 303 304 strcpy(p->alg_name, algo->name); 305 p->alg_key_len = ualg->alg_key_len; 306 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; 307 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8); 308 309 *algpp = p; 310 return 0; 311 } 312 313 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props, 314 struct nlattr *rta) 315 { 316 struct xfrm_algo_auth *p, *ualg; 317 struct xfrm_algo_desc *algo; 318 319 if (!rta) 320 return 0; 321 322 ualg = nla_data(rta); 323 324 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 325 if (!algo) 
326 return -ENOSYS; 327 if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN || 328 ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) 329 return -EINVAL; 330 *props = algo->desc.sadb_alg_id; 331 332 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL); 333 if (!p) 334 return -ENOMEM; 335 336 strcpy(p->alg_name, algo->name); 337 if (!p->alg_trunc_len) 338 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; 339 340 *algpp = p; 341 return 0; 342 } 343 344 static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props, 345 struct nlattr *rta) 346 { 347 struct xfrm_algo_aead *p, *ualg; 348 struct xfrm_algo_desc *algo; 349 350 if (!rta) 351 return 0; 352 353 ualg = nla_data(rta); 354 355 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1); 356 if (!algo) 357 return -ENOSYS; 358 *props = algo->desc.sadb_alg_id; 359 360 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL); 361 if (!p) 362 return -ENOMEM; 363 364 strcpy(p->alg_name, algo->name); 365 *algpp = p; 366 return 0; 367 } 368 369 static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn, 370 struct nlattr *rp) 371 { 372 struct xfrm_replay_state_esn *up; 373 374 if (!replay_esn || !rp) 375 return 0; 376 377 up = nla_data(rp); 378 379 if (xfrm_replay_state_esn_len(replay_esn) != 380 xfrm_replay_state_esn_len(up)) 381 return -EINVAL; 382 383 return 0; 384 } 385 386 static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn, 387 struct xfrm_replay_state_esn **preplay_esn, 388 struct nlattr *rta) 389 { 390 struct xfrm_replay_state_esn *p, *pp, *up; 391 392 if (!rta) 393 return 0; 394 395 up = nla_data(rta); 396 397 p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); 398 if (!p) 399 return -ENOMEM; 400 401 pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); 402 if (!pp) { 403 kfree(p); 404 return -ENOMEM; 405 } 406 407 *replay_esn = p; 408 *preplay_esn = pp; 409 410 return 0; 411 } 412 413 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx) 414 { 415 int len = 0; 416 417 if (xfrm_ctx) { 418 len += sizeof(struct xfrm_user_sec_ctx); 419 len += xfrm_ctx->ctx_len; 420 } 421 return len; 422 } 423 424 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 425 { 426 memcpy(&x->id, &p->id, sizeof(x->id)); 427 memcpy(&x->sel, &p->sel, sizeof(x->sel)); 428 memcpy(&x->lft, &p->lft, sizeof(x->lft)); 429 x->props.mode = p->mode; 430 x->props.replay_window = p->replay_window; 431 x->props.reqid = p->reqid; 432 x->props.family = p->family; 433 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); 434 x->props.flags = p->flags; 435 436 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC)) 437 x->sel.family = p->family; 438 } 439 440 /* 441 * someday when pfkey also has support, we could have the code 442 * somehow made shareable and move it to xfrm_state.c - JHS 443 * 444 */ 445 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs) 446 { 447 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 448 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; 449 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 450 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; 451 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; 452 453 if (re) { 454 struct xfrm_replay_state_esn *replay_esn; 455 replay_esn = nla_data(re); 456 memcpy(x->replay_esn, replay_esn, 457 xfrm_replay_state_esn_len(replay_esn)); 458 memcpy(x->preplay_esn, replay_esn, 459 xfrm_replay_state_esn_len(replay_esn)); 460 } 461 462 if (rp) { 463 struct xfrm_replay_state *replay; 464 
replay = nla_data(rp); 465 memcpy(&x->replay, replay, sizeof(*replay)); 466 memcpy(&x->preplay, replay, sizeof(*replay)); 467 } 468 469 if (lt) { 470 struct xfrm_lifetime_cur *ltime; 471 ltime = nla_data(lt); 472 x->curlft.bytes = ltime->bytes; 473 x->curlft.packets = ltime->packets; 474 x->curlft.add_time = ltime->add_time; 475 x->curlft.use_time = ltime->use_time; 476 } 477 478 if (et) 479 x->replay_maxage = nla_get_u32(et); 480 481 if (rt) 482 x->replay_maxdiff = nla_get_u32(rt); 483 } 484 485 static struct xfrm_state *xfrm_state_construct(struct net *net, 486 struct xfrm_usersa_info *p, 487 struct nlattr **attrs, 488 int *errp) 489 { 490 struct xfrm_state *x = xfrm_state_alloc(net); 491 int err = -ENOMEM; 492 493 if (!x) 494 goto error_no_put; 495 496 copy_from_user_state(x, p); 497 498 if ((err = attach_aead(&x->aead, &x->props.ealgo, 499 attrs[XFRMA_ALG_AEAD]))) 500 goto error; 501 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo, 502 attrs[XFRMA_ALG_AUTH_TRUNC]))) 503 goto error; 504 if (!x->props.aalgo) { 505 if ((err = attach_auth(&x->aalg, &x->props.aalgo, 506 attrs[XFRMA_ALG_AUTH]))) 507 goto error; 508 } 509 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo, 510 xfrm_ealg_get_byname, 511 attrs[XFRMA_ALG_CRYPT]))) 512 goto error; 513 if ((err = attach_one_algo(&x->calg, &x->props.calgo, 514 xfrm_calg_get_byname, 515 attrs[XFRMA_ALG_COMP]))) 516 goto error; 517 518 if (attrs[XFRMA_ENCAP]) { 519 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), 520 sizeof(*x->encap), GFP_KERNEL); 521 if (x->encap == NULL) 522 goto error; 523 } 524 525 if (attrs[XFRMA_TFCPAD]) 526 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]); 527 528 if (attrs[XFRMA_COADDR]) { 529 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), 530 sizeof(*x->coaddr), GFP_KERNEL); 531 if (x->coaddr == NULL) 532 goto error; 533 } 534 535 xfrm_mark_get(attrs, &x->mark); 536 537 err = __xfrm_init_state(x, false); 538 if (err) 539 goto error; 540 541 if (attrs[XFRMA_SEC_CTX] && 542 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX]))) 543 goto error; 544 545 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn, 546 attrs[XFRMA_REPLAY_ESN_VAL]))) 547 goto error; 548 549 x->km.seq = p->seq; 550 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth; 551 /* sysctl_xfrm_aevent_etime is in 100ms units */ 552 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M; 553 554 if ((err = xfrm_init_replay(x))) 555 goto error; 556 557 /* override default values from above */ 558 xfrm_update_ae_params(x, attrs); 559 560 return x; 561 562 error: 563 x->km.state = XFRM_STATE_DEAD; 564 xfrm_state_put(x); 565 error_no_put: 566 *errp = err; 567 return NULL; 568 } 569 570 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 571 struct nlattr **attrs) 572 { 573 struct net *net = sock_net(skb->sk); 574 struct xfrm_usersa_info *p = nlmsg_data(nlh); 575 struct xfrm_state *x; 576 int err; 577 struct km_event c; 578 uid_t loginuid = audit_get_loginuid(current); 579 u32 sessionid = audit_get_sessionid(current); 580 u32 sid; 581 582 err = verify_newsa_info(p, attrs); 583 if (err) 584 return err; 585 586 x = xfrm_state_construct(net, p, attrs, &err); 587 if (!x) 588 return err; 589 590 xfrm_state_hold(x); 591 if (nlh->nlmsg_type == XFRM_MSG_NEWSA) 592 err = xfrm_state_add(x); 593 else 594 err = xfrm_state_update(x); 595 596 security_task_getsecid(current, &sid); 597 xfrm_audit_state_add(x, err ? 
				     0 : 1, loginuid, sessionid, sid);

	if (err < 0) {
		x->km.state = XFRM_STATE_DEAD;
		__xfrm_state_put(x);
		goto out;
	}

	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
out:
	xfrm_state_put(x);
	return err;
}

static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
						 struct xfrm_usersa_id *p,
						 struct nlattr **attrs,
						 int *errp)
{
	struct xfrm_state *x = NULL;
	struct xfrm_mark m;
	int err;
	u32 mark = xfrm_mark_get(attrs, &m);

	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
		err = -ESRCH;
		x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
	} else {
		xfrm_address_t *saddr = NULL;

		verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
		if (!saddr) {
			err = -EINVAL;
			goto out;
		}

		err = -ESRCH;
		x = xfrm_state_lookup_byaddr(net, mark,
					     &p->daddr, saddr,
					     p->proto, p->family);
	}

out:
	if (!x && errp)
		*errp = err;
	return x;
}
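/*
 * Sketch of the XFRM_MSG_DELSA request handled below (illustration added
 * for clarity, not part of the original source):
 *
 *	struct nlmsghdr		nlmsg_type = XFRM_MSG_DELSA
 *	struct xfrm_usersa_id	daddr / spi / family / proto
 *	[XFRMA_MARK]		optional mark and mask
 *	[XFRMA_SRCADDR]		required by xfrm_user_state_lookup() above
 *				when the protocol is not AH/ESP/IPcomp,
 *				since such states are found by address pair
 *				rather than by (daddr, spi, proto)
 */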
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err = -ESRCH;
	struct km_event c;
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	uid_t loginuid = audit_get_loginuid(current);
	u32 sessionid = audit_get_sessionid(current);
	u32 sid;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		return err;

	if ((err = security_xfrm_state_delete(x)) != 0)
		goto out;

	if (xfrm_state_kern(x)) {
		err = -EPERM;
		goto out;
	}

	err = xfrm_state_delete(x);

	if (err < 0)
		goto out;

	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;
	km_state_notify(x, &c);

out:
	security_task_getsecid(current, &sid);
	xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
	xfrm_state_put(x);
	return err;
}

static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&p->id, &x->id, sizeof(p->id));
	memcpy(&p->sel, &x->sel, sizeof(p->sel));
	memcpy(&p->lft, &x->lft, sizeof(p->lft));
	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
	memcpy(&p->stats, &x->stats, sizeof(p->stats));
	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
	p->mode = x->props.mode;
	p->replay_window = x->props.replay_window;
	p->reqid = x->props.reqid;
	p->family = x->props.family;
	p->flags = x->props.flags;
	p->seq = x->km.seq;
}

struct xfrm_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};

static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
	struct xfrm_user_sec_ctx *uctx;
	struct nlattr *attr;
	int ctx_size = sizeof(*uctx) + s->ctx_len;

	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
	if (attr == NULL)
		return -EMSGSIZE;

	uctx = nla_data(attr);
	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = ctx_size;
	uctx->ctx_doi = s->ctx_doi;
	uctx->ctx_alg = s->ctx_alg;
	uctx->ctx_len = s->ctx_len;
	memcpy(uctx + 1, s->ctx_str, s->ctx_len);

	return 0;
}

static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
	struct xfrm_algo *algo;
	struct nlattr *nla;

	nla = nla_reserve(skb, XFRMA_ALG_AUTH,
			  sizeof(*algo) + (auth->alg_key_len + 7) / 8);
	if (!nla)
		return -EMSGSIZE;

	algo = nla_data(nla);
	strcpy(algo->alg_name, auth->alg_name);
	memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
	algo->alg_key_len = auth->alg_key_len;

	return 0;
}
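/*
 * Note (added for clarity): copy_to_user_auth() above emits the
 * authentication algorithm in the legacy struct xfrm_algo layout under
 * XFRMA_ALG_AUTH (name and key only), while copy_to_user_state_extra()
 * below also dumps the full struct xfrm_algo_auth, including the ICV
 * truncation length, as XFRMA_ALG_AUTH_TRUNC.  Userspace that predates
 * configurable truncation can keep working off the legacy attribute.
 */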
/* Don't change this without updating xfrm_sa_len! */
static int copy_to_user_state_extra(struct xfrm_state *x,
				    struct xfrm_usersa_info *p,
				    struct sk_buff *skb)
{
	copy_to_user_state(x, p);

	if (x->coaddr &&
	    nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
		goto nla_put_failure;

	if (x->lastused &&
	    nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
		goto nla_put_failure;

	if (x->aead &&
	    nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
		goto nla_put_failure;

	if (x->aalg &&
	    (copy_to_user_auth(x->aalg, skb) ||
	     nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
		     xfrm_alg_auth_len(x->aalg), x->aalg)))
		goto nla_put_failure;

	if (x->ealg &&
	    nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
		goto nla_put_failure;

	if (x->calg &&
	    nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
		goto nla_put_failure;

	if (x->encap &&
	    nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
		goto nla_put_failure;

	if (x->tfcpad &&
	    nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
		goto nla_put_failure;

	if (xfrm_mark_put(skb, &x->mark))
		goto nla_put_failure;

	if (x->replay_esn &&
	    nla_put(skb, XFRMA_REPLAY_ESN_VAL,
		    xfrm_replay_state_esn_len(x->replay_esn),
		    x->replay_esn))
		goto nla_put_failure;

	if (x->security && copy_sec_ctx(x->security, skb))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_usersa_info *p;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
			XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);

	err = copy_to_user_state_extra(x, p, skb);
	if (err)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return err;
}

static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	xfrm_state_walk_done(walk);
	return 0;
}

static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		cb->args[0] = 1;
		xfrm_state_walk_init(walk, 0);
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}

static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	if (dump_one_state(x, 0, &info)) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
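/*
 * Layout sketch for the SPD info exchange below (added for clarity):
 * the XFRM_MSG_GETSPDINFO request and the XFRM_MSG_NEWSPDINFO reply both
 * start with a bare u32 flags word -- hence the NLMSG_ALIGN(4) in
 * xfrm_spdinfo_msgsize() -- and the reply then carries
 *
 *	XFRMA_SPD_INFO	(struct xfrmu_spdinfo:  in/out/fwd policy counts)
 *	XFRMA_SPD_HINFO	(struct xfrmu_spdhinfo: policy hash table sizes)
 *
 * as built by build_spdinfo().
 */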
static inline size_t xfrm_spdinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhinfo));
}

static int build_spdinfo(struct sk_buff *skb, struct net *net,
			 u32 pid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct nlmsghdr *nlh;
	u32 *f;

	nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(net, &si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) ||
	    nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 spid = NETLINK_CB(skb).pid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
		BUG();

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
}
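/*
 * The SAD info exchange below mirrors the SPD one above (note added for
 * clarity): XFRM_MSG_GETSADINFO carries a u32 flags word, and the
 * XFRM_MSG_NEWSADINFO reply returns XFRMA_SAD_CNT (total SA count, u32)
 * plus XFRMA_SAD_HINFO (struct xfrmu_sadhinfo with the hash table sizes).
 */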
static inline size_t xfrm_sadinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
	       + nla_total_size(4); /* XFRMA_SAD_CNT */
}

static int build_sadinfo(struct sk_buff *skb, struct net *net,
			 u32 pid, u32 seq, u32 flags)
{
	struct xfrmk_sadinfo si;
	struct xfrmu_sadhinfo sh;
	struct nlmsghdr *nlh;
	u32 *f;

	nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_sad_getinfo(net, &si);

	sh.sadhmcnt = si.sadhmcnt;
	sh.sadhcnt = si.sadhcnt;

	if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) ||
	    nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 spid = NETLINK_CB(skb).pid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
		BUG();

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
}

static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	struct sk_buff *resp_skb;
	int err = -ESRCH;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		goto out_noput;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
	} else {
		err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
	}
	xfrm_state_put(x);
out_noput:
	return err;
}

static int verify_userspi_info(struct xfrm_userspi_info *p)
{
	switch (p->info.id.proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
		break;

	case IPPROTO_COMP:
		/* IPCOMP spi is 16-bits.
*/ 1044 if (p->max >= 0x10000) 1045 return -EINVAL; 1046 break; 1047 1048 default: 1049 return -EINVAL; 1050 } 1051 1052 if (p->min > p->max) 1053 return -EINVAL; 1054 1055 return 0; 1056 } 1057 1058 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, 1059 struct nlattr **attrs) 1060 { 1061 struct net *net = sock_net(skb->sk); 1062 struct xfrm_state *x; 1063 struct xfrm_userspi_info *p; 1064 struct sk_buff *resp_skb; 1065 xfrm_address_t *daddr; 1066 int family; 1067 int err; 1068 u32 mark; 1069 struct xfrm_mark m; 1070 1071 p = nlmsg_data(nlh); 1072 err = verify_userspi_info(p); 1073 if (err) 1074 goto out_noput; 1075 1076 family = p->info.family; 1077 daddr = &p->info.id.daddr; 1078 1079 x = NULL; 1080 1081 mark = xfrm_mark_get(attrs, &m); 1082 if (p->info.seq) { 1083 x = xfrm_find_acq_byseq(net, mark, p->info.seq); 1084 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) { 1085 xfrm_state_put(x); 1086 x = NULL; 1087 } 1088 } 1089 1090 if (!x) 1091 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid, 1092 p->info.id.proto, daddr, 1093 &p->info.saddr, 1, 1094 family); 1095 err = -ENOENT; 1096 if (x == NULL) 1097 goto out_noput; 1098 1099 err = xfrm_alloc_spi(x, p->min, p->max); 1100 if (err) 1101 goto out; 1102 1103 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); 1104 if (IS_ERR(resp_skb)) { 1105 err = PTR_ERR(resp_skb); 1106 goto out; 1107 } 1108 1109 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid); 1110 1111 out: 1112 xfrm_state_put(x); 1113 out_noput: 1114 return err; 1115 } 1116 1117 static int verify_policy_dir(u8 dir) 1118 { 1119 switch (dir) { 1120 case XFRM_POLICY_IN: 1121 case XFRM_POLICY_OUT: 1122 case XFRM_POLICY_FWD: 1123 break; 1124 1125 default: 1126 return -EINVAL; 1127 } 1128 1129 return 0; 1130 } 1131 1132 static int verify_policy_type(u8 type) 1133 { 1134 switch (type) { 1135 case XFRM_POLICY_TYPE_MAIN: 1136 #ifdef CONFIG_XFRM_SUB_POLICY 1137 case XFRM_POLICY_TYPE_SUB: 1138 #endif 1139 break; 1140 1141 default: 1142 return -EINVAL; 1143 } 1144 1145 return 0; 1146 } 1147 1148 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) 1149 { 1150 switch (p->share) { 1151 case XFRM_SHARE_ANY: 1152 case XFRM_SHARE_SESSION: 1153 case XFRM_SHARE_USER: 1154 case XFRM_SHARE_UNIQUE: 1155 break; 1156 1157 default: 1158 return -EINVAL; 1159 } 1160 1161 switch (p->action) { 1162 case XFRM_POLICY_ALLOW: 1163 case XFRM_POLICY_BLOCK: 1164 break; 1165 1166 default: 1167 return -EINVAL; 1168 } 1169 1170 switch (p->sel.family) { 1171 case AF_INET: 1172 break; 1173 1174 case AF_INET6: 1175 #if IS_ENABLED(CONFIG_IPV6) 1176 break; 1177 #else 1178 return -EAFNOSUPPORT; 1179 #endif 1180 1181 default: 1182 return -EINVAL; 1183 } 1184 1185 return verify_policy_dir(p->dir); 1186 } 1187 1188 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs) 1189 { 1190 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1191 struct xfrm_user_sec_ctx *uctx; 1192 1193 if (!rt) 1194 return 0; 1195 1196 uctx = nla_data(rt); 1197 return security_xfrm_policy_alloc(&pol->security, uctx); 1198 } 1199 1200 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, 1201 int nr) 1202 { 1203 int i; 1204 1205 xp->xfrm_nr = nr; 1206 for (i = 0; i < nr; i++, ut++) { 1207 struct xfrm_tmpl *t = &xp->xfrm_vec[i]; 1208 1209 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id)); 1210 memcpy(&t->saddr, &ut->saddr, 1211 sizeof(xfrm_address_t)); 1212 t->reqid = ut->reqid; 1213 t->mode = ut->mode; 1214 t->share = ut->share; 1215 t->optional = 
ut->optional; 1216 t->aalgos = ut->aalgos; 1217 t->ealgos = ut->ealgos; 1218 t->calgos = ut->calgos; 1219 /* If all masks are ~0, then we allow all algorithms. */ 1220 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos); 1221 t->encap_family = ut->family; 1222 } 1223 } 1224 1225 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) 1226 { 1227 int i; 1228 1229 if (nr > XFRM_MAX_DEPTH) 1230 return -EINVAL; 1231 1232 for (i = 0; i < nr; i++) { 1233 /* We never validated the ut->family value, so many 1234 * applications simply leave it at zero. The check was 1235 * never made and ut->family was ignored because all 1236 * templates could be assumed to have the same family as 1237 * the policy itself. Now that we will have ipv4-in-ipv6 1238 * and ipv6-in-ipv4 tunnels, this is no longer true. 1239 */ 1240 if (!ut[i].family) 1241 ut[i].family = family; 1242 1243 switch (ut[i].family) { 1244 case AF_INET: 1245 break; 1246 #if IS_ENABLED(CONFIG_IPV6) 1247 case AF_INET6: 1248 break; 1249 #endif 1250 default: 1251 return -EINVAL; 1252 } 1253 } 1254 1255 return 0; 1256 } 1257 1258 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs) 1259 { 1260 struct nlattr *rt = attrs[XFRMA_TMPL]; 1261 1262 if (!rt) { 1263 pol->xfrm_nr = 0; 1264 } else { 1265 struct xfrm_user_tmpl *utmpl = nla_data(rt); 1266 int nr = nla_len(rt) / sizeof(*utmpl); 1267 int err; 1268 1269 err = validate_tmpl(nr, utmpl, pol->family); 1270 if (err) 1271 return err; 1272 1273 copy_templates(pol, utmpl, nr); 1274 } 1275 return 0; 1276 } 1277 1278 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs) 1279 { 1280 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE]; 1281 struct xfrm_userpolicy_type *upt; 1282 u8 type = XFRM_POLICY_TYPE_MAIN; 1283 int err; 1284 1285 if (rt) { 1286 upt = nla_data(rt); 1287 type = upt->type; 1288 } 1289 1290 err = verify_policy_type(type); 1291 if (err) 1292 return err; 1293 1294 *tp = type; 1295 return 0; 1296 } 1297 1298 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p) 1299 { 1300 xp->priority = p->priority; 1301 xp->index = p->index; 1302 memcpy(&xp->selector, &p->sel, sizeof(xp->selector)); 1303 memcpy(&xp->lft, &p->lft, sizeof(xp->lft)); 1304 xp->action = p->action; 1305 xp->flags = p->flags; 1306 xp->family = p->sel.family; 1307 /* XXX xp->share = p->share; */ 1308 } 1309 1310 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) 1311 { 1312 memcpy(&p->sel, &xp->selector, sizeof(p->sel)); 1313 memcpy(&p->lft, &xp->lft, sizeof(p->lft)); 1314 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); 1315 p->priority = xp->priority; 1316 p->index = xp->index; 1317 p->sel.family = xp->family; 1318 p->dir = dir; 1319 p->action = xp->action; 1320 p->flags = xp->flags; 1321 p->share = XFRM_SHARE_ANY; /* XXX xp->share */ 1322 } 1323 1324 static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp) 1325 { 1326 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL); 1327 int err; 1328 1329 if (!xp) { 1330 *errp = -ENOMEM; 1331 return NULL; 1332 } 1333 1334 copy_from_user_policy(xp, p); 1335 1336 err = copy_from_user_policy_type(&xp->type, attrs); 1337 if (err) 1338 goto error; 1339 1340 if (!(err = copy_from_user_tmpl(xp, attrs))) 1341 err = copy_from_user_sec_ctx(xp, attrs); 1342 if (err) 1343 goto error; 1344 1345 xfrm_mark_get(attrs, &xp->mark); 1346 1347 return xp; 1348 error: 1349 *errp = err; 1350 
xp->walk.dead = 1; 1351 xfrm_policy_destroy(xp); 1352 return NULL; 1353 } 1354 1355 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 1356 struct nlattr **attrs) 1357 { 1358 struct net *net = sock_net(skb->sk); 1359 struct xfrm_userpolicy_info *p = nlmsg_data(nlh); 1360 struct xfrm_policy *xp; 1361 struct km_event c; 1362 int err; 1363 int excl; 1364 uid_t loginuid = audit_get_loginuid(current); 1365 u32 sessionid = audit_get_sessionid(current); 1366 u32 sid; 1367 1368 err = verify_newpolicy_info(p); 1369 if (err) 1370 return err; 1371 err = verify_sec_ctx_len(attrs); 1372 if (err) 1373 return err; 1374 1375 xp = xfrm_policy_construct(net, p, attrs, &err); 1376 if (!xp) 1377 return err; 1378 1379 /* shouldn't excl be based on nlh flags?? 1380 * Aha! this is anti-netlink really i.e more pfkey derived 1381 * in netlink excl is a flag and you wouldnt need 1382 * a type XFRM_MSG_UPDPOLICY - JHS */ 1383 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; 1384 err = xfrm_policy_insert(p->dir, xp, excl); 1385 security_task_getsecid(current, &sid); 1386 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid); 1387 1388 if (err) { 1389 security_xfrm_policy_free(xp->security); 1390 kfree(xp); 1391 return err; 1392 } 1393 1394 c.event = nlh->nlmsg_type; 1395 c.seq = nlh->nlmsg_seq; 1396 c.pid = nlh->nlmsg_pid; 1397 km_policy_notify(xp, p->dir, &c); 1398 1399 xfrm_pol_put(xp); 1400 1401 return 0; 1402 } 1403 1404 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) 1405 { 1406 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH]; 1407 int i; 1408 1409 if (xp->xfrm_nr == 0) 1410 return 0; 1411 1412 for (i = 0; i < xp->xfrm_nr; i++) { 1413 struct xfrm_user_tmpl *up = &vec[i]; 1414 struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; 1415 1416 memcpy(&up->id, &kp->id, sizeof(up->id)); 1417 up->family = kp->encap_family; 1418 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); 1419 up->reqid = kp->reqid; 1420 up->mode = kp->mode; 1421 up->share = kp->share; 1422 up->optional = kp->optional; 1423 up->aalgos = kp->aalgos; 1424 up->ealgos = kp->ealgos; 1425 up->calgos = kp->calgos; 1426 } 1427 1428 return nla_put(skb, XFRMA_TMPL, 1429 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec); 1430 } 1431 1432 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb) 1433 { 1434 if (x->security) { 1435 return copy_sec_ctx(x->security, skb); 1436 } 1437 return 0; 1438 } 1439 1440 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb) 1441 { 1442 if (xp->security) { 1443 return copy_sec_ctx(xp->security, skb); 1444 } 1445 return 0; 1446 } 1447 static inline size_t userpolicy_type_attrsize(void) 1448 { 1449 #ifdef CONFIG_XFRM_SUB_POLICY 1450 return nla_total_size(sizeof(struct xfrm_userpolicy_type)); 1451 #else 1452 return 0; 1453 #endif 1454 } 1455 1456 #ifdef CONFIG_XFRM_SUB_POLICY 1457 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 1458 { 1459 struct xfrm_userpolicy_type upt = { 1460 .type = type, 1461 }; 1462 1463 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); 1464 } 1465 1466 #else 1467 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 1468 { 1469 return 0; 1470 } 1471 #endif 1472 1473 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr) 1474 { 1475 struct xfrm_dump_info *sp = ptr; 1476 struct xfrm_userpolicy_info *p; 1477 struct sk_buff *in_skb = sp->in_skb; 1478 struct sk_buff *skb = sp->out_skb; 1479 struct nlmsghdr *nlh; 1480 1481 nlh = nlmsg_put(skb, 
NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 1482 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); 1483 if (nlh == NULL) 1484 return -EMSGSIZE; 1485 1486 p = nlmsg_data(nlh); 1487 copy_to_user_policy(xp, p, dir); 1488 if (copy_to_user_tmpl(xp, skb) < 0) 1489 goto nlmsg_failure; 1490 if (copy_to_user_sec_ctx(xp, skb)) 1491 goto nlmsg_failure; 1492 if (copy_to_user_policy_type(xp->type, skb) < 0) 1493 goto nlmsg_failure; 1494 if (xfrm_mark_put(skb, &xp->mark)) 1495 goto nla_put_failure; 1496 1497 nlmsg_end(skb, nlh); 1498 return 0; 1499 1500 nla_put_failure: 1501 nlmsg_failure: 1502 nlmsg_cancel(skb, nlh); 1503 return -EMSGSIZE; 1504 } 1505 1506 static int xfrm_dump_policy_done(struct netlink_callback *cb) 1507 { 1508 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; 1509 1510 xfrm_policy_walk_done(walk); 1511 return 0; 1512 } 1513 1514 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) 1515 { 1516 struct net *net = sock_net(skb->sk); 1517 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; 1518 struct xfrm_dump_info info; 1519 1520 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) > 1521 sizeof(cb->args) - sizeof(cb->args[0])); 1522 1523 info.in_skb = cb->skb; 1524 info.out_skb = skb; 1525 info.nlmsg_seq = cb->nlh->nlmsg_seq; 1526 info.nlmsg_flags = NLM_F_MULTI; 1527 1528 if (!cb->args[0]) { 1529 cb->args[0] = 1; 1530 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); 1531 } 1532 1533 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); 1534 1535 return skb->len; 1536 } 1537 1538 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, 1539 struct xfrm_policy *xp, 1540 int dir, u32 seq) 1541 { 1542 struct xfrm_dump_info info; 1543 struct sk_buff *skb; 1544 1545 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1546 if (!skb) 1547 return ERR_PTR(-ENOMEM); 1548 1549 info.in_skb = in_skb; 1550 info.out_skb = skb; 1551 info.nlmsg_seq = seq; 1552 info.nlmsg_flags = 0; 1553 1554 if (dump_one_policy(xp, dir, 0, &info) < 0) { 1555 kfree_skb(skb); 1556 return NULL; 1557 } 1558 1559 return skb; 1560 } 1561 1562 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 1563 struct nlattr **attrs) 1564 { 1565 struct net *net = sock_net(skb->sk); 1566 struct xfrm_policy *xp; 1567 struct xfrm_userpolicy_id *p; 1568 u8 type = XFRM_POLICY_TYPE_MAIN; 1569 int err; 1570 struct km_event c; 1571 int delete; 1572 struct xfrm_mark m; 1573 u32 mark = xfrm_mark_get(attrs, &m); 1574 1575 p = nlmsg_data(nlh); 1576 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; 1577 1578 err = copy_from_user_policy_type(&type, attrs); 1579 if (err) 1580 return err; 1581 1582 err = verify_policy_dir(p->dir); 1583 if (err) 1584 return err; 1585 1586 if (p->index) 1587 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err); 1588 else { 1589 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1590 struct xfrm_sec_ctx *ctx; 1591 1592 err = verify_sec_ctx_len(attrs); 1593 if (err) 1594 return err; 1595 1596 ctx = NULL; 1597 if (rt) { 1598 struct xfrm_user_sec_ctx *uctx = nla_data(rt); 1599 1600 err = security_xfrm_policy_alloc(&ctx, uctx); 1601 if (err) 1602 return err; 1603 } 1604 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel, 1605 ctx, delete, &err); 1606 security_xfrm_policy_free(ctx); 1607 } 1608 if (xp == NULL) 1609 return -ENOENT; 1610 1611 if (!delete) { 1612 struct sk_buff *resp_skb; 1613 1614 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq); 1615 if (IS_ERR(resp_skb)) { 1616 err = PTR_ERR(resp_skb); 1617 } 
else { 1618 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, 1619 NETLINK_CB(skb).pid); 1620 } 1621 } else { 1622 uid_t loginuid = audit_get_loginuid(current); 1623 u32 sessionid = audit_get_sessionid(current); 1624 u32 sid; 1625 1626 security_task_getsecid(current, &sid); 1627 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid, 1628 sid); 1629 1630 if (err != 0) 1631 goto out; 1632 1633 c.data.byid = p->index; 1634 c.event = nlh->nlmsg_type; 1635 c.seq = nlh->nlmsg_seq; 1636 c.pid = nlh->nlmsg_pid; 1637 km_policy_notify(xp, p->dir, &c); 1638 } 1639 1640 out: 1641 xfrm_pol_put(xp); 1642 return err; 1643 } 1644 1645 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 1646 struct nlattr **attrs) 1647 { 1648 struct net *net = sock_net(skb->sk); 1649 struct km_event c; 1650 struct xfrm_usersa_flush *p = nlmsg_data(nlh); 1651 struct xfrm_audit audit_info; 1652 int err; 1653 1654 audit_info.loginuid = audit_get_loginuid(current); 1655 audit_info.sessionid = audit_get_sessionid(current); 1656 security_task_getsecid(current, &audit_info.secid); 1657 err = xfrm_state_flush(net, p->proto, &audit_info); 1658 if (err) { 1659 if (err == -ESRCH) /* empty table */ 1660 return 0; 1661 return err; 1662 } 1663 c.data.proto = p->proto; 1664 c.event = nlh->nlmsg_type; 1665 c.seq = nlh->nlmsg_seq; 1666 c.pid = nlh->nlmsg_pid; 1667 c.net = net; 1668 km_state_notify(NULL, &c); 1669 1670 return 0; 1671 } 1672 1673 static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x) 1674 { 1675 size_t replay_size = x->replay_esn ? 1676 xfrm_replay_state_esn_len(x->replay_esn) : 1677 sizeof(struct xfrm_replay_state); 1678 1679 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id)) 1680 + nla_total_size(replay_size) 1681 + nla_total_size(sizeof(struct xfrm_lifetime_cur)) 1682 + nla_total_size(sizeof(struct xfrm_mark)) 1683 + nla_total_size(4) /* XFRM_AE_RTHR */ 1684 + nla_total_size(4); /* XFRM_AE_ETHR */ 1685 } 1686 1687 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c) 1688 { 1689 struct xfrm_aevent_id *id; 1690 struct nlmsghdr *nlh; 1691 1692 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); 1693 if (nlh == NULL) 1694 return -EMSGSIZE; 1695 1696 id = nlmsg_data(nlh); 1697 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr)); 1698 id->sa_id.spi = x->id.spi; 1699 id->sa_id.family = x->props.family; 1700 id->sa_id.proto = x->id.proto; 1701 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr)); 1702 id->reqid = x->props.reqid; 1703 id->flags = c->data.aevent; 1704 1705 if (x->replay_esn) { 1706 if (nla_put(skb, XFRMA_REPLAY_ESN_VAL, 1707 xfrm_replay_state_esn_len(x->replay_esn), 1708 x->replay_esn)) 1709 goto nla_put_failure; 1710 } else { 1711 if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), 1712 &x->replay)) 1713 goto nla_put_failure; 1714 } 1715 if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft)) 1716 goto nla_put_failure; 1717 1718 if ((id->flags & XFRM_AE_RTHR) && 1719 nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff)) 1720 goto nla_put_failure; 1721 1722 if ((id->flags & XFRM_AE_ETHR) && 1723 nla_put_u32(skb, XFRMA_ETIMER_THRESH, 1724 x->replay_maxage * 10 / HZ)) 1725 goto nla_put_failure; 1726 1727 if (xfrm_mark_put(skb, &x->mark)) 1728 goto nla_put_failure; 1729 1730 return nlmsg_end(skb, nlh); 1731 1732 nla_put_failure: 1733 nlmsg_cancel(skb, nlh); 1734 return -EMSGSIZE; 1735 } 1736 1737 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 1738 struct nlattr **attrs) 1739 { 1740 
struct net *net = sock_net(skb->sk); 1741 struct xfrm_state *x; 1742 struct sk_buff *r_skb; 1743 int err; 1744 struct km_event c; 1745 u32 mark; 1746 struct xfrm_mark m; 1747 struct xfrm_aevent_id *p = nlmsg_data(nlh); 1748 struct xfrm_usersa_id *id = &p->sa_id; 1749 1750 mark = xfrm_mark_get(attrs, &m); 1751 1752 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family); 1753 if (x == NULL) 1754 return -ESRCH; 1755 1756 r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC); 1757 if (r_skb == NULL) { 1758 xfrm_state_put(x); 1759 return -ENOMEM; 1760 } 1761 1762 /* 1763 * XXX: is this lock really needed - none of the other 1764 * gets lock (the concern is things getting updated 1765 * while we are still reading) - jhs 1766 */ 1767 spin_lock_bh(&x->lock); 1768 c.data.aevent = p->flags; 1769 c.seq = nlh->nlmsg_seq; 1770 c.pid = nlh->nlmsg_pid; 1771 1772 if (build_aevent(r_skb, x, &c) < 0) 1773 BUG(); 1774 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid); 1775 spin_unlock_bh(&x->lock); 1776 xfrm_state_put(x); 1777 return err; 1778 } 1779 1780 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 1781 struct nlattr **attrs) 1782 { 1783 struct net *net = sock_net(skb->sk); 1784 struct xfrm_state *x; 1785 struct km_event c; 1786 int err = - EINVAL; 1787 u32 mark = 0; 1788 struct xfrm_mark m; 1789 struct xfrm_aevent_id *p = nlmsg_data(nlh); 1790 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 1791 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; 1792 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 1793 1794 if (!lt && !rp && !re) 1795 return err; 1796 1797 /* pedantic mode - thou shalt sayeth replaceth */ 1798 if (!(nlh->nlmsg_flags&NLM_F_REPLACE)) 1799 return err; 1800 1801 mark = xfrm_mark_get(attrs, &m); 1802 1803 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family); 1804 if (x == NULL) 1805 return -ESRCH; 1806 1807 if (x->km.state != XFRM_STATE_VALID) 1808 goto out; 1809 1810 err = xfrm_replay_verify_len(x->replay_esn, rp); 1811 if (err) 1812 goto out; 1813 1814 spin_lock_bh(&x->lock); 1815 xfrm_update_ae_params(x, attrs); 1816 spin_unlock_bh(&x->lock); 1817 1818 c.event = nlh->nlmsg_type; 1819 c.seq = nlh->nlmsg_seq; 1820 c.pid = nlh->nlmsg_pid; 1821 c.data.aevent = XFRM_AE_CU; 1822 km_state_notify(x, &c); 1823 err = 0; 1824 out: 1825 xfrm_state_put(x); 1826 return err; 1827 } 1828 1829 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 1830 struct nlattr **attrs) 1831 { 1832 struct net *net = sock_net(skb->sk); 1833 struct km_event c; 1834 u8 type = XFRM_POLICY_TYPE_MAIN; 1835 int err; 1836 struct xfrm_audit audit_info; 1837 1838 err = copy_from_user_policy_type(&type, attrs); 1839 if (err) 1840 return err; 1841 1842 audit_info.loginuid = audit_get_loginuid(current); 1843 audit_info.sessionid = audit_get_sessionid(current); 1844 security_task_getsecid(current, &audit_info.secid); 1845 err = xfrm_policy_flush(net, type, &audit_info); 1846 if (err) { 1847 if (err == -ESRCH) /* empty table */ 1848 return 0; 1849 return err; 1850 } 1851 1852 c.data.type = type; 1853 c.event = nlh->nlmsg_type; 1854 c.seq = nlh->nlmsg_seq; 1855 c.pid = nlh->nlmsg_pid; 1856 c.net = net; 1857 km_policy_notify(NULL, 0, &c); 1858 return 0; 1859 } 1860 1861 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, 1862 struct nlattr **attrs) 1863 { 1864 struct net *net = sock_net(skb->sk); 1865 struct xfrm_policy *xp; 1866 struct xfrm_user_polexpire *up = nlmsg_data(nlh); 1867 struct 
xfrm_userpolicy_info *p = &up->pol; 1868 u8 type = XFRM_POLICY_TYPE_MAIN; 1869 int err = -ENOENT; 1870 struct xfrm_mark m; 1871 u32 mark = xfrm_mark_get(attrs, &m); 1872 1873 err = copy_from_user_policy_type(&type, attrs); 1874 if (err) 1875 return err; 1876 1877 err = verify_policy_dir(p->dir); 1878 if (err) 1879 return err; 1880 1881 if (p->index) 1882 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err); 1883 else { 1884 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1885 struct xfrm_sec_ctx *ctx; 1886 1887 err = verify_sec_ctx_len(attrs); 1888 if (err) 1889 return err; 1890 1891 ctx = NULL; 1892 if (rt) { 1893 struct xfrm_user_sec_ctx *uctx = nla_data(rt); 1894 1895 err = security_xfrm_policy_alloc(&ctx, uctx); 1896 if (err) 1897 return err; 1898 } 1899 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, 1900 &p->sel, ctx, 0, &err); 1901 security_xfrm_policy_free(ctx); 1902 } 1903 if (xp == NULL) 1904 return -ENOENT; 1905 1906 if (unlikely(xp->walk.dead)) 1907 goto out; 1908 1909 err = 0; 1910 if (up->hard) { 1911 uid_t loginuid = audit_get_loginuid(current); 1912 u32 sessionid = audit_get_sessionid(current); 1913 u32 sid; 1914 1915 security_task_getsecid(current, &sid); 1916 xfrm_policy_delete(xp, p->dir); 1917 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid); 1918 1919 } else { 1920 // reset the timers here? 1921 WARN(1, "Dont know what to do with soft policy expire\n"); 1922 } 1923 km_policy_expired(xp, p->dir, up->hard, current->pid); 1924 1925 out: 1926 xfrm_pol_put(xp); 1927 return err; 1928 } 1929 1930 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, 1931 struct nlattr **attrs) 1932 { 1933 struct net *net = sock_net(skb->sk); 1934 struct xfrm_state *x; 1935 int err; 1936 struct xfrm_user_expire *ue = nlmsg_data(nlh); 1937 struct xfrm_usersa_info *p = &ue->state; 1938 struct xfrm_mark m; 1939 u32 mark = xfrm_mark_get(attrs, &m); 1940 1941 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); 1942 1943 err = -ENOENT; 1944 if (x == NULL) 1945 return err; 1946 1947 spin_lock_bh(&x->lock); 1948 err = -EINVAL; 1949 if (x->km.state != XFRM_STATE_VALID) 1950 goto out; 1951 km_state_expired(x, ue->hard, current->pid); 1952 1953 if (ue->hard) { 1954 uid_t loginuid = audit_get_loginuid(current); 1955 u32 sessionid = audit_get_sessionid(current); 1956 u32 sid; 1957 1958 security_task_getsecid(current, &sid); 1959 __xfrm_state_delete(x); 1960 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid); 1961 } 1962 err = 0; 1963 out: 1964 spin_unlock_bh(&x->lock); 1965 xfrm_state_put(x); 1966 return err; 1967 } 1968 1969 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, 1970 struct nlattr **attrs) 1971 { 1972 struct net *net = sock_net(skb->sk); 1973 struct xfrm_policy *xp; 1974 struct xfrm_user_tmpl *ut; 1975 int i; 1976 struct nlattr *rt = attrs[XFRMA_TMPL]; 1977 struct xfrm_mark mark; 1978 1979 struct xfrm_user_acquire *ua = nlmsg_data(nlh); 1980 struct xfrm_state *x = xfrm_state_alloc(net); 1981 int err = -ENOMEM; 1982 1983 if (!x) 1984 goto nomem; 1985 1986 xfrm_mark_get(attrs, &mark); 1987 1988 err = verify_newpolicy_info(&ua->policy); 1989 if (err) 1990 goto bad_policy; 1991 1992 /* build an XP */ 1993 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err); 1994 if (!xp) 1995 goto free_state; 1996 1997 memcpy(&x->id, &ua->id, sizeof(ua->id)); 1998 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr)); 1999 memcpy(&x->sel, &ua->sel, sizeof(ua->sel)); 2000 xp->mark.m = x->mark.m = mark.m; 2001 
xp->mark.v = x->mark.v = mark.v; 2002 ut = nla_data(rt); 2003 /* extract the templates and for each call km_key */ 2004 for (i = 0; i < xp->xfrm_nr; i++, ut++) { 2005 struct xfrm_tmpl *t = &xp->xfrm_vec[i]; 2006 memcpy(&x->id, &t->id, sizeof(x->id)); 2007 x->props.mode = t->mode; 2008 x->props.reqid = t->reqid; 2009 x->props.family = ut->family; 2010 t->aalgos = ua->aalgos; 2011 t->ealgos = ua->ealgos; 2012 t->calgos = ua->calgos; 2013 err = km_query(x, t, xp); 2014 2015 } 2016 2017 kfree(x); 2018 kfree(xp); 2019 2020 return 0; 2021 2022 bad_policy: 2023 WARN(1, "BAD policy passed\n"); 2024 free_state: 2025 kfree(x); 2026 nomem: 2027 return err; 2028 } 2029 2030 #ifdef CONFIG_XFRM_MIGRATE 2031 static int copy_from_user_migrate(struct xfrm_migrate *ma, 2032 struct xfrm_kmaddress *k, 2033 struct nlattr **attrs, int *num) 2034 { 2035 struct nlattr *rt = attrs[XFRMA_MIGRATE]; 2036 struct xfrm_user_migrate *um; 2037 int i, num_migrate; 2038 2039 if (k != NULL) { 2040 struct xfrm_user_kmaddress *uk; 2041 2042 uk = nla_data(attrs[XFRMA_KMADDRESS]); 2043 memcpy(&k->local, &uk->local, sizeof(k->local)); 2044 memcpy(&k->remote, &uk->remote, sizeof(k->remote)); 2045 k->family = uk->family; 2046 k->reserved = uk->reserved; 2047 } 2048 2049 um = nla_data(rt); 2050 num_migrate = nla_len(rt) / sizeof(*um); 2051 2052 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) 2053 return -EINVAL; 2054 2055 for (i = 0; i < num_migrate; i++, um++, ma++) { 2056 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr)); 2057 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr)); 2058 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr)); 2059 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr)); 2060 2061 ma->proto = um->proto; 2062 ma->mode = um->mode; 2063 ma->reqid = um->reqid; 2064 2065 ma->old_family = um->old_family; 2066 ma->new_family = um->new_family; 2067 } 2068 2069 *num = i; 2070 return 0; 2071 } 2072 2073 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, 2074 struct nlattr **attrs) 2075 { 2076 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh); 2077 struct xfrm_migrate m[XFRM_MAX_DEPTH]; 2078 struct xfrm_kmaddress km, *kmp; 2079 u8 type; 2080 int err; 2081 int n = 0; 2082 2083 if (attrs[XFRMA_MIGRATE] == NULL) 2084 return -EINVAL; 2085 2086 kmp = attrs[XFRMA_KMADDRESS] ? 
&km : NULL; 2087 2088 err = copy_from_user_policy_type(&type, attrs); 2089 if (err) 2090 return err; 2091 2092 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n); 2093 if (err) 2094 return err; 2095 2096 if (!n) 2097 return 0; 2098 2099 xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp); 2100 2101 return 0; 2102 } 2103 #else 2104 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, 2105 struct nlattr **attrs) 2106 { 2107 return -ENOPROTOOPT; 2108 } 2109 #endif 2110 2111 #ifdef CONFIG_XFRM_MIGRATE 2112 static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb) 2113 { 2114 struct xfrm_user_migrate um; 2115 2116 memset(&um, 0, sizeof(um)); 2117 um.proto = m->proto; 2118 um.mode = m->mode; 2119 um.reqid = m->reqid; 2120 um.old_family = m->old_family; 2121 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr)); 2122 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr)); 2123 um.new_family = m->new_family; 2124 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr)); 2125 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr)); 2126 2127 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um); 2128 } 2129 2130 static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb) 2131 { 2132 struct xfrm_user_kmaddress uk; 2133 2134 memset(&uk, 0, sizeof(uk)); 2135 uk.family = k->family; 2136 uk.reserved = k->reserved; 2137 memcpy(&uk.local, &k->local, sizeof(uk.local)); 2138 memcpy(&uk.remote, &k->remote, sizeof(uk.remote)); 2139 2140 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk); 2141 } 2142 2143 static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma) 2144 { 2145 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id)) 2146 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0) 2147 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate) 2148 + userpolicy_type_attrsize(); 2149 } 2150 2151 static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m, 2152 int num_migrate, const struct xfrm_kmaddress *k, 2153 const struct xfrm_selector *sel, u8 dir, u8 type) 2154 { 2155 const struct xfrm_migrate *mp; 2156 struct xfrm_userpolicy_id *pol_id; 2157 struct nlmsghdr *nlh; 2158 int i; 2159 2160 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0); 2161 if (nlh == NULL) 2162 return -EMSGSIZE; 2163 2164 pol_id = nlmsg_data(nlh); 2165 /* copy data from selector, dir, and type to the pol_id */ 2166 memset(pol_id, 0, sizeof(*pol_id)); 2167 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); 2168 pol_id->dir = dir; 2169 2170 if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0)) 2171 goto nlmsg_failure; 2172 2173 if (copy_to_user_policy_type(type, skb) < 0) 2174 goto nlmsg_failure; 2175 2176 for (i = 0, mp = m ; i < num_migrate; i++, mp++) { 2177 if (copy_to_user_migrate(mp, skb) < 0) 2178 goto nlmsg_failure; 2179 } 2180 2181 return nlmsg_end(skb, nlh); 2182 nlmsg_failure: 2183 nlmsg_cancel(skb, nlh); 2184 return -EMSGSIZE; 2185 } 2186 2187 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2188 const struct xfrm_migrate *m, int num_migrate, 2189 const struct xfrm_kmaddress *k) 2190 { 2191 struct net *net = &init_net; 2192 struct sk_buff *skb; 2193 2194 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC); 2195 if (skb == NULL) 2196 return -ENOMEM; 2197 2198 /* build migrate */ 2199 if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0) 2200 BUG(); 2201 2202 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, 
				       XFRMNLGRP_MIGRATE, GFP_ATOMIC);
}
#else
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k)
{
	return -ENOPROTOOPT;
}
#endif

#define XMSGSIZE(type) sizeof(struct type)

static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
};

#undef XMSGSIZE

static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
	[XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
	[XFRMA_LASTUSED] = { .type = NLA_U64},
	[XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
	[XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
	[XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
	[XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
	[XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
	[XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
	[XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
	[XFRMA_TFCPAD] = { .type = NLA_U32 },
	[XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
};
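/*
 * Note (added for clarity): for the attributes above that only set .len,
 * the generic netlink policy check treats .len as a minimum payload size,
 * so variable-length attributes -- algorithms with their keys, template
 * arrays, security contexts, the ESN replay state with its bitmap -- are
 * accepted as long as they are at least as large as the base structure;
 * the exact sizes are then re-checked by the verify_*() helpers and
 * xfrm_replay_verify_len() earlier in this file.
 */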
static struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
	[XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
					     .dump = xfrm_dump_sa,
					     .done = xfrm_dump_sa_done },
	[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
	[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						 .dump = xfrm_dump_policy,
						 .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
	[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
	[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
	[XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
	[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
	[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
	[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
};

static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	struct xfrm_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		if (link->dump == NULL)
			return -EINVAL;

		{
			struct netlink_dump_control c = {
				.dump = link->dump,
				.done = link->done,
			};
			return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
		}
	}

	err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
			  xfrma_policy);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}

static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&xfrm_cfg_mutex);
}
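
/* Requests enter xfrm_netlink_rcv() via the NETLINK_XFRM socket created in
 * xfrm_user_net_init() below.  Illustrative userspace sketch (not part of
 * this file; requires CAP_NET_ADMIN, see the check in xfrm_user_rcv_msg()):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *	struct {
 *		struct nlmsghdr n;
 *		struct xfrm_usersa_flush f;
 *	} req = {
 *		.n.nlmsg_len   = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)),
 *		.n.nlmsg_type  = XFRM_MSG_FLUSHSA,
 *		.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
 *		.f.proto       = IPPROTO_ESP,
 *	};
 *	send(fd, &req, req.n.nlmsg_len, 0);
 */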

static inline size_t xfrm_expire_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
	       + nla_total_size(sizeof(struct xfrm_mark));
}

static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;

	if (xfrm_mark_put(skb, &x->mark))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	return -EMSGSIZE;
}

static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_expire(skb, x, c) < 0) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_aevent(skb, x, c) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
}

static int xfrm_notify_sa_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	p = nlmsg_data(nlh);
	p->proto = c->data.proto;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
}

static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
	size_t l = 0;
	if (x->aead)
		l += nla_total_size(aead_len(x->aead));
	if (x->aalg) {
		l += nla_total_size(sizeof(struct xfrm_algo) +
				    (x->aalg->alg_key_len + 7) / 8);
		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
	}
	if (x->ealg)
		l += nla_total_size(xfrm_alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));
	if (x->tfcpad)
		l += nla_total_size(sizeof(x->tfcpad));
	if (x->replay_esn)
		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
	if (x->security)
		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
				    x->security->ctx_len);
	if (x->coaddr)
		l += nla_total_size(sizeof(*x->coaddr));

	/* Must count x->lastused as it may become non-zero behind our back.
	 */
	l += nla_total_size(sizeof(u64));

	return l;
}
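
/* Broadcast an SA change on XFRMNLGRP_SA.  For XFRM_MSG_DELSA the message
 * header is the short xfrm_usersa_id and the full state is nested in an
 * XFRMA_SA attribute; for NEWSA/UPDSA the xfrm_usersa_info itself is the
 * header.  xfrm_sa_len() above must account for every attribute added here.
 */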
static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = xfrm_sa_len(x);
	int headlen;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
		len += nla_total_size(sizeof(struct xfrm_mark));
	}
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
	if (nlh == NULL)
		goto nla_put_failure;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
		if (attr == NULL)
			goto nla_put_failure;

		p = nla_data(attr);
	}

	if (copy_to_user_state_extra(x, p, skb))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);

nla_put_failure:
	/* Somebody screwed up with xfrm_sa_len! */
	WARN_ON(1);
	kfree_skb(skb);
	return -1;
}

static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
{

	switch (c->event) {
	case XFRM_MSG_EXPIRE:
		return xfrm_exp_state_notify(x, c);
	case XFRM_MSG_NEWAE:
		return xfrm_aevent_state_notify(x, c);
	case XFRM_MSG_DELSA:
	case XFRM_MSG_UPDSA:
	case XFRM_MSG_NEWSA:
		return xfrm_notify_sa(x, c);
	case XFRM_MSG_FLUSHSA:
		return xfrm_notify_sa_flush(c);
	default:
		printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
		       c->event);
		break;
	}

	return 0;

}

static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
					  struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(xfrm_user_sec_ctx_size(x->security))
	       + userpolicy_type_attrsize();
}

static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
			 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
			 int dir)
{
	struct xfrm_user_acquire *ua;
	struct nlmsghdr *nlh;
	__u32 seq = xfrm_get_acqseq();

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ua = nlmsg_data(nlh);
	memcpy(&ua->id, &x->id, sizeof(ua->id));
	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
	copy_to_user_policy(xp, &ua->policy, dir);
	ua->aalgos = xt->aalgos;
	ua->ealgos = xt->ealgos;
	ua->calgos = xt->calgos;
	ua->seq = x->km.seq = seq;

	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_state_sec_ctx(x, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;
	if (xfrm_mark_put(skb, &xp->mark))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
			     struct xfrm_policy *xp, int dir)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_acquire(skb, x, xt, xp, dir) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
}

/* User gives us xfrm_user_policy_info followed by an array of 0
 * or more templates.
 */
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
					       u8 *data, int len, int *dir)
{
	struct net *net = sock_net(sk);
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	switch (sk->sk_family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	*dir = -EINVAL;

	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p))
		return NULL;

	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (validate_tmpl(nr, ut, p->sel.family))
		return NULL;

	if (p->dir > XFRM_POLICY_OUT)
		return NULL;

	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	xp->type = XFRM_POLICY_TYPE_MAIN;
	copy_templates(xp, ut, nr);

	*dir = p->dir;

	return xp;
}
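
/* Illustrative userspace sketch (not part of this file): the optval buffer
 * passed to setsockopt(IP_XFRM_POLICY) / setsockopt(IPV6_XFRM_POLICY) is
 * laid out exactly as xfrm_compile_policy() above expects -- one
 * xfrm_userpolicy_info immediately followed by zero or more xfrm_user_tmpl
 * entries:
 *
 *	struct {
 *		struct xfrm_userpolicy_info info;
 *		struct xfrm_user_tmpl tmpl[1];
 *	} pol = { ... };
 *
 *	setsockopt(fd, IPPROTO_IP, IP_XFRM_POLICY, &pol, sizeof(pol));
 *
 * pol.info.dir must be XFRM_POLICY_IN or XFRM_POLICY_OUT, matching the
 * p->dir > XFRM_POLICY_OUT check above.
 */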

static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + userpolicy_type_attrsize();
}

static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, const struct km_event *c)
{
	struct xfrm_user_polexpire *upe;
	struct nlmsghdr *nlh;
	int hard = c->data.hard;

	nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	upe = nlmsg_data(nlh);
	copy_to_user_policy(xp, &upe->pol, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_sec_ctx(xp, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;
	if (xfrm_mark_put(skb, &xp->mark))
		goto nla_put_failure;
	upe->hard = !!hard;

	return nlmsg_end(skb, nlh);

nla_put_failure:
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct net *net = xp_net(xp);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_polexpire(skb, xp, dir, c) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct net *net = xp_net(xp);
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	int headlen;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += userpolicy_type_attrsize();
	len += nla_total_size(sizeof(struct xfrm_mark));
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
	if (nlh == NULL)
		goto nlmsg_failure;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		id->dir = dir;
		if (c->data.byid)
			id->index = xp->index;
		else
			memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
		if (attr == NULL)
			goto nlmsg_failure;

		p = nla_data(attr);
	}

	copy_to_user_policy(xp, p, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;

	if (xfrm_mark_put(skb, &xp->mark))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int xfrm_notify_policy_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
	if (nlh == NULL)
		goto nlmsg_failure;
	if (copy_to_user_policy_type(c->data.type, skb) < 0)
		goto nlmsg_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{

	switch (c->event) {
	case XFRM_MSG_NEWPOLICY:
	case XFRM_MSG_UPDPOLICY:
	case XFRM_MSG_DELPOLICY:
		return xfrm_notify_policy(xp, dir, c);
	case XFRM_MSG_FLUSHPOLICY:
		return xfrm_notify_policy_flush(c);
	case XFRM_MSG_POLEXPIRE:
		return xfrm_exp_policy_notify(xp, dir, c);
	default:
		printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
		       c->event);
	}

	return 0;

}

static inline size_t xfrm_report_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
}

static int build_report(struct sk_buff *skb, u8 proto,
			struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct xfrm_user_report *ur;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ur = nlmsg_data(nlh);
	ur->proto = proto;
	memcpy(&ur->sel, sel, sizeof(ur->sel));

	if (addr &&
	    nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_report(struct net *net, u8 proto,
			    struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_report(skb, proto, sel, addr) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
}

static inline size_t xfrm_mapping_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
}

static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
			 xfrm_address_t *new_saddr, __be16 new_sport)
{
	struct xfrm_user_mapping *um;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	um = nlmsg_data(nlh);

	memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
	um->id.spi = x->id.spi;
	um->id.family = x->props.family;
	um->id.proto = x->id.proto;
	memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
	memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
	um->new_sport = new_sport;
	um->old_sport = x->encap->encap_sport;
	um->reqid = x->props.reqid;

	return nlmsg_end(skb, nlh);
}

static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
			     __be16 sport)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	if (x->id.proto != IPPROTO_ESP)
		return -EINVAL;

	if (!x->encap)
		return -EINVAL;

	skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_mapping(skb, x, ipaddr, sport) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
}
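
/* Key-manager callbacks handed to the xfrm core.  xfrm_user_init() registers
 * this via xfrm_register_km(), so core state and policy events are delivered
 * to the netlink multicast groups used above.
 */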
static struct xfrm_mgr netlink_mgr = {
	.id = "netlink",
	.notify = xfrm_send_state_notify,
	.acquire = xfrm_send_acquire,
	.compile_policy = xfrm_compile_policy,
	.notify_policy = xfrm_send_policy_notify,
	.report = xfrm_send_report,
	.migrate = xfrm_send_migrate,
	.new_mapping = xfrm_send_mapping,
};

static int __net_init xfrm_user_net_init(struct net *net)
{
	struct sock *nlsk;

	nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX,
				     xfrm_netlink_rcv, NULL, THIS_MODULE);
	if (nlsk == NULL)
		return -ENOMEM;
	net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
	rcu_assign_pointer(net->xfrm.nlsk, nlsk);
	return 0;
}

static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
	struct net *net;
	list_for_each_entry(net, net_exit_list, exit_list)
		RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
	synchronize_net();
	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->xfrm.nlsk_stash);
}

static struct pernet_operations xfrm_user_net_ops = {
	.init = xfrm_user_net_init,
	.exit_batch = xfrm_user_net_exit,
};
socket\n"); 2978 2979 rv = register_pernet_subsys(&xfrm_user_net_ops); 2980 if (rv < 0) 2981 return rv; 2982 rv = xfrm_register_km(&netlink_mgr); 2983 if (rv < 0) 2984 unregister_pernet_subsys(&xfrm_user_net_ops); 2985 return rv; 2986 } 2987 2988 static void __exit xfrm_user_exit(void) 2989 { 2990 xfrm_unregister_km(&netlink_mgr); 2991 unregister_pernet_subsys(&xfrm_user_net_ops); 2992 } 2993 2994 module_init(xfrm_user_init); 2995 module_exit(xfrm_user_exit); 2996 MODULE_LICENSE("GPL"); 2997 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM); 2998 2999