/* xfrm_user.c: User interface to configure xfrm engine.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *
 */

#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
#include <net/ah.h>
#include <asm/uaccess.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#endif

static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
{
	struct nlattr *rt = attrs[type];
	struct xfrm_algo *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < xfrm_alg_len(algp))
		return -EINVAL;

	switch (type) {
	case XFRMA_ALG_AUTH:
	case XFRMA_ALG_CRYPT:
	case XFRMA_ALG_COMP:
		break;

	default:
		return -EINVAL;
	}

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static int verify_auth_trunc(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
	struct xfrm_algo_auth *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < xfrm_alg_auth_len(algp))
		return -EINVAL;

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static int verify_aead(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
	struct xfrm_algo_aead *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < aead_len(algp))
		return -EINVAL;

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
			    xfrm_address_t **addrp)
{
	struct nlattr *rt = attrs[type];

	if (rt && addrp)
		*addrp = nla_data(rt);
}

static inline int verify_sec_ctx_len(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
		return -EINVAL;

	return 0;
}

static inline int verify_replay(struct xfrm_usersa_info *p,
				struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
	struct xfrm_replay_state_esn *rs;

	if (p->flags & XFRM_STATE_ESN) {
		if (!rt)
			return -EINVAL;

		rs = nla_data(rt);

		if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
			return -EINVAL;

		if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
		    nla_len(rt) != sizeof(*rs))
			return -EINVAL;
	}

	if (!rt)
		return 0;

	/* As only ESP and AH support ESN feature. */
	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
		return -EINVAL;

	if (p->replay_window != 0)
		return -EINVAL;

	return 0;
}

static int verify_newsa_info(struct xfrm_usersa_info *p,
			     struct nlattr **attrs)
{
	int err;

	err = -EINVAL;
	switch (p->family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		err = -EAFNOSUPPORT;
		goto out;
#endif

	default:
		goto out;
	}

	err = -EINVAL;
	switch (p->id.proto) {
	case IPPROTO_AH:
		if ((!attrs[XFRMA_ALG_AUTH] &&
		     !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_TFCPAD] ||
		    (ntohl(p->id.spi) >= 0x10000))

			goto out;
		break;

	case IPPROTO_ESP:
		if (attrs[XFRMA_ALG_COMP])
			goto out;
		if (!attrs[XFRMA_ALG_AUTH] &&
		    !attrs[XFRMA_ALG_AUTH_TRUNC] &&
		    !attrs[XFRMA_ALG_CRYPT] &&
		    !attrs[XFRMA_ALG_AEAD])
			goto out;
		if ((attrs[XFRMA_ALG_AUTH] ||
		     attrs[XFRMA_ALG_AUTH_TRUNC] ||
		     attrs[XFRMA_ALG_CRYPT]) &&
		    attrs[XFRMA_ALG_AEAD])
			goto out;
		if (attrs[XFRMA_TFCPAD] &&
		    p->mode != XFRM_MODE_TUNNEL)
			goto out;
		break;

	case IPPROTO_COMP:
		if (!attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AUTH_TRUNC] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_TFCPAD])
			goto out;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		if (attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AUTH_TRUNC] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ENCAP] ||
		    attrs[XFRMA_SEC_CTX] ||
		    attrs[XFRMA_TFCPAD] ||
		    !attrs[XFRMA_COADDR])
			goto out;
		break;
#endif

	default:
		goto out;
	}

	if ((err = verify_aead(attrs)))
		goto out;
	if ((err = verify_auth_trunc(attrs)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
		goto out;
	if ((err = verify_sec_ctx_len(attrs)))
		goto out;
	if ((err = verify_replay(p, attrs)))
		goto out;

	err = -EINVAL;
	switch (p->mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_BEET:
		break;

	default:
		goto out;
	}

	err = 0;

out:
	return err;
}

static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
			   struct xfrm_algo_desc *(*get_byname)(const char *, int),
			   struct nlattr *rta)
{
	struct xfrm_algo *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	*algpp = p;
	return 0;
}

static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
		       struct nlattr *rta)
{
	struct xfrm_algo *ualg;
	struct xfrm_algo_auth *p;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	p->alg_key_len = ualg->alg_key_len;
	p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
	memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);

	*algpp = p;
	return 0;
}

static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
			     struct nlattr *rta)
{
	struct xfrm_algo_auth *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
		return -EINVAL;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	if (!p->alg_trunc_len)
		p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;

	*algpp = p;
	return 0;
}

static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
		       struct nlattr *rta)
{
	struct xfrm_algo_aead *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	*algpp = p;
	return 0;
}

static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
					 struct nlattr *rp)
{
	struct xfrm_replay_state_esn *up;
	int ulen;

	if (!replay_esn || !rp)
		return 0;

	up = nla_data(rp);
	ulen = xfrm_replay_state_esn_len(up);

	if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
		return -EINVAL;

	return 0;
}

static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
				       struct xfrm_replay_state_esn **preplay_esn,
				       struct nlattr *rta)
{
	struct xfrm_replay_state_esn *p, *pp, *up;
	int klen, ulen;

	if (!rta)
		return 0;

	up = nla_data(rta);
	klen = xfrm_replay_state_esn_len(up);
	ulen = nla_len(rta) >= klen ?
	       klen : sizeof(*up);

	p = kzalloc(klen, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	pp = kzalloc(klen, GFP_KERNEL);
	if (!pp) {
		kfree(p);
		return -ENOMEM;
	}

	memcpy(p, up, ulen);
	memcpy(pp, up, ulen);

	*replay_esn = p;
	*preplay_esn = pp;

	return 0;
}

static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
{
	int len = 0;

	if (xfrm_ctx) {
		len += sizeof(struct xfrm_user_sec_ctx);
		len += xfrm_ctx->ctx_len;
	}
	return len;
}

static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&x->id, &p->id, sizeof(x->id));
	memcpy(&x->sel, &p->sel, sizeof(x->sel));
	memcpy(&x->lft, &p->lft, sizeof(x->lft));
	x->props.mode = p->mode;
	x->props.replay_window = min_t(unsigned int, p->replay_window,
				       sizeof(x->replay.bitmap) * 8);
	x->props.reqid = p->reqid;
	x->props.family = p->family;
	memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
	x->props.flags = p->flags;

	if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		x->sel.family = p->family;
}

/*
 * someday when pfkey also has support, we could have the code
 * somehow made shareable and move it to xfrm_state.c - JHS
 *
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
				  int update_esn)
{
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];

	if (re) {
		struct xfrm_replay_state_esn *replay_esn;
		replay_esn = nla_data(re);
		memcpy(x->replay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
		memcpy(x->preplay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
	}

	if (rp) {
		struct xfrm_replay_state *replay;
		replay = nla_data(rp);
		memcpy(&x->replay, replay, sizeof(*replay));
		memcpy(&x->preplay, replay, sizeof(*replay));
	}

	if (lt) {
		struct xfrm_lifetime_cur *ltime;
		ltime = nla_data(lt);
		x->curlft.bytes = ltime->bytes;
		x->curlft.packets = ltime->packets;
		x->curlft.add_time = ltime->add_time;
		x->curlft.use_time = ltime->use_time;
	}

	if (et)
		x->replay_maxage = nla_get_u32(et);

	if (rt)
		x->replay_maxdiff = nla_get_u32(rt);
}

static struct xfrm_state *xfrm_state_construct(struct net *net,
					       struct xfrm_usersa_info *p,
					       struct nlattr **attrs,
					       int *errp)
{
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto error_no_put;

	copy_from_user_state(x, p);

	if (attrs[XFRMA_SA_EXTRA_FLAGS])
		x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);

	if ((err = attach_aead(&x->aead, &x->props.ealgo,
			       attrs[XFRMA_ALG_AEAD])))
		goto error;
	if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
				     attrs[XFRMA_ALG_AUTH_TRUNC])))
		goto error;
	if (!x->props.aalgo) {
		if ((err = attach_auth(&x->aalg, &x->props.aalgo,
				       attrs[XFRMA_ALG_AUTH])))
			goto error;
	}
	if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
				   xfrm_ealg_get_byname,
				   attrs[XFRMA_ALG_CRYPT])))
		goto error;
	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
				   xfrm_calg_get_byname,
				   attrs[XFRMA_ALG_COMP])))
		goto error;

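	/* The attributes below (UDP encapsulation template, TFC padding
	 * length, MIPv6 care-of address) are optional and are copied into
	 * the state only when userspace supplied them.
	 */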
	if (attrs[XFRMA_ENCAP]) {
		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				   sizeof(*x->encap), GFP_KERNEL);
		if (x->encap == NULL)
			goto error;
	}

	if (attrs[XFRMA_TFCPAD])
		x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);

	if (attrs[XFRMA_COADDR]) {
		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
				    sizeof(*x->coaddr), GFP_KERNEL);
		if (x->coaddr == NULL)
			goto error;
	}

	xfrm_mark_get(attrs, &x->mark);

	err = __xfrm_init_state(x, false);
	if (err)
		goto error;

	if (attrs[XFRMA_SEC_CTX] &&
	    security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
		goto error;

	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
					       attrs[XFRMA_REPLAY_ESN_VAL])))
		goto error;

	x->km.seq = p->seq;
	x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
	/* sysctl_xfrm_aevent_etime is in 100ms units */
	x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;

	if ((err = xfrm_init_replay(x)))
		goto error;

	/* override default values from above */
	xfrm_update_ae_params(x, attrs, 0);

	return x;

error:
	x->km.state = XFRM_STATE_DEAD;
	xfrm_state_put(x);
error_no_put:
	*errp = err;
	return NULL;
}

static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_info *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	int err;
	struct km_event c;
	kuid_t loginuid = audit_get_loginuid(current);
	unsigned int sessionid = audit_get_sessionid(current);
	u32 sid;

	err = verify_newsa_info(p, attrs);
	if (err)
		return err;

	x = xfrm_state_construct(net, p, attrs, &err);
	if (!x)
		return err;

	xfrm_state_hold(x);
	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
		err = xfrm_state_add(x);
	else
		err = xfrm_state_update(x);

	security_task_getsecid(current, &sid);
	xfrm_audit_state_add(x, err ?
			     0 : 1, loginuid, sessionid, sid);

	if (err < 0) {
		x->km.state = XFRM_STATE_DEAD;
		__xfrm_state_put(x);
		goto out;
	}

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
out:
	xfrm_state_put(x);
	return err;
}

static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
						 struct xfrm_usersa_id *p,
						 struct nlattr **attrs,
						 int *errp)
{
	struct xfrm_state *x = NULL;
	struct xfrm_mark m;
	int err;
	u32 mark = xfrm_mark_get(attrs, &m);

	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
		err = -ESRCH;
		x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
	} else {
		xfrm_address_t *saddr = NULL;

		verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
		if (!saddr) {
			err = -EINVAL;
			goto out;
		}

		err = -ESRCH;
		x = xfrm_state_lookup_byaddr(net, mark,
					     &p->daddr, saddr,
					     p->proto, p->family);
	}

 out:
	if (!x && errp)
		*errp = err;
	return x;
}

static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err = -ESRCH;
	struct km_event c;
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	kuid_t loginuid = audit_get_loginuid(current);
	unsigned int sessionid = audit_get_sessionid(current);
	u32 sid;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		return err;

	if ((err = security_xfrm_state_delete(x)) != 0)
		goto out;

	if (xfrm_state_kern(x)) {
		err = -EPERM;
		goto out;
	}

	err = xfrm_state_delete(x);

	if (err < 0)
		goto out;

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;
	km_state_notify(x, &c);

out:
	security_task_getsecid(current, &sid);
	xfrm_audit_state_delete(x, err ?
				0 : 1, loginuid, sessionid, sid);
	xfrm_state_put(x);
	return err;
}

static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->id, &x->id, sizeof(p->id));
	memcpy(&p->sel, &x->sel, sizeof(p->sel));
	memcpy(&p->lft, &x->lft, sizeof(p->lft));
	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
	memcpy(&p->stats, &x->stats, sizeof(p->stats));
	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
	p->mode = x->props.mode;
	p->replay_window = x->props.replay_window;
	p->reqid = x->props.reqid;
	p->family = x->props.family;
	p->flags = x->props.flags;
	p->seq = x->km.seq;
}

struct xfrm_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};

static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
	struct xfrm_user_sec_ctx *uctx;
	struct nlattr *attr;
	int ctx_size = sizeof(*uctx) + s->ctx_len;

	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
	if (attr == NULL)
		return -EMSGSIZE;

	uctx = nla_data(attr);
	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = ctx_size;
	uctx->ctx_doi = s->ctx_doi;
	uctx->ctx_alg = s->ctx_alg;
	uctx->ctx_len = s->ctx_len;
	memcpy(uctx + 1, s->ctx_str, s->ctx_len);

	return 0;
}

static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
	struct xfrm_algo *algo;
	struct nlattr *nla;

	nla = nla_reserve(skb, XFRMA_ALG_AUTH,
			  sizeof(*algo) + (auth->alg_key_len + 7) / 8);
	if (!nla)
		return -EMSGSIZE;

	algo = nla_data(nla);
	strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
	memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
	algo->alg_key_len = auth->alg_key_len;

	return 0;
}

/* Don't change this without updating xfrm_sa_len! */
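/* Emits the base xfrm_usersa_info plus one netlink attribute per optional
 * piece of SA state (algorithms, encap template, coaddr, mark, ESN replay
 * state, security context); xfrm_sa_len() must account for every attribute
 * added here.
 */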
static int copy_to_user_state_extra(struct xfrm_state *x,
				    struct xfrm_usersa_info *p,
				    struct sk_buff *skb)
{
	int ret = 0;

	copy_to_user_state(x, p);

	if (x->props.extra_flags) {
		ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
				  x->props.extra_flags);
		if (ret)
			goto out;
	}

	if (x->coaddr) {
		ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
		if (ret)
			goto out;
	}
	if (x->lastused) {
		ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
		if (ret)
			goto out;
	}
	if (x->aead) {
		ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
		if (ret)
			goto out;
	}
	if (x->aalg) {
		ret = copy_to_user_auth(x->aalg, skb);
		if (!ret)
			ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
				      xfrm_alg_auth_len(x->aalg), x->aalg);
		if (ret)
			goto out;
	}
	if (x->ealg) {
		ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
		if (ret)
			goto out;
	}
	if (x->calg) {
		ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
		if (ret)
			goto out;
	}
	if (x->encap) {
		ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
		if (ret)
			goto out;
	}
	if (x->tfcpad) {
		ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
		if (ret)
			goto out;
	}
	ret = xfrm_mark_put(skb, &x->mark);
	if (ret)
		goto out;
	if (x->replay_esn) {
		ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
		if (ret)
			goto out;
	}
	if (x->security)
		ret = copy_sec_ctx(x->security, skb);
out:
	return ret;
}

static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_usersa_info *p;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);

	err = copy_to_user_state_extra(x, p, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);
	return 0;
}

static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct sock *sk = cb->skb->sk;
	struct net *net = sock_net(sk);

	xfrm_state_walk_done(walk, net);
	return 0;
}

static const struct nla_policy xfrma_policy[XFRMA_MAX+1];
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		struct nlattr *attrs[XFRMA_MAX+1];
		struct xfrm_address_filter *filter = NULL;
		u8 proto = 0;
		int err;

		cb->args[0] = 1;

		err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
				  xfrma_policy);
		if (err < 0)
			return err;

		if (attrs[XFRMA_ADDRESS_FILTER]) {
			filter = kmalloc(sizeof(*filter), GFP_KERNEL);
			if (filter == NULL)
				return -ENOMEM;

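			/* Duplicate the user-supplied address filter so the
			 * SA walk below can use it independently of the
			 * request skb.
			 */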
			memcpy(filter, nla_data(attrs[XFRMA_ADDRESS_FILTER]),
			       sizeof(*filter));
		}

		if (attrs[XFRMA_PROTO])
			proto = nla_get_u8(attrs[XFRMA_PROTO]);

		xfrm_state_walk_init(walk, proto, filter);
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}

static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_state(x, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static inline size_t xfrm_spdinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhinfo));
}

static int build_spdinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(net, &si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	return nlmsg_end(skb, nlh);
}

static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0)
		BUG();

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}

static inline size_t xfrm_sadinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
	       + nla_total_size(4); /* XFRMA_SAD_CNT */
}

static int build_sadinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_sadinfo si;
	struct xfrmu_sadhinfo sh;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_sad_getinfo(net, &si);

	sh.sadhmcnt = si.sadhmcnt;
	sh.sadhcnt = si.sadhcnt;

	err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
	if (!err)
		err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	return nlmsg_end(skb, nlh);
}

static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0)
		BUG();

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}

static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	struct sk_buff *resp_skb;
	int err = -ESRCH;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		goto out_noput;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
	} else {
		err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
	}
	xfrm_state_put(x);
out_noput:
	return err;
}

static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;
	u32 mark;
	struct xfrm_mark m;

	p = nlmsg_data(nlh);
	err = verify_spi_info(p->info.id.proto, p->min, p->max);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;

	mark = xfrm_mark_get(attrs, &m);
	if (p->info.seq) {
		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
		if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	if (!x)
		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
				  p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (x == NULL)
		goto out_noput;

	err = xfrm_alloc_spi(x, p->min, p->max);
	if (err)
		goto out;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);

out:
	xfrm_state_put(x);
out_noput:
	return err;
}

static int verify_policy_dir(u8 dir)
{
	switch (dir) {
	case XFRM_POLICY_IN:
	case XFRM_POLICY_OUT:
	case XFRM_POLICY_FWD:
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int verify_policy_type(u8 type)
{
	switch (type) {
	case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
	case XFRM_POLICY_TYPE_SUB:
#endif
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
{
	int ret;

	switch (p->share) {
	case XFRM_SHARE_ANY:
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:
		break;

	default:
		return -EINVAL;
	}

	switch (p->action) {
	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:
		break;

	default:
		return -EINVAL;
	}

	switch (p->sel.family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		return -EAFNOSUPPORT;
#endif

	default:
		return -EINVAL;
	}

	ret = verify_policy_dir(p->dir);
	if (ret)
		return ret;
	if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
		return -EINVAL;

	return 0;
}

static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
}

static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
			   int nr)
{
	int i;

	xp->xfrm_nr = nr;
	for (i = 0; i < nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];

		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
		memcpy(&t->saddr, &ut->saddr,
		       sizeof(xfrm_address_t));
		t->reqid = ut->reqid;
		t->mode = ut->mode;
		t->share = ut->share;
		t->optional = ut->optional;
		t->aalgos = ut->aalgos;
		t->ealgos = ut->ealgos;
		t->calgos = ut->calgos;
		/* If all masks are ~0, then we allow all algorithms. */
		t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
		t->encap_family = ut->family;
	}
}

static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
{
	int i;

	if (nr > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < nr; i++) {
		/* We never validated the ut->family value, so many
		 * applications simply leave it at zero.  The check was
		 * never made and ut->family was ignored because all
		 * templates could be assumed to have the same family as
		 * the policy itself.  Now that we will have ipv4-in-ipv6
		 * and ipv6-in-ipv4 tunnels, this is no longer true.
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			break;
#endif
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_TMPL];

	if (!rt) {
		pol->xfrm_nr = 0;
	} else {
		struct xfrm_user_tmpl *utmpl = nla_data(rt);
		int nr = nla_len(rt) / sizeof(*utmpl);
		int err;

		err = validate_tmpl(nr, utmpl, pol->family);
		if (err)
			return err;

		copy_templates(pol, utmpl, nr);
	}
	return 0;
}

static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
	struct xfrm_userpolicy_type *upt;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	if (rt) {
		upt = nla_data(rt);
		type = upt->type;
	}

	err = verify_policy_type(type);
	if (err)
		return err;

	*tp = type;
	return 0;
}

static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
	xp->priority = p->priority;
	xp->index = p->index;
	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
	xp->action = p->action;
	xp->flags = p->flags;
	xp->family = p->sel.family;
	/* XXX xp->share = p->share; */
}

static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
	p->priority = xp->priority;
	p->index = xp->index;
	p->sel.family = xp->family;
	p->dir = dir;
	p->action = xp->action;
	p->flags = xp->flags;
	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}

static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if (err)
		goto error;

	xfrm_mark_get(attrs, &xp->mark);

	return xp;
error:
	*errp = err;
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}

static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;
	kuid_t loginuid = audit_get_loginuid(current);
	unsigned int sessionid = audit_get_sessionid(current);
	u32 sid;

	err = verify_newpolicy_info(p);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha!
	 * this is anti-netlink really, i.e. more pfkey derived;
	 * in netlink excl is a flag and you wouldn't need
	 * a type XFRM_MSG_UPDPOLICY - JHS
	 */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	security_task_getsecid(current, &sid);
	xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);

	if (err) {
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	xfrm_pol_put(xp);

	return 0;
}

static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
	int i;

	if (xp->xfrm_nr == 0)
		return 0;

	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

		memset(up, 0, sizeof(*up));
		memcpy(&up->id, &kp->id, sizeof(up->id));
		up->family = kp->encap_family;
		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
		up->reqid = kp->reqid;
		up->mode = kp->mode;
		up->share = kp->share;
		up->optional = kp->optional;
		up->aalgos = kp->aalgos;
		up->ealgos = kp->ealgos;
		up->calgos = kp->calgos;
	}

	return nla_put(skb, XFRMA_TMPL,
		       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}

static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
	if (x->security) {
		return copy_sec_ctx(x->security, skb);
	}
	return 0;
}

static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
	if (xp->security)
		return copy_sec_ctx(xp->security, skb);
	return 0;
}
static inline size_t userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	return 0;
#endif
}

#ifdef CONFIG_XFRM_SUB_POLICY
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt = {
		.type = type,
	};

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}

#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
#endif

static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);
	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);
	return 0;
}

static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
	struct net *net =
		sock_net(cb->skb->sk);

	xfrm_policy_walk_done(walk, net);
	return 0;
}

static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		cb->args[0] = 1;
		xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
	}

	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);

	return skb->len;
}

static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					   struct xfrm_policy *xp,
					   int dir, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_policy(xp, dir, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct km_event c;
	int delete;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	p = nlmsg_data(nlh);
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	if (p->index)
		xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
					   ctx, delete, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
					    NETLINK_CB(skb).portid);
		}
	} else {
		kuid_t loginuid = audit_get_loginuid(current);
		unsigned int sessionid = audit_get_sessionid(current);
		u32 sid;

		security_task_getsecid(current, &sid);
		xfrm_audit_policy_delete(xp, err ?
					 0 : 1, loginuid, sessionid,
					 sid);

		if (err != 0)
			goto out;

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.portid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
	}

out:
	xfrm_pol_put(xp);
	if (delete && err == 0)
		xfrm_garbage_collect(net);
	return err;
}

static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
	struct xfrm_audit audit_info;
	int err;

	audit_info.loginuid = audit_get_loginuid(current);
	audit_info.sessionid = audit_get_sessionid(current);
	security_task_getsecid(current, &audit_info.secid);
	err = xfrm_state_flush(net, p->proto, &audit_info);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_state_notify(NULL, &c);

	return 0;
}

static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
{
	size_t replay_size = x->replay_esn ?
			     xfrm_replay_state_esn_len(x->replay_esn) :
			     sizeof(struct xfrm_replay_state);

	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
	       + nla_total_size(replay_size)
	       + nla_total_size(sizeof(struct xfrm_lifetime_cur))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(4) /* XFRM_AE_RTHR */
	       + nla_total_size(4); /* XFRM_AE_ETHR */
}

static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_aevent_id *id;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	id = nlmsg_data(nlh);
	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
	id->sa_id.spi = x->id.spi;
	id->sa_id.family = x->props.family;
	id->sa_id.proto = x->id.proto;
	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
	id->reqid = x->props.reqid;
	id->flags = c->data.aevent;

	if (x->replay_esn) {
		err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
	} else {
		err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
			      &x->replay);
	}
	if (err)
		goto out_cancel;
	err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
	if (err)
		goto out_cancel;

	if (id->flags & XFRM_AE_RTHR) {
		err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
		if (err)
			goto out_cancel;
	}
	if (id->flags & XFRM_AE_ETHR) {
		err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
				  x->replay_maxage * 10 / HZ);
		if (err)
			goto out_cancel;
	}
	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		goto out_cancel;

	return nlmsg_end(skb, nlh);

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}

static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct sk_buff *r_skb;
	int err;
	struct km_event c;
	u32 mark;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p =
		nlmsg_data(nlh);
	struct xfrm_usersa_id *id = &p->sa_id;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
	if (x == NULL)
		return -ESRCH;

	r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (r_skb == NULL) {
		xfrm_state_put(x);
		return -ENOMEM;
	}

	/*
	 * XXX: is this lock really needed - none of the other
	 * gets lock (the concern is things getting updated
	 * while we are still reading) - jhs
	 */
	spin_lock_bh(&x->lock);
	c.data.aevent = p->flags;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;

	if (build_aevent(r_skb, x, &c) < 0)
		BUG();
	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct km_event c;
	int err = -EINVAL;
	u32 mark = 0;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];

	if (!lt && !rp && !re)
		return err;

	/* pedantic mode - thou shalt sayeth replaceth */
	if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
		return err;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
	if (x == NULL)
		return -ESRCH;

	if (x->km.state != XFRM_STATE_VALID)
		goto out;

	err = xfrm_replay_verify_len(x->replay_esn, re);
	if (err)
		goto out;

	spin_lock_bh(&x->lock);
	xfrm_update_ae_params(x, attrs, 1);
	spin_unlock_bh(&x->lock);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.data.aevent = XFRM_AE_CU;
	km_state_notify(x, &c);
	err = 0;
out:
	xfrm_state_put(x);
	return err;
}

static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct xfrm_audit audit_info;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	audit_info.loginuid = audit_get_loginuid(current);
	audit_info.sessionid = audit_get_sessionid(current);
	security_task_getsecid(current, &audit_info.secid);
	err = xfrm_policy_flush(net, type, &audit_info);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}

	c.data.type = type;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_policy_notify(NULL, 0, &c);
	return 0;
}

static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
	struct xfrm_userpolicy_info *p = &up->pol;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err = -ENOENT;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	err =
	      copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	if (p->index)
		xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
					   &p->sel, ctx, 0, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (unlikely(xp->walk.dead))
		goto out;

	err = 0;
	if (up->hard) {
		kuid_t loginuid = audit_get_loginuid(current);
		unsigned int sessionid = audit_get_sessionid(current);
		u32 sid;

		security_task_getsecid(current, &sid);
		xfrm_policy_delete(xp, p->dir);
		xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);

	} else {
		// reset the timers here?
		WARN(1, "Don't know what to do with soft policy expire\n");
	}
	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);

out:
	xfrm_pol_put(xp);
	return err;
}

static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err;
	struct xfrm_user_expire *ue = nlmsg_data(nlh);
	struct xfrm_usersa_info *p = &ue->state;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);

	err = -ENOENT;
	if (x == NULL)
		return err;

	spin_lock_bh(&x->lock);
	err = -EINVAL;
	if (x->km.state != XFRM_STATE_VALID)
		goto out;
	km_state_expired(x, ue->hard, nlh->nlmsg_pid);

	if (ue->hard) {
		kuid_t loginuid = audit_get_loginuid(current);
		unsigned int sessionid = audit_get_sessionid(current);
		u32 sid;

		security_task_getsecid(current, &sid);
		__xfrm_state_delete(x);
		xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
	}
	err = 0;
out:
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_tmpl *ut;
	int i;
	struct nlattr *rt = attrs[XFRMA_TMPL];
	struct xfrm_mark mark;

	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto nomem;

	xfrm_mark_get(attrs, &mark);

	err = verify_newpolicy_info(&ua->policy);
	if (err)
		goto bad_policy;

	/* build an XP */
	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
	if (!xp)
		goto free_state;

	memcpy(&x->id, &ua->id, sizeof(ua->id));
	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
	xp->mark.m = x->mark.m = mark.m;
	xp->mark.v = x->mark.v = mark.v;
	ut = nla_data(rt);
	/* extract the templates and for each call km_key */
	for (i = 0; i < xp->xfrm_nr;
	     i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
		memcpy(&x->id, &t->id, sizeof(x->id));
		x->props.mode = t->mode;
		x->props.reqid = t->reqid;
		x->props.family = ut->family;
		t->aalgos = ua->aalgos;
		t->ealgos = ua->ealgos;
		t->calgos = ua->calgos;
		err = km_query(x, t, xp);

	}

	kfree(x);
	kfree(xp);

	return 0;

bad_policy:
	WARN(1, "BAD policy passed\n");
free_state:
	kfree(x);
nomem:
	return err;
}

#ifdef CONFIG_XFRM_MIGRATE
static int copy_from_user_migrate(struct xfrm_migrate *ma,
				  struct xfrm_kmaddress *k,
				  struct nlattr **attrs, int *num)
{
	struct nlattr *rt = attrs[XFRMA_MIGRATE];
	struct xfrm_user_migrate *um;
	int i, num_migrate;

	if (k != NULL) {
		struct xfrm_user_kmaddress *uk;

		uk = nla_data(attrs[XFRMA_KMADDRESS]);
		memcpy(&k->local, &uk->local, sizeof(k->local));
		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
		k->family = uk->family;
		k->reserved = uk->reserved;
	}

	um = nla_data(rt);
	num_migrate = nla_len(rt) / sizeof(*um);

	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++, um++, ma++) {
		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));

		ma->proto = um->proto;
		ma->mode = um->mode;
		ma->reqid = um->reqid;

		ma->old_family = um->old_family;
		ma->new_family = um->new_family;
	}

	*num = i;
	return 0;
}

static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
	struct xfrm_migrate m[XFRM_MAX_DEPTH];
	struct xfrm_kmaddress km, *kmp;
	u8 type;
	int err;
	int n = 0;
	struct net *net = sock_net(skb->sk);

	if (attrs[XFRMA_MIGRATE] == NULL)
		return -EINVAL;

	kmp = attrs[XFRMA_KMADDRESS] ?
	      &km : NULL;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
	if (err)
		return err;

	if (!n)
		return 0;

	xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net);

	return 0;
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	return -ENOPROTOOPT;
}
#endif

#ifdef CONFIG_XFRM_MIGRATE
static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
{
	struct xfrm_user_migrate um;

	memset(&um, 0, sizeof(um));
	um.proto = m->proto;
	um.mode = m->mode;
	um.reqid = m->reqid;
	um.old_family = m->old_family;
	memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
	memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
	um.new_family = m->new_family;
	memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
	memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));

	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}

static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
{
	struct xfrm_user_kmaddress uk;

	memset(&uk, 0, sizeof(uk));
	uk.family = k->family;
	uk.reserved = k->reserved;
	memcpy(&uk.local, &k->local, sizeof(uk.local));
	memcpy(&uk.remote, &k->remote, sizeof(uk.remote));

	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
}

static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
	       + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
	       + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
	       + userpolicy_type_attrsize();
}

static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
			 int num_migrate, const struct xfrm_kmaddress *k,
			 const struct xfrm_selector *sel, u8 dir, u8 type)
{
	const struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i, err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	if (k != NULL) {
		err = copy_to_user_kmaddress(k, skb);
		if (err)
			goto out_cancel;
	}
	err = copy_to_user_policy_type(type, skb);
	if (err)
		goto out_cancel;
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		err = copy_to_user_migrate(mp, skb);
		if (err)
			goto out_cancel;
	}

	return nlmsg_end(skb, nlh);

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}

static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k)
{
	struct net *net = &init_net;
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
		BUG();

	return
#else
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
                             const struct xfrm_migrate *m, int num_migrate,
                             const struct xfrm_kmaddress *k)
{
        return -ENOPROTOOPT;
}
#endif

#define XMSGSIZE(type) sizeof(struct type)

static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
        [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
        [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
        [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
        [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
        [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
        [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
        [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
        [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
        [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
        [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
        [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
        [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
        [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
        [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
        [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
        [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
        [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
        [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
        [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
        [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
};

#undef XMSGSIZE

static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
        [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
        [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
        [XFRMA_LASTUSED] = { .type = NLA_U64},
        [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
        [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
        [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
        [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
        [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
        [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
        [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
        [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
        [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
        [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
        [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
        [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
        [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
        [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
        [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
        [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
        [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
        [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
        [XFRMA_TFCPAD] = { .type = NLA_U32 },
        [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
        [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
        [XFRMA_PROTO] = { .type = NLA_U8 },
        [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
};

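/* Illustration (hedged sketch, not part of the original source): entries
 * above that set only .len are NLA_UNSPEC policies, so nlmsg_parse() treats
 * the value as a *minimum* payload size; variable-length attributes such as
 * the algorithm structs may legitimately be larger to carry the trailing
 * key bits.  A userspace XFRMA_ALG_CRYPT payload for a hypothetical 128-bit
 * key ("key" is the caller's key material) could look like:
 *
 *        unsigned char buf[sizeof(struct xfrm_algo) + 16];
 *        struct xfrm_algo *alg = (struct xfrm_algo *)buf;
 *
 *        memset(buf, 0, sizeof(buf));
 *        strncpy(alg->alg_name, "cbc(aes)", sizeof(alg->alg_name) - 1);
 *        alg->alg_key_len = 128;                 // key length in bits
 *        memcpy(alg->alg_key, key, 16);
 *        // emitted as nla_type = XFRMA_ALG_CRYPT with sizeof(buf) bytes
 */
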
static const struct xfrm_link {
        int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
        int (*dump)(struct sk_buff *, struct netlink_callback *);
        int (*done)(struct netlink_callback *);
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
        [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
        [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
        [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
                                             .dump = xfrm_dump_sa,
                                             .done = xfrm_dump_sa_done },
        [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
        [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
        [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
                                                 .dump = xfrm_dump_policy,
                                                 .done = xfrm_dump_policy_done },
        [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
        [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
        [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
        [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
        [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
        [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
        [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
        [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
        [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
        [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
        [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
        [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
        [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
};

static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *attrs[XFRMA_MAX+1];
        const struct xfrm_link *link;
        int type, err;

        type = nlh->nlmsg_type;
        if (type > XFRM_MSG_MAX)
                return -EINVAL;

        type -= XFRM_MSG_BASE;
        link = &xfrm_dispatch[type];

        /* All operations require privileges, even GET */
        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
             type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
            (nlh->nlmsg_flags & NLM_F_DUMP)) {
                if (link->dump == NULL)
                        return -EINVAL;

                {
                        struct netlink_dump_control c = {
                                .dump = link->dump,
                                .done = link->done,
                        };
                        return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
                }
        }

        err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
                          xfrma_policy);
        if (err < 0)
                return err;

        if (link->doit == NULL)
                return -EINVAL;

        return link->doit(skb, nlh, attrs);
}

static void xfrm_netlink_rcv(struct sk_buff *skb)
{
        struct net *net = sock_net(skb->sk);

        mutex_lock(&net->xfrm.xfrm_cfg_mutex);
        netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
        mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
}

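/* Illustration (hedged sketch, not part of the original source): everything
 * handed to xfrm_netlink_rcv() above arrives on an AF_NETLINK/NETLINK_XFRM
 * socket, and the sender needs CAP_NET_ADMIN.  A minimal userspace dump
 * request might look like this ("req", "sa" and "fd" are local names,
 * error handling omitted):
 *
 *        struct {
 *                struct nlmsghdr n;
 *                struct xfrm_usersa_id id;   // body, zeroed for a full dump
 *        } req;
 *        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
 *        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *
 *        memset(&req, 0, sizeof(req));
 *        req.n.nlmsg_len   = NLMSG_LENGTH(sizeof(req.id));
 *        req.n.nlmsg_type  = XFRM_MSG_GETSA;
 *        req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
 *        sendto(fd, &req, req.n.nlmsg_len, 0,
 *               (struct sockaddr *)&sa, sizeof(sa));
 *
 * With NLM_F_DUMP set, xfrm_user_rcv_msg() routes the request through
 * netlink_dump_start() instead of the per-type .doit handler.
 */
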
static inline size_t xfrm_expire_msgsize(void)
{
        return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
               + nla_total_size(sizeof(struct xfrm_mark));
}

static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
        struct xfrm_user_expire *ue;
        struct nlmsghdr *nlh;
        int err;

        nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
        if (nlh == NULL)
                return -EMSGSIZE;

        ue = nlmsg_data(nlh);
        copy_to_user_state(x, &ue->state);
        ue->hard = (c->data.hard != 0) ? 1 : 0;

        err = xfrm_mark_put(skb, &x->mark);
        if (err)
                return err;

        return nlmsg_end(skb, nlh);
}

static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
        struct net *net = xs_net(x);
        struct sk_buff *skb;

        skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        if (build_expire(skb, x, c) < 0) {
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
        struct net *net = xs_net(x);
        struct sk_buff *skb;

        skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        if (build_aevent(skb, x, c) < 0)
                BUG();

        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
}

static int xfrm_notify_sa_flush(const struct km_event *c)
{
        struct net *net = c->net;
        struct xfrm_usersa_flush *p;
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
        int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

        skb = nlmsg_new(len, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
        if (nlh == NULL) {
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        p = nlmsg_data(nlh);
        p->proto = c->data.proto;

        nlmsg_end(skb, nlh);

        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
}

static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
        size_t l = 0;
        if (x->aead)
                l += nla_total_size(aead_len(x->aead));
        if (x->aalg) {
                l += nla_total_size(sizeof(struct xfrm_algo) +
                                    (x->aalg->alg_key_len + 7) / 8);
                l += nla_total_size(xfrm_alg_auth_len(x->aalg));
        }
        if (x->ealg)
                l += nla_total_size(xfrm_alg_len(x->ealg));
        if (x->calg)
                l += nla_total_size(sizeof(*x->calg));
        if (x->encap)
                l += nla_total_size(sizeof(*x->encap));
        if (x->tfcpad)
                l += nla_total_size(sizeof(x->tfcpad));
        if (x->replay_esn)
                l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
        if (x->security)
                l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
                                    x->security->ctx_len);
        if (x->coaddr)
                l += nla_total_size(sizeof(*x->coaddr));
        if (x->props.extra_flags)
                l += nla_total_size(sizeof(x->props.extra_flags));

        /* Must count x->lastused as it may become non-zero behind our back. */
        l += nla_total_size(sizeof(u64));

        return l;
}

static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
        struct net *net = xs_net(x);
        struct xfrm_usersa_info *p;
        struct xfrm_usersa_id *id;
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
        int len = xfrm_sa_len(x);
        int headlen, err;

        headlen = sizeof(*p);
        if (c->event == XFRM_MSG_DELSA) {
                len += nla_total_size(headlen);
                headlen = sizeof(*id);
                len += nla_total_size(sizeof(struct xfrm_mark));
        }
        len += NLMSG_ALIGN(headlen);

        skb = nlmsg_new(len, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
        err = -EMSGSIZE;
        if (nlh == NULL)
                goto out_free_skb;

        p = nlmsg_data(nlh);
        if (c->event == XFRM_MSG_DELSA) {
                struct nlattr *attr;

                id = nlmsg_data(nlh);
                memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
                id->spi = x->id.spi;
                id->family = x->props.family;
                id->proto = x->id.proto;

                attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
                err = -EMSGSIZE;
                if (attr == NULL)
                        goto out_free_skb;

                p = nla_data(attr);
        }
        err = copy_to_user_state_extra(x, p, skb);
        if (err)
                goto out_free_skb;

        nlmsg_end(skb, nlh);

        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);

out_free_skb:
        kfree_skb(skb);
        return err;
}

static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
{

        switch (c->event) {
        case XFRM_MSG_EXPIRE:
                return xfrm_exp_state_notify(x, c);
        case XFRM_MSG_NEWAE:
                return xfrm_aevent_state_notify(x, c);
        case XFRM_MSG_DELSA:
        case XFRM_MSG_UPDSA:
        case XFRM_MSG_NEWSA:
                return xfrm_notify_sa(x, c);
        case XFRM_MSG_FLUSHSA:
                return xfrm_notify_sa_flush(c);
        default:
                printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
                       c->event);
                break;
        }

        return 0;

}

static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
                                          struct xfrm_policy *xp)
{
        return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
               + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
               + nla_total_size(sizeof(struct xfrm_mark))
               + nla_total_size(xfrm_user_sec_ctx_size(x->security))
               + userpolicy_type_attrsize();
}

static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
                         struct xfrm_tmpl *xt, struct xfrm_policy *xp)
{
        __u32 seq = xfrm_get_acqseq();
        struct xfrm_user_acquire *ua;
        struct nlmsghdr *nlh;
        int err;

        nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
        if (nlh == NULL)
                return -EMSGSIZE;

        ua = nlmsg_data(nlh);
        memcpy(&ua->id, &x->id, sizeof(ua->id));
        memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
        memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
        copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
        ua->aalgos = xt->aalgos;
        ua->ealgos = xt->ealgos;
        ua->calgos = xt->calgos;
        ua->seq = x->km.seq = seq;

        err = copy_to_user_tmpl(xp, skb);
        if (!err)
                err = copy_to_user_state_sec_ctx(x, skb);
        if (!err)
                err = copy_to_user_policy_type(xp->type, skb);
        if (!err)
                err = xfrm_mark_put(skb, &xp->mark);
        if (err) {
                nlmsg_cancel(skb, nlh);
                return err;
        }

        return nlmsg_end(skb, nlh);
}

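/* Illustration (hedged sketch, not part of the original source): the ACQUIRE
 * message built above is what a key manager subscribed to XFRMNLGRP_ACQUIRE
 * receives: a struct xfrm_user_acquire header followed by XFRMA_TMPL and,
 * when present, security-context, policy-type and mark attributes.  On the
 * receive side ("nlh" here stands for an already validated message header):
 *
 *        struct xfrm_user_acquire *ua = NLMSG_DATA(nlh);
 *
 *        // ua->sel  : traffic selector that triggered the acquire
 *        // ua->seq  : echoed back in the SA that answers it
 *        // ua->policy, ua->aalgos/ealgos/calgos : proposal constraints
 *        // attributes such as XFRMA_TMPL follow the fixed-size header
 */
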
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
                             struct xfrm_policy *xp)
{
        struct net *net = xs_net(x);
        struct sk_buff *skb;

        skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        if (build_acquire(skb, x, xt, xp) < 0)
                BUG();

        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
}

/* User gives us xfrm_userpolicy_info followed by an array of 0
 * or more templates.
 */
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
                                               u8 *data, int len, int *dir)
{
        struct net *net = sock_net(sk);
        struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
        struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
        struct xfrm_policy *xp;
        int nr;

        switch (sk->sk_family) {
        case AF_INET:
                if (opt != IP_XFRM_POLICY) {
                        *dir = -EOPNOTSUPP;
                        return NULL;
                }
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                if (opt != IPV6_XFRM_POLICY) {
                        *dir = -EOPNOTSUPP;
                        return NULL;
                }
                break;
#endif
        default:
                *dir = -EINVAL;
                return NULL;
        }

        *dir = -EINVAL;

        if (len < sizeof(*p) ||
            verify_newpolicy_info(p))
                return NULL;

        nr = ((len - sizeof(*p)) / sizeof(*ut));
        if (validate_tmpl(nr, ut, p->sel.family))
                return NULL;

        if (p->dir > XFRM_POLICY_OUT)
                return NULL;

        xp = xfrm_policy_alloc(net, GFP_ATOMIC);
        if (xp == NULL) {
                *dir = -ENOBUFS;
                return NULL;
        }

        copy_from_user_policy(xp, p);
        xp->type = XFRM_POLICY_TYPE_MAIN;
        copy_templates(xp, ut, nr);

        *dir = p->dir;

        return xp;
}

static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
        return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
               + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
               + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
               + nla_total_size(sizeof(struct xfrm_mark))
               + userpolicy_type_attrsize();
}

static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
                           int dir, const struct km_event *c)
{
        struct xfrm_user_polexpire *upe;
        int hard = c->data.hard;
        struct nlmsghdr *nlh;
        int err;

        nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
        if (nlh == NULL)
                return -EMSGSIZE;

        upe = nlmsg_data(nlh);
        copy_to_user_policy(xp, &upe->pol, dir);
        err = copy_to_user_tmpl(xp, skb);
        if (!err)
                err = copy_to_user_sec_ctx(xp, skb);
        if (!err)
                err = copy_to_user_policy_type(xp->type, skb);
        if (!err)
                err = xfrm_mark_put(skb, &xp->mark);
        if (err) {
                nlmsg_cancel(skb, nlh);
                return err;
        }
        upe->hard = !!hard;

        return nlmsg_end(skb, nlh);
}

static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
        struct net *net = xp_net(xp);
        struct sk_buff *skb;

        skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        if (build_polexpire(skb, xp, dir, c) < 0)
                BUG();

        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

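/* Illustration (hedged sketch, not part of the original source):
 * xfrm_compile_policy() above parses the buffer passed to
 * setsockopt(IP_XFRM_POLICY) or setsockopt(IPV6_XFRM_POLICY): one struct
 * xfrm_userpolicy_info followed by zero or more struct xfrm_user_tmpl
 * entries.  A userspace caller with a single transport-mode ESP template
 * might build it roughly as follows ("buf" and "sock_fd" are local names;
 * real callers also fill the selector and lifetimes):
 *
 *        struct {
 *                struct xfrm_userpolicy_info info;
 *                struct xfrm_user_tmpl tmpl;
 *        } buf;
 *
 *        memset(&buf, 0, sizeof(buf));
 *        buf.info.dir        = XFRM_POLICY_OUT;
 *        buf.info.sel.family = AF_INET;
 *        buf.tmpl.id.proto   = IPPROTO_ESP;
 *        buf.tmpl.mode       = XFRM_MODE_TRANSPORT;
 *        buf.tmpl.family     = AF_INET;
 *        setsockopt(sock_fd, SOL_IP, IP_XFRM_POLICY, &buf, sizeof(buf));
 */
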
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
        int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
        struct net *net = xp_net(xp);
        struct xfrm_userpolicy_info *p;
        struct xfrm_userpolicy_id *id;
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
        int headlen, err;

        headlen = sizeof(*p);
        if (c->event == XFRM_MSG_DELPOLICY) {
                len += nla_total_size(headlen);
                headlen = sizeof(*id);
        }
        len += userpolicy_type_attrsize();
        len += nla_total_size(sizeof(struct xfrm_mark));
        len += NLMSG_ALIGN(headlen);

        skb = nlmsg_new(len, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
        err = -EMSGSIZE;
        if (nlh == NULL)
                goto out_free_skb;

        p = nlmsg_data(nlh);
        if (c->event == XFRM_MSG_DELPOLICY) {
                struct nlattr *attr;

                id = nlmsg_data(nlh);
                memset(id, 0, sizeof(*id));
                id->dir = dir;
                if (c->data.byid)
                        id->index = xp->index;
                else
                        memcpy(&id->sel, &xp->selector, sizeof(id->sel));

                attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
                err = -EMSGSIZE;
                if (attr == NULL)
                        goto out_free_skb;

                p = nla_data(attr);
        }

        copy_to_user_policy(xp, p, dir);
        err = copy_to_user_tmpl(xp, skb);
        if (!err)
                err = copy_to_user_policy_type(xp->type, skb);
        if (!err)
                err = xfrm_mark_put(skb, &xp->mark);
        if (err)
                goto out_free_skb;

        nlmsg_end(skb, nlh);

        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

out_free_skb:
        kfree_skb(skb);
        return err;
}

static int xfrm_notify_policy_flush(const struct km_event *c)
{
        struct net *net = c->net;
        struct nlmsghdr *nlh;
        struct sk_buff *skb;
        int err;

        skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
        err = -EMSGSIZE;
        if (nlh == NULL)
                goto out_free_skb;
        err = copy_to_user_policy_type(c->data.type, skb);
        if (err)
                goto out_free_skb;

        nlmsg_end(skb, nlh);

        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

out_free_skb:
        kfree_skb(skb);
        return err;
}

static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{

        switch (c->event) {
        case XFRM_MSG_NEWPOLICY:
        case XFRM_MSG_UPDPOLICY:
        case XFRM_MSG_DELPOLICY:
                return xfrm_notify_policy(xp, dir, c);
        case XFRM_MSG_FLUSHPOLICY:
                return xfrm_notify_policy_flush(c);
        case XFRM_MSG_POLEXPIRE:
                return xfrm_exp_policy_notify(xp, dir, c);
        default:
                printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
                       c->event);
        }

        return 0;

}

static inline size_t xfrm_report_msgsize(void)
{
        return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
}

static int build_report(struct sk_buff *skb, u8 proto,
                        struct xfrm_selector *sel, xfrm_address_t *addr)
{
        struct xfrm_user_report *ur;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
        if (nlh == NULL)
                return -EMSGSIZE;

        ur = nlmsg_data(nlh);
        ur->proto = proto;
        memcpy(&ur->sel, sel, sizeof(ur->sel));

        if (addr) {
                int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
                if (err) {
                        nlmsg_cancel(skb, nlh);
                        return err;
                }
        }
        return nlmsg_end(skb, nlh);
}

static int xfrm_send_report(struct net *net, u8 proto,
                            struct xfrm_selector *sel, xfrm_address_t *addr)
{
        struct sk_buff *skb;

        skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        if (build_report(skb, proto, sel, addr) < 0)
                BUG();

        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
}

static inline size_t xfrm_mapping_msgsize(void)
{
        return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
}

static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
                         xfrm_address_t *new_saddr, __be16 new_sport)
{
        struct xfrm_user_mapping *um;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
        if (nlh == NULL)
                return -EMSGSIZE;

        um = nlmsg_data(nlh);

        memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
        um->id.spi = x->id.spi;
        um->id.family = x->props.family;
        um->id.proto = x->id.proto;
        memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
        memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
        um->new_sport = new_sport;
        um->old_sport = x->encap->encap_sport;
        um->reqid = x->props.reqid;

        return nlmsg_end(skb, nlh);
}

static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
                             __be16 sport)
{
        struct net *net = xs_net(x);
        struct sk_buff *skb;

        if (x->id.proto != IPPROTO_ESP)
                return -EINVAL;

        if (!x->encap)
                return -EINVAL;

        skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;

        if (build_mapping(skb, x, ipaddr, sport) < 0)
                BUG();

        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
}

static bool xfrm_is_alive(const struct km_event *c)
{
        return (bool)xfrm_acquire_is_on(c->net);
}

static struct xfrm_mgr netlink_mgr = {
        .id = "netlink",
        .notify = xfrm_send_state_notify,
        .acquire = xfrm_send_acquire,
        .compile_policy = xfrm_compile_policy,
        .notify_policy = xfrm_send_policy_notify,
        .report = xfrm_send_report,
        .migrate = xfrm_send_migrate,
        .new_mapping = xfrm_send_mapping,
        .is_alive = xfrm_is_alive,
};

static int __net_init xfrm_user_net_init(struct net *net)
{
        struct sock *nlsk;
        struct netlink_kernel_cfg cfg = {
                .groups = XFRMNLGRP_MAX,
                .input = xfrm_netlink_rcv,
        };

        nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
        if (nlsk == NULL)
                return -ENOMEM;
        net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
        rcu_assign_pointer(net->xfrm.nlsk, nlsk);
        return 0;
}

static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
        struct net *net;
        list_for_each_entry(net, net_exit_list, exit_list)
                RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
        synchronize_net();
        list_for_each_entry(net, net_exit_list, exit_list)
                netlink_kernel_release(net->xfrm.nlsk_stash);
}

static struct pernet_operations xfrm_user_net_ops = {
        .init = xfrm_user_net_init,
        .exit_batch = xfrm_user_net_exit,
};

static int __init xfrm_user_init(void)
{
        int rv;

        printk(KERN_INFO "Initializing XFRM netlink socket\n");

        rv = register_pernet_subsys(&xfrm_user_net_ops);
        if (rv < 0)
                return rv;
        rv = xfrm_register_km(&netlink_mgr);
        if (rv < 0)
                unregister_pernet_subsys(&xfrm_user_net_ops);
        return rv;
}

static void __exit xfrm_user_exit(void)
{
        xfrm_unregister_km(&netlink_mgr);
        unregister_pernet_subsys(&xfrm_user_net_ops);
}

module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);

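/* Illustration (hedged sketch, not part of the original source): the
 * nlmsg_multicast() calls in this file deliver notifications to the
 * XFRMNLGRP_* groups (XFRMNLGRP_SA, XFRMNLGRP_POLICY, XFRMNLGRP_EXPIRE,
 * XFRMNLGRP_ACQUIRE, ...).  A userspace monitor might subscribe to one of
 * them like this ("sa", "fd" and "grp" are local names, error handling
 * omitted):
 *
 *        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
 *        int fd  = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *        int grp = XFRMNLGRP_EXPIRE;
 *
 *        bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *        setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *                   &grp, sizeof(grp));
 *        // recv() then yields the XFRM_MSG_EXPIRE messages produced by
 *        // build_expire() above.
 */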