/* xfrm_user.c: User interface to configure xfrm engine.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *
 */

#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
#include <net/ah.h>
#include <asm/uaccess.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#endif

static inline int aead_len(struct xfrm_algo_aead *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
{
	struct nlattr *rt = attrs[type];
	struct xfrm_algo *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < xfrm_alg_len(algp))
		return -EINVAL;

	switch (type) {
	case XFRMA_ALG_AUTH:
	case XFRMA_ALG_CRYPT:
	case XFRMA_ALG_COMP:
		break;

	default:
		return -EINVAL;
	}

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static int verify_auth_trunc(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
	struct xfrm_algo_auth *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < xfrm_alg_auth_len(algp))
		return -EINVAL;

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static int verify_aead(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
	struct xfrm_algo_aead *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < aead_len(algp))
		return -EINVAL;

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
			    xfrm_address_t **addrp)
{
	struct nlattr *rt = attrs[type];

	if (rt && addrp)
		*addrp = nla_data(rt);
}

static inline int verify_sec_ctx_len(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
		return -EINVAL;

	return 0;
}

static inline int verify_replay(struct xfrm_usersa_info *p,
				struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
	struct xfrm_replay_state_esn *rs;

	if (p->flags & XFRM_STATE_ESN) {
		if (!rt)
			return -EINVAL;

		rs = nla_data(rt);

		if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
			return -EINVAL;

		if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
		    nla_len(rt) != sizeof(*rs))
			return -EINVAL;
	}

	if (!rt)
		return 0;

	if (p->id.proto != IPPROTO_ESP)
		return -EINVAL;

	if (p->replay_window != 0)
		return -EINVAL;

	return 0;
}

static int verify_newsa_info(struct xfrm_usersa_info *p,
			     struct nlattr **attrs)
{
	int err;

	err = -EINVAL;
	switch (p->family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		err = -EAFNOSUPPORT;
		goto out;
#endif

	default:
		goto out;
	}

	err = -EINVAL;
	switch (p->id.proto) {
	case IPPROTO_AH:
		if ((!attrs[XFRMA_ALG_AUTH] &&
		     !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_TFCPAD] ||
		    (ntohl(p->id.spi) >= 0x10000))

			goto out;
		break;

	case IPPROTO_ESP:
		if (attrs[XFRMA_ALG_COMP])
			goto out;
		if (!attrs[XFRMA_ALG_AUTH] &&
		    !attrs[XFRMA_ALG_AUTH_TRUNC] &&
		    !attrs[XFRMA_ALG_CRYPT] &&
		    !attrs[XFRMA_ALG_AEAD])
			goto out;
		if ((attrs[XFRMA_ALG_AUTH] ||
		     attrs[XFRMA_ALG_AUTH_TRUNC] ||
		     attrs[XFRMA_ALG_CRYPT]) &&
		    attrs[XFRMA_ALG_AEAD])
			goto out;
		if (attrs[XFRMA_TFCPAD] &&
		    p->mode != XFRM_MODE_TUNNEL)
			goto out;
		break;

	case IPPROTO_COMP:
		if (!attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AUTH_TRUNC] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_TFCPAD])
			goto out;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		if (attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AUTH_TRUNC] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ENCAP] ||
		    attrs[XFRMA_SEC_CTX] ||
		    attrs[XFRMA_TFCPAD] ||
		    !attrs[XFRMA_COADDR])
			goto out;
		break;
#endif

	default:
		goto out;
	}

	if ((err = verify_aead(attrs)))
		goto out;
	if ((err = verify_auth_trunc(attrs)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
		goto out;
	if ((err = verify_sec_ctx_len(attrs)))
		goto out;
	if ((err = verify_replay(p, attrs)))
		goto out;

	err = -EINVAL;
	switch (p->mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_BEET:
		break;

	default:
		goto out;
	}

	err = 0;

out:
	return err;
}

static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
			   struct xfrm_algo_desc *(*get_byname)(const char *, int),
			   struct nlattr *rta)
{
	struct xfrm_algo *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	*algpp = p;
	return 0;
}

static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
		       struct nlattr *rta)
{
	struct xfrm_algo *ualg;
	struct xfrm_algo_auth *p;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	p->alg_key_len = ualg->alg_key_len;
	p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
	memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);

	*algpp = p;
	return 0;
}

static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
			     struct nlattr *rta)
{
	struct xfrm_algo_auth *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
		return -EINVAL;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	if (!p->alg_trunc_len)
		p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;

	*algpp = p;
	return 0;
}

static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
		       struct nlattr *rta)
{
	struct xfrm_algo_aead *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	*algpp = p;
	return 0;
}

static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
					 struct nlattr *rp)
{
	struct xfrm_replay_state_esn *up;
	int ulen;

	if (!replay_esn || !rp)
		return 0;

	up = nla_data(rp);
	ulen = xfrm_replay_state_esn_len(up);

	if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
		return -EINVAL;

	return 0;
}

static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
				       struct xfrm_replay_state_esn **preplay_esn,
				       struct nlattr *rta)
{
	struct xfrm_replay_state_esn *p, *pp, *up;
	int klen, ulen;

	if (!rta)
		return 0;

	up = nla_data(rta);
	klen = xfrm_replay_state_esn_len(up);
	ulen = nla_len(rta) >= klen ? klen : sizeof(*up);

	p = kzalloc(klen, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	pp = kzalloc(klen, GFP_KERNEL);
	if (!pp) {
		kfree(p);
		return -ENOMEM;
	}

	memcpy(p, up, ulen);
	memcpy(pp, up, ulen);

	*replay_esn = p;
	*preplay_esn = pp;

	return 0;
}

static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
{
	int len = 0;

	if (xfrm_ctx) {
		len += sizeof(struct xfrm_user_sec_ctx);
		len += xfrm_ctx->ctx_len;
	}
	return len;
}

static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&x->id, &p->id, sizeof(x->id));
	memcpy(&x->sel, &p->sel, sizeof(x->sel));
	memcpy(&x->lft, &p->lft, sizeof(x->lft));
	x->props.mode = p->mode;
	x->props.replay_window = min_t(unsigned int, p->replay_window,
				       sizeof(x->replay.bitmap) * 8);
	x->props.reqid = p->reqid;
	x->props.family = p->family;
	memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
	x->props.flags = p->flags;

	if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		x->sel.family = p->family;
}

/*
 * someday when pfkey also has support, we could have the code
 * somehow made shareable and move it to xfrm_state.c - JHS
 *
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
				  int update_esn)
{
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];

	if (re) {
		struct xfrm_replay_state_esn *replay_esn;
		replay_esn = nla_data(re);
		memcpy(x->replay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
		memcpy(x->preplay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
	}

	if (rp) {
		struct xfrm_replay_state *replay;
		replay = nla_data(rp);
		memcpy(&x->replay, replay, sizeof(*replay));
		memcpy(&x->preplay, replay, sizeof(*replay));
	}

	if (lt) {
		struct xfrm_lifetime_cur *ltime;
		ltime = nla_data(lt);
		x->curlft.bytes = ltime->bytes;
		x->curlft.packets = ltime->packets;
		x->curlft.add_time = ltime->add_time;
		x->curlft.use_time = ltime->use_time;
	}

	if (et)
		x->replay_maxage = nla_get_u32(et);

	if (rt)
		x->replay_maxdiff = nla_get_u32(rt);
}

static struct xfrm_state *xfrm_state_construct(struct net *net,
					       struct xfrm_usersa_info *p,
					       struct nlattr **attrs,
					       int *errp)
{
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto error_no_put;

	copy_from_user_state(x, p);

	if (attrs[XFRMA_SA_EXTRA_FLAGS])
		x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);

	if ((err = attach_aead(&x->aead, &x->props.ealgo,
			       attrs[XFRMA_ALG_AEAD])))
		goto error;
	if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
				     attrs[XFRMA_ALG_AUTH_TRUNC])))
		goto error;
	if (!x->props.aalgo) {
		if ((err = attach_auth(&x->aalg, &x->props.aalgo,
				       attrs[XFRMA_ALG_AUTH])))
			goto error;
	}
	if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
				   xfrm_ealg_get_byname,
				   attrs[XFRMA_ALG_CRYPT])))
		goto error;
	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
				   xfrm_calg_get_byname,
				   attrs[XFRMA_ALG_COMP])))
		goto error;

	if (attrs[XFRMA_ENCAP]) {
		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				   sizeof(*x->encap), GFP_KERNEL);
		if (x->encap == NULL)
			goto error;
	}

	if (attrs[XFRMA_TFCPAD])
		x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);

	if (attrs[XFRMA_COADDR]) {
		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
				    sizeof(*x->coaddr), GFP_KERNEL);
		if (x->coaddr == NULL)
			goto error;
	}

	xfrm_mark_get(attrs, &x->mark);

	err = __xfrm_init_state(x, false);
	if (err)
		goto error;

	if (attrs[XFRMA_SEC_CTX] &&
	    security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
		goto error;

	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
					       attrs[XFRMA_REPLAY_ESN_VAL])))
		goto error;

	x->km.seq = p->seq;
	x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
	/* sysctl_xfrm_aevent_etime is in 100ms units */
	x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;

	if ((err = xfrm_init_replay(x)))
		goto error;

	/* override default values from above */
	xfrm_update_ae_params(x, attrs, 0);

	return x;

error:
	x->km.state = XFRM_STATE_DEAD;
	xfrm_state_put(x);
error_no_put:
	*errp = err;
	return NULL;
}

static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_info *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	int err;
	struct km_event c;
	kuid_t loginuid = audit_get_loginuid(current);
	unsigned int sessionid = audit_get_sessionid(current);
	u32 sid;

	err = verify_newsa_info(p, attrs);
	if (err)
		return err;

	x = xfrm_state_construct(net, p, attrs, &err);
	if (!x)
		return err;

	xfrm_state_hold(x);
	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
		err = xfrm_state_add(x);
	else
		err = xfrm_state_update(x);

	security_task_getsecid(current, &sid);
	xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);

	if (err < 0) {
		x->km.state = XFRM_STATE_DEAD;
		__xfrm_state_put(x);
		goto out;
	}

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
out:
	xfrm_state_put(x);
	return err;
}

static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
						 struct xfrm_usersa_id *p,
						 struct nlattr **attrs,
						 int *errp)
{
	struct xfrm_state *x = NULL;
	struct xfrm_mark m;
	int err;
	u32 mark = xfrm_mark_get(attrs, &m);

	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
		err = -ESRCH;
		x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
	} else {
		xfrm_address_t *saddr = NULL;

		verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
		if (!saddr) {
			err = -EINVAL;
			goto out;
		}

		err = -ESRCH;
		x = xfrm_state_lookup_byaddr(net, mark,
					     &p->daddr, saddr,
					     p->proto, p->family);
	}

out:
	if (!x && errp)
		*errp = err;
	return x;
}

static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err = -ESRCH;
	struct km_event c;
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	kuid_t loginuid = audit_get_loginuid(current);
	unsigned int sessionid = audit_get_sessionid(current);
	u32 sid;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		return err;

	if ((err = security_xfrm_state_delete(x)) != 0)
		goto out;

	if (xfrm_state_kern(x)) {
		err = -EPERM;
		goto out;
	}

	err = xfrm_state_delete(x);

	if (err < 0)
		goto out;

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;
	km_state_notify(x, &c);

out:
	security_task_getsecid(current, &sid);
	xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
	xfrm_state_put(x);
	return err;
}

static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->id, &x->id, sizeof(p->id));
	memcpy(&p->sel, &x->sel, sizeof(p->sel));
	memcpy(&p->lft, &x->lft, sizeof(p->lft));
	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
	memcpy(&p->stats, &x->stats, sizeof(p->stats));
	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
	p->mode = x->props.mode;
	p->replay_window = x->props.replay_window;
	p->reqid = x->props.reqid;
	p->family = x->props.family;
	p->flags = x->props.flags;
	p->seq = x->km.seq;
}

struct xfrm_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};

static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
	struct xfrm_user_sec_ctx *uctx;
	struct nlattr *attr;
	int ctx_size = sizeof(*uctx) + s->ctx_len;

	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
	if (attr == NULL)
		return -EMSGSIZE;

	uctx = nla_data(attr);
	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = ctx_size;
	uctx->ctx_doi = s->ctx_doi;
	uctx->ctx_alg = s->ctx_alg;
	uctx->ctx_len = s->ctx_len;
	memcpy(uctx + 1, s->ctx_str, s->ctx_len);

	return 0;
}

static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
	struct xfrm_algo *algo;
	struct nlattr *nla;

	nla = nla_reserve(skb, XFRMA_ALG_AUTH,
			  sizeof(*algo) + (auth->alg_key_len + 7) / 8);
	if (!nla)
		return -EMSGSIZE;

	algo = nla_data(nla);
	strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
	memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
	algo->alg_key_len = auth->alg_key_len;

	return 0;
}

/* Don't change this without updating xfrm_sa_len! */
static int copy_to_user_state_extra(struct xfrm_state *x,
				    struct xfrm_usersa_info *p,
				    struct sk_buff *skb)
{
	int ret = 0;

	copy_to_user_state(x, p);

	if (x->props.extra_flags) {
		ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
				  x->props.extra_flags);
		if (ret)
			goto out;
	}

	if (x->coaddr) {
		ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
		if (ret)
			goto out;
	}
	if (x->lastused) {
		ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
		if (ret)
			goto out;
	}
	if (x->aead) {
		ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
		if (ret)
			goto out;
	}
	if (x->aalg) {
		ret = copy_to_user_auth(x->aalg, skb);
		if (!ret)
			ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
				      xfrm_alg_auth_len(x->aalg), x->aalg);
		if (ret)
			goto out;
	}
	if (x->ealg) {
		ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
		if (ret)
			goto out;
	}
	if (x->calg) {
		ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
		if (ret)
			goto out;
	}
	if (x->encap) {
		ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
		if (ret)
			goto out;
	}
	if (x->tfcpad) {
		ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
		if (ret)
			goto out;
	}
	ret = xfrm_mark_put(skb, &x->mark);
	if (ret)
		goto out;
	if (x->replay_esn) {
		ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
		if (ret)
			goto out;
	}
	if (x->security)
		ret = copy_sec_ctx(x->security, skb);
out:
	return ret;
}

static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_usersa_info *p;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);

	err = copy_to_user_state_extra(x, p, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);
	return 0;
}

static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct sock *sk = cb->skb->sk;
	struct net *net = sock_net(sk);

	xfrm_state_walk_done(walk, net);
	return 0;
}

static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		cb->args[0] = 1;
		xfrm_state_walk_init(walk, 0);
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}

static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_state(x, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static inline size_t xfrm_spdinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhinfo));
}

static int build_spdinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(net, &si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	return nlmsg_end(skb, nlh);
}

static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0)
		BUG();

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}

static inline size_t xfrm_sadinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
	       + nla_total_size(4); /* XFRMA_SAD_CNT */
}

static int build_sadinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_sadinfo si;
	struct xfrmu_sadhinfo sh;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_sad_getinfo(net, &si);

	sh.sadhmcnt = si.sadhmcnt;
	sh.sadhcnt = si.sadhcnt;

	err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
	if (!err)
		err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	return nlmsg_end(skb, nlh);
}

static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0)
		BUG();

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}

static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	struct sk_buff *resp_skb;
	int err = -ESRCH;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		goto out_noput;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
	} else {
		err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
	}
	xfrm_state_put(x);
out_noput:
	return err;
}

static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;
	u32 mark;
	struct xfrm_mark m;

	p = nlmsg_data(nlh);
	err = verify_spi_info(p->info.id.proto, p->min, p->max);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;

	mark = xfrm_mark_get(attrs, &m);
	if (p->info.seq) {
		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
		if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	if (!x)
		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
				  p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (x == NULL)
		goto out_noput;

	err = xfrm_alloc_spi(x, p->min, p->max);
	if (err)
		goto out;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);

out:
	xfrm_state_put(x);
out_noput:
	return err;
}

static int verify_policy_dir(u8 dir)
{
	switch (dir) {
	case XFRM_POLICY_IN:
	case XFRM_POLICY_OUT:
	case XFRM_POLICY_FWD:
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int verify_policy_type(u8 type)
{
	switch (type) {
	case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
	case XFRM_POLICY_TYPE_SUB:
#endif
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
{
	int ret;

	switch (p->share) {
	case XFRM_SHARE_ANY:
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:
		break;

	default:
		return -EINVAL;
	}

	switch (p->action) {
	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:
		break;

	default:
		return -EINVAL;
	}

	switch (p->sel.family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		return -EAFNOSUPPORT;
#endif

	default:
		return -EINVAL;
	}

	ret = verify_policy_dir(p->dir);
	if (ret)
		return ret;
	if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
		return -EINVAL;

	return 0;
}

static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	return security_xfrm_policy_alloc(&pol->security, uctx);
}

static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
			   int nr)
{
	int i;

	xp->xfrm_nr = nr;
	for (i = 0; i < nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];

		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
		memcpy(&t->saddr, &ut->saddr,
		       sizeof(xfrm_address_t));
		t->reqid = ut->reqid;
		t->mode = ut->mode;
		t->share = ut->share;
		t->optional = ut->optional;
		t->aalgos = ut->aalgos;
		t->ealgos = ut->ealgos;
		t->calgos = ut->calgos;
		/* If all masks are ~0, then we allow all algorithms. */
		t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
		t->encap_family = ut->family;
	}
}

static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
{
	int i;

	if (nr > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < nr; i++) {
		/* We never validated the ut->family value, so many
		 * applications simply leave it at zero.  The check was
		 * never made and ut->family was ignored because all
		 * templates could be assumed to have the same family as
		 * the policy itself.  Now that we will have ipv4-in-ipv6
		 * and ipv6-in-ipv4 tunnels, this is no longer true.
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			break;
#endif
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_TMPL];

	if (!rt) {
		pol->xfrm_nr = 0;
	} else {
		struct xfrm_user_tmpl *utmpl = nla_data(rt);
		int nr = nla_len(rt) / sizeof(*utmpl);
		int err;

		err = validate_tmpl(nr, utmpl, pol->family);
		if (err)
			return err;

		copy_templates(pol, utmpl, nr);
	}
	return 0;
}

static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
	struct xfrm_userpolicy_type *upt;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	if (rt) {
		upt = nla_data(rt);
		type = upt->type;
	}

	err = verify_policy_type(type);
	if (err)
		return err;

	*tp = type;
	return 0;
}

static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
	xp->priority = p->priority;
	xp->index = p->index;
	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
	xp->action = p->action;
	xp->flags = p->flags;
	xp->family = p->sel.family;
	/* XXX xp->share = p->share; */
}

static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
	p->priority = xp->priority;
	p->index = xp->index;
	p->sel.family = xp->family;
	p->dir = dir;
	p->action = xp->action;
	p->flags = xp->flags;
	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}

static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if (err)
		goto error;

	xfrm_mark_get(attrs, &xp->mark);

	return xp;
error:
	*errp = err;
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}

static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;
	kuid_t loginuid = audit_get_loginuid(current);
	unsigned int sessionid = audit_get_sessionid(current);
	u32 sid;

	err = verify_newpolicy_info(p);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha! this is anti-netlink really i.e more pfkey derived
	 * in netlink excl is a flag and you wouldnt need
	 * a type XFRM_MSG_UPDPOLICY - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	security_task_getsecid(current, &sid);
	xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);

	if (err) {
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	xfrm_pol_put(xp);

	return 0;
}

static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
	int i;

	if (xp->xfrm_nr == 0)
		return 0;

	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

		memset(up, 0, sizeof(*up));
		memcpy(&up->id, &kp->id, sizeof(up->id));
		up->family = kp->encap_family;
		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
		up->reqid = kp->reqid;
		up->mode = kp->mode;
		up->share = kp->share;
		up->optional = kp->optional;
		up->aalgos = kp->aalgos;
		up->ealgos = kp->ealgos;
		up->calgos = kp->calgos;
	}

	return nla_put(skb, XFRMA_TMPL,
		       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}

static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
	if (x->security) {
		return copy_sec_ctx(x->security, skb);
	}
	return 0;
}

static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
	if (xp->security)
		return copy_sec_ctx(xp->security, skb);
	return 0;
}
static inline size_t userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	return 0;
#endif
}

#ifdef CONFIG_XFRM_SUB_POLICY
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt = {
		.type = type,
	};

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}

#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
#endif

static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);
	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);
	return 0;
}

static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
	struct net *net = sock_net(cb->skb->sk);

	xfrm_policy_walk_done(walk, net);
	return 0;
}

static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		cb->args[0] = 1;
		xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
	}

	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);

	return skb->len;
}

static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					   struct xfrm_policy *xp,
					   int dir, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_policy(xp, dir, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct km_event c;
	int delete;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	p = nlmsg_data(nlh);
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	if (p->index)
		xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
					   ctx, delete, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
					    NETLINK_CB(skb).portid);
		}
	} else {
		kuid_t loginuid = audit_get_loginuid(current);
		unsigned int sessionid = audit_get_sessionid(current);
		u32 sid;

		security_task_getsecid(current, &sid);
		xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
					 sid);

		if (err != 0)
			goto out;

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.portid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
	}

out:
	xfrm_pol_put(xp);
	if (delete && err == 0)
		xfrm_garbage_collect(net);
	return err;
}

static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
	struct xfrm_audit audit_info;
	int err;

	audit_info.loginuid = audit_get_loginuid(current);
	audit_info.sessionid = audit_get_sessionid(current);
	security_task_getsecid(current, &audit_info.secid);
	err = xfrm_state_flush(net, p->proto, &audit_info);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_state_notify(NULL, &c);

	return 0;
}

static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
{
	size_t replay_size = x->replay_esn ?
			     xfrm_replay_state_esn_len(x->replay_esn) :
			     sizeof(struct xfrm_replay_state);

	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
	       + nla_total_size(replay_size)
	       + nla_total_size(sizeof(struct xfrm_lifetime_cur))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(4) /* XFRM_AE_RTHR */
	       + nla_total_size(4); /* XFRM_AE_ETHR */
}

static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_aevent_id *id;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	id = nlmsg_data(nlh);
	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
	id->sa_id.spi = x->id.spi;
	id->sa_id.family = x->props.family;
	id->sa_id.proto = x->id.proto;
	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
	id->reqid = x->props.reqid;
	id->flags = c->data.aevent;

	if (x->replay_esn) {
		err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
	} else {
		err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
			      &x->replay);
	}
	if (err)
		goto out_cancel;
	err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
	if (err)
		goto out_cancel;

	if (id->flags & XFRM_AE_RTHR) {
		err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
		if (err)
			goto out_cancel;
	}
	if (id->flags & XFRM_AE_ETHR) {
		err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
				  x->replay_maxage * 10 / HZ);
		if (err)
			goto out_cancel;
	}
	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		goto out_cancel;

	return nlmsg_end(skb, nlh);

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}

static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct sk_buff *r_skb;
	int err;
	struct km_event c;
	u32 mark;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct xfrm_usersa_id *id = &p->sa_id;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
	if (x == NULL)
		return -ESRCH;

	r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (r_skb == NULL) {
		xfrm_state_put(x);
		return -ENOMEM;
	}

	/*
	 * XXX: is this lock really needed - none of the other
	 * gets lock (the concern is things getting updated
	 * while we are still reading) - jhs
	 */
	spin_lock_bh(&x->lock);
	c.data.aevent = p->flags;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;

	if (build_aevent(r_skb, x, &c) < 0)
		BUG();
	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct km_event c;
	int err = -EINVAL;
	u32 mark = 0;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];

	if (!lt && !rp && !re)
		return err;

	/* pedantic mode - thou shalt sayeth replaceth */
	if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
		return err;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
	if (x == NULL)
		return -ESRCH;

	if (x->km.state != XFRM_STATE_VALID)
		goto out;

	err = xfrm_replay_verify_len(x->replay_esn, re);
	if (err)
		goto out;

	spin_lock_bh(&x->lock);
	xfrm_update_ae_params(x, attrs, 1);
	spin_unlock_bh(&x->lock);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.data.aevent = XFRM_AE_CU;
	km_state_notify(x, &c);
	err = 0;
out:
	xfrm_state_put(x);
	return err;
}

static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct xfrm_audit audit_info;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	audit_info.loginuid = audit_get_loginuid(current);
	audit_info.sessionid = audit_get_sessionid(current);
	security_task_getsecid(current, &audit_info.secid);
	err = xfrm_policy_flush(net, type, &audit_info);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}

	c.data.type = type;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_policy_notify(NULL, 0, &c);
	return 0;
}

static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
	struct xfrm_userpolicy_info *p = &up->pol;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err = -ENOENT;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	if (p->index)
		xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
					   &p->sel, ctx, 0, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (unlikely(xp->walk.dead))
		goto out;

	err = 0;
	if (up->hard) {
		kuid_t loginuid = audit_get_loginuid(current);
		unsigned int sessionid = audit_get_sessionid(current);
		u32 sid;

		security_task_getsecid(current, &sid);
		xfrm_policy_delete(xp, p->dir);
		xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);

	} else {
		// reset the timers here?
		WARN(1, "Dont know what to do with soft policy expire\n");
	}
	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);

out:
	xfrm_pol_put(xp);
	return err;
}

static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err;
	struct xfrm_user_expire *ue = nlmsg_data(nlh);
	struct xfrm_usersa_info *p = &ue->state;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);

	err = -ENOENT;
	if (x == NULL)
		return err;

	spin_lock_bh(&x->lock);
	err = -EINVAL;
	if (x->km.state != XFRM_STATE_VALID)
		goto out;
	km_state_expired(x, ue->hard, nlh->nlmsg_pid);

	if (ue->hard) {
		kuid_t loginuid = audit_get_loginuid(current);
		unsigned int sessionid = audit_get_sessionid(current);
		u32 sid;

		security_task_getsecid(current, &sid);
		__xfrm_state_delete(x);
		xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
	}
	err = 0;
out:
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_tmpl *ut;
	int i;
	struct nlattr *rt = attrs[XFRMA_TMPL];
	struct xfrm_mark mark;

	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto nomem;

	xfrm_mark_get(attrs, &mark);

	err = verify_newpolicy_info(&ua->policy);
	if (err)
		goto bad_policy;

	/* build an XP */
	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
	if (!xp)
		goto free_state;

	memcpy(&x->id, &ua->id, sizeof(ua->id));
	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
	xp->mark.m = x->mark.m = mark.m;
	xp->mark.v = x->mark.v = mark.v;
	ut = nla_data(rt);
	/* extract the templates and for each call km_key */
	for (i = 0; i < xp->xfrm_nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
		memcpy(&x->id, &t->id, sizeof(x->id));
		x->props.mode = t->mode;
		x->props.reqid = t->reqid;
		x->props.family = ut->family;
		t->aalgos = ua->aalgos;
		t->ealgos = ua->ealgos;
		t->calgos = ua->calgos;
		err = km_query(x, t, xp);

	}

	kfree(x);
	kfree(xp);

	return 0;

bad_policy:
	WARN(1, "BAD policy passed\n");
free_state:
	kfree(x);
nomem:
	return err;
}

#ifdef CONFIG_XFRM_MIGRATE
static int copy_from_user_migrate(struct xfrm_migrate *ma,
				  struct xfrm_kmaddress *k,
				  struct nlattr **attrs, int *num)
{
	struct nlattr *rt = attrs[XFRMA_MIGRATE];
	struct xfrm_user_migrate *um;
	int i, num_migrate;

	if (k != NULL) {
		struct xfrm_user_kmaddress *uk;

		uk = nla_data(attrs[XFRMA_KMADDRESS]);
		memcpy(&k->local, &uk->local, sizeof(k->local));
		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
		k->family = uk->family;
		k->reserved = uk->reserved;
	}

	um = nla_data(rt);
	num_migrate = nla_len(rt) / sizeof(*um);

	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++, um++, ma++) {
		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));

		ma->proto = um->proto;
		ma->mode = um->mode;
		ma->reqid = um->reqid;

		ma->old_family = um->old_family;
		ma->new_family = um->new_family;
	}

	*num = i;
	return 0;
}

static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
	struct xfrm_migrate m[XFRM_MAX_DEPTH];
	struct xfrm_kmaddress km, *kmp;
	u8 type;
	int err;
	int n = 0;
	struct net *net = sock_net(skb->sk);

	if (attrs[XFRMA_MIGRATE] == NULL)
		return -EINVAL;

	kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
	if (err)
		return err;

	if (!n)
		return 0;

	xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net);

	return 0;
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	return -ENOPROTOOPT;
}
#endif

#ifdef CONFIG_XFRM_MIGRATE
static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
{
	struct xfrm_user_migrate um;

	memset(&um, 0, sizeof(um));
	um.proto = m->proto;
	um.mode = m->mode;
	um.reqid = m->reqid;
	um.old_family = m->old_family;
	memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
	memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
	um.new_family = m->new_family;
	memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
	memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));

	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}

static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
{
	struct xfrm_user_kmaddress uk;

	memset(&uk, 0, sizeof(uk));
	uk.family = k->family;
	uk.reserved = k->reserved;
	memcpy(&uk.local, &k->local, sizeof(uk.local));
	memcpy(&uk.remote, &k->remote, sizeof(uk.remote));

	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
}

static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
	       + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
	       + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
	       + userpolicy_type_attrsize();
}

static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
			 int num_migrate, const struct xfrm_kmaddress *k,
			 const struct xfrm_selector *sel, u8 dir, u8 type)
{
	const struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i, err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	if (k != NULL) {
		err = copy_to_user_kmaddress(k, skb);
		if (err)
			goto out_cancel;
	}
	err = copy_to_user_policy_type(type, skb);
	if (err)
		goto out_cancel;
	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
		err = copy_to_user_migrate(mp, skb);
		if (err)
			goto out_cancel;
	}

	return nlmsg_end(skb, nlh);

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}

static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k)
{
	struct net *net = &init_net;
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k)
{
	struct net *net = &init_net;
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
}
#else
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k)
{
	return -ENOPROTOOPT;
}
#endif

#define XMSGSIZE(type) sizeof(struct type)

static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
};

#undef XMSGSIZE

static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
	[XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
	[XFRMA_LASTUSED] = { .type = NLA_U64},
	[XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
	[XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
	[XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
	[XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
	[XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
	[XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
	[XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
	[XFRMA_TFCPAD] = { .type = NLA_U32 },
	[XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
	[XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
};

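/* Dispatch table for user requests, indexed by nlmsg_type - XFRM_MSG_BASE.
 * Each entry names a doit handler; GETSA and GETPOLICY additionally provide
 * dump/done callbacks for NLM_F_DUMP requests.  Kernel-to-user message types
 * such as XFRM_MSG_REPORT have no entry here and are rejected with -EINVAL
 * by xfrm_user_rcv_msg().
 */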
static const struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
	[XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
					     .dump = xfrm_dump_sa,
					     .done = xfrm_dump_sa_done },
	[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
	[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						 .dump = xfrm_dump_policy,
						 .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
	[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
	[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
	[XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
	[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
	[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
	[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
};

static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	const struct xfrm_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		if (link->dump == NULL)
			return -EINVAL;

		{
			struct netlink_dump_control c = {
				.dump = link->dump,
				.done = link->done,
			};
			return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
		}
	}

	err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
			  xfrma_policy);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}

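/* Netlink input function: all messages on the NETLINK_XFRM socket are
 * processed under the per-namespace xfrm_cfg_mutex, serializing
 * configuration requests against each other.
 */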
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);

	mutex_lock(&net->xfrm.xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
}

static inline size_t xfrm_expire_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
	       + nla_total_size(sizeof(struct xfrm_mark));
}

static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;

	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		return err;

	return nlmsg_end(skb, nlh);
}

static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_expire(skb, x, c) < 0) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_aevent(skb, x, c) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
}

static int xfrm_notify_sa_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	p = nlmsg_data(nlh);
	p->proto = c->data.proto;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
}

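/* Worst-case attribute payload of an SA notification: one nla_total_size()
 * term per optional attribute that copy_to_user_state_extra() may emit.
 * Space for the XFRMA_LASTUSED u64 is always reserved because x->lastused
 * can become non-zero after this size has been computed.
 */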
static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
	size_t l = 0;
	if (x->aead)
		l += nla_total_size(aead_len(x->aead));
	if (x->aalg) {
		l += nla_total_size(sizeof(struct xfrm_algo) +
				    (x->aalg->alg_key_len + 7) / 8);
		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
	}
	if (x->ealg)
		l += nla_total_size(xfrm_alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));
	if (x->tfcpad)
		l += nla_total_size(sizeof(x->tfcpad));
	if (x->replay_esn)
		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
	if (x->security)
		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
				    x->security->ctx_len);
	if (x->coaddr)
		l += nla_total_size(sizeof(*x->coaddr));
	if (x->props.extra_flags)
		l += nla_total_size(sizeof(x->props.extra_flags));

	/* Must count x->lastused as it may become non-zero behind our back. */
	l += nla_total_size(sizeof(u64));

	return l;
}

static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = xfrm_sa_len(x);
	int headlen, err;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
		len += nla_total_size(sizeof(struct xfrm_mark));
	}
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
		err = -EMSGSIZE;
		if (attr == NULL)
			goto out_free_skb;

		p = nla_data(attr);
	}
	err = copy_to_user_state_extra(x, p, skb);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);

out_free_skb:
	kfree_skb(skb);
	return err;
}

static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
{

	switch (c->event) {
	case XFRM_MSG_EXPIRE:
		return xfrm_exp_state_notify(x, c);
	case XFRM_MSG_NEWAE:
		return xfrm_aevent_state_notify(x, c);
	case XFRM_MSG_DELSA:
	case XFRM_MSG_UPDSA:
	case XFRM_MSG_NEWSA:
		return xfrm_notify_sa(x, c);
	case XFRM_MSG_FLUSHSA:
		return xfrm_notify_sa_flush(c);
	default:
		printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
		       c->event);
		break;
	}

	return 0;

}

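/* ACQUIRE path: xfrm_send_acquire() sizes the skb with
 * xfrm_acquire_msgsize(), build_acquire() fills the xfrm_user_acquire
 * header (state id, addresses, selector, proposed policy) plus template,
 * security context, policy type and mark attributes, and the message is
 * multicast to XFRMNLGRP_ACQUIRE so a key manager can negotiate the SA.
 */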
static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
					  struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(xfrm_user_sec_ctx_size(x->security))
	       + userpolicy_type_attrsize();
}

static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
			 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
{
	__u32 seq = xfrm_get_acqseq();
	struct xfrm_user_acquire *ua;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ua = nlmsg_data(nlh);
	memcpy(&ua->id, &x->id, sizeof(ua->id));
	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
	copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
	ua->aalgos = xt->aalgos;
	ua->ealgos = xt->ealgos;
	ua->calgos = xt->calgos;
	ua->seq = x->km.seq = seq;

	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_state_sec_ctx(x, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	return nlmsg_end(skb, nlh);
}

static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
			     struct xfrm_policy *xp)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_acquire(skb, x, xt, xp) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
}

/* User gives us xfrm_user_policy_info followed by an array of 0
 * or more templates.
 */
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
					       u8 *data, int len, int *dir)
{
	struct net *net = sock_net(sk);
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	switch (sk->sk_family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	*dir = -EINVAL;

	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p))
		return NULL;

	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (validate_tmpl(nr, ut, p->sel.family))
		return NULL;

	if (p->dir > XFRM_POLICY_OUT)
		return NULL;

	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	xp->type = XFRM_POLICY_TYPE_MAIN;
	copy_templates(xp, ut, nr);

	*dir = p->dir;

	return xp;
}

static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + userpolicy_type_attrsize();
}

static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, const struct km_event *c)
{
	struct xfrm_user_polexpire *upe;
	int hard = c->data.hard;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	upe = nlmsg_data(nlh);
	copy_to_user_policy(xp, &upe->pol, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	upe->hard = !!hard;

	return nlmsg_end(skb, nlh);
}

static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct net *net = xp_net(xp);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_polexpire(skb, xp, dir, c) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

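/* NEWPOLICY, UPDPOLICY and DELPOLICY notifications share this helper.
 * For XFRM_MSG_DELPOLICY the message header is an xfrm_userpolicy_id and
 * the full xfrm_userpolicy_info is nested in an XFRMA_POLICY attribute.
 */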
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	struct net *net = xp_net(xp);
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int headlen, err;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += userpolicy_type_attrsize();
	len += nla_total_size(sizeof(struct xfrm_mark));
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		id->dir = dir;
		if (c->data.byid)
			id->index = xp->index;
		else
			memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
		err = -EMSGSIZE;
		if (attr == NULL)
			goto out_free_skb;

		p = nla_data(attr);
	}

	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

out_free_skb:
	kfree_skb(skb);
	return err;
}

static int xfrm_notify_policy_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;
	err = copy_to_user_policy_type(c->data.type, skb);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

out_free_skb:
	kfree_skb(skb);
	return err;
}

static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{

	switch (c->event) {
	case XFRM_MSG_NEWPOLICY:
	case XFRM_MSG_UPDPOLICY:
	case XFRM_MSG_DELPOLICY:
		return xfrm_notify_policy(xp, dir, c);
	case XFRM_MSG_FLUSHPOLICY:
		return xfrm_notify_policy_flush(c);
	case XFRM_MSG_POLEXPIRE:
		return xfrm_exp_policy_notify(xp, dir, c);
	default:
		printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
		       c->event);
	}

	return 0;

}

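/* XFRM_MSG_REPORT carries a protocol and selector, plus an optional
 * XFRMA_COADDR address, and is multicast to XFRMNLGRP_REPORT listeners.
 */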
static inline size_t xfrm_report_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
}

static int build_report(struct sk_buff *skb, u8 proto,
			struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct xfrm_user_report *ur;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ur = nlmsg_data(nlh);
	ur->proto = proto;
	memcpy(&ur->sel, sel, sizeof(ur->sel));

	if (addr) {
		int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}
	return nlmsg_end(skb, nlh);
}

static int xfrm_send_report(struct net *net, u8 proto,
			    struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_report(skb, proto, sel, addr) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
}

static inline size_t xfrm_mapping_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
}

static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
			 xfrm_address_t *new_saddr, __be16 new_sport)
{
	struct xfrm_user_mapping *um;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	um = nlmsg_data(nlh);

	memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
	um->id.spi = x->id.spi;
	um->id.family = x->props.family;
	um->id.proto = x->id.proto;
	memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
	memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
	um->new_sport = new_sport;
	um->old_sport = x->encap->encap_sport;
	um->reqid = x->props.reqid;

	return nlmsg_end(skb, nlh);
}

static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
			     __be16 sport)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	if (x->id.proto != IPPROTO_ESP)
		return -EINVAL;

	if (!x->encap)
		return -EINVAL;

	skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_mapping(skb, x, ipaddr, sport) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
}

static struct xfrm_mgr netlink_mgr = {
	.id = "netlink",
	.notify = xfrm_send_state_notify,
	.acquire = xfrm_send_acquire,
	.compile_policy = xfrm_compile_policy,
	.notify_policy = xfrm_send_policy_notify,
	.report = xfrm_send_report,
	.migrate = xfrm_send_migrate,
	.new_mapping = xfrm_send_mapping,
};

static int __net_init xfrm_user_net_init(struct net *net)
{
	struct sock *nlsk;
	struct netlink_kernel_cfg cfg = {
		.groups	= XFRMNLGRP_MAX,
		.input	= xfrm_netlink_rcv,
	};

	nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
	if (nlsk == NULL)
		return -ENOMEM;
	net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
	rcu_assign_pointer(net->xfrm.nlsk, nlsk);
	return 0;
}

static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
	struct net *net;
	list_for_each_entry(net, net_exit_list, exit_list)
		RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
	synchronize_net();
	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->xfrm.nlsk_stash);
}

static struct pernet_operations xfrm_user_net_ops = {
	.init	    = xfrm_user_net_init,
	.exit_batch = xfrm_user_net_exit,
};

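/* Module init: the per-namespace netlink socket must exist before the
 * key manager is registered, so register_pernet_subsys() runs first and
 * is rolled back if xfrm_register_km() fails.
 */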
static int __init xfrm_user_init(void)
{
	int rv;

	printk(KERN_INFO "Initializing XFRM netlink socket\n");

	rv = register_pernet_subsys(&xfrm_user_net_ops);
	if (rv < 0)
		return rv;
	rv = xfrm_register_km(&netlink_mgr);
	if (rv < 0)
		unregister_pernet_subsys(&xfrm_user_net_ops);
	return rv;
}

static void __exit xfrm_user_exit(void)
{
	xfrm_unregister_km(&netlink_mgr);
	unregister_pernet_subsys(&xfrm_user_net_ops);
}

module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);