1 // SPDX-License-Identifier: GPL-2.0-only 2 /* xfrm_user.c: User interface to configure xfrm engine. 3 * 4 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 5 * 6 * Changes: 7 * Mitsuru KANDA @USAGI 8 * Kazunori MIYAZAWA @USAGI 9 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 10 * IPv6 support 11 * 12 */ 13 14 #include <linux/compat.h> 15 #include <linux/crypto.h> 16 #include <linux/module.h> 17 #include <linux/kernel.h> 18 #include <linux/types.h> 19 #include <linux/slab.h> 20 #include <linux/socket.h> 21 #include <linux/string.h> 22 #include <linux/net.h> 23 #include <linux/skbuff.h> 24 #include <linux/pfkeyv2.h> 25 #include <linux/ipsec.h> 26 #include <linux/init.h> 27 #include <linux/security.h> 28 #include <net/sock.h> 29 #include <net/xfrm.h> 30 #include <net/netlink.h> 31 #include <net/ah.h> 32 #include <linux/uaccess.h> 33 #if IS_ENABLED(CONFIG_IPV6) 34 #include <linux/in6.h> 35 #endif 36 #include <asm/unaligned.h> 37 38 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type, 39 struct netlink_ext_ack *extack) 40 { 41 struct nlattr *rt = attrs[type]; 42 struct xfrm_algo *algp; 43 44 if (!rt) 45 return 0; 46 47 algp = nla_data(rt); 48 if (nla_len(rt) < (int)xfrm_alg_len(algp)) { 49 NL_SET_ERR_MSG(extack, "Invalid AUTH/CRYPT/COMP attribute length"); 50 return -EINVAL; 51 } 52 53 switch (type) { 54 case XFRMA_ALG_AUTH: 55 case XFRMA_ALG_CRYPT: 56 case XFRMA_ALG_COMP: 57 break; 58 59 default: 60 NL_SET_ERR_MSG(extack, "Invalid algorithm attribute type"); 61 return -EINVAL; 62 } 63 64 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; 65 return 0; 66 } 67 68 static int verify_auth_trunc(struct nlattr **attrs, 69 struct netlink_ext_ack *extack) 70 { 71 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC]; 72 struct xfrm_algo_auth *algp; 73 74 if (!rt) 75 return 0; 76 77 algp = nla_data(rt); 78 if (nla_len(rt) < (int)xfrm_alg_auth_len(algp)) { 79 NL_SET_ERR_MSG(extack, "Invalid AUTH_TRUNC attribute length"); 80 return -EINVAL; 81 } 82 83 
algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; 84 return 0; 85 } 86 87 static int verify_aead(struct nlattr **attrs, struct netlink_ext_ack *extack) 88 { 89 struct nlattr *rt = attrs[XFRMA_ALG_AEAD]; 90 struct xfrm_algo_aead *algp; 91 92 if (!rt) 93 return 0; 94 95 algp = nla_data(rt); 96 if (nla_len(rt) < (int)aead_len(algp)) { 97 NL_SET_ERR_MSG(extack, "Invalid AEAD attribute length"); 98 return -EINVAL; 99 } 100 101 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; 102 return 0; 103 } 104 105 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type, 106 xfrm_address_t **addrp) 107 { 108 struct nlattr *rt = attrs[type]; 109 110 if (rt && addrp) 111 *addrp = nla_data(rt); 112 } 113 114 static inline int verify_sec_ctx_len(struct nlattr **attrs, struct netlink_ext_ack *extack) 115 { 116 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 117 struct xfrm_user_sec_ctx *uctx; 118 119 if (!rt) 120 return 0; 121 122 uctx = nla_data(rt); 123 if (uctx->len > nla_len(rt) || 124 uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) { 125 NL_SET_ERR_MSG(extack, "Invalid security context length"); 126 return -EINVAL; 127 } 128 129 return 0; 130 } 131 132 static inline int verify_replay(struct xfrm_usersa_info *p, 133 struct nlattr **attrs, 134 struct netlink_ext_ack *extack) 135 { 136 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; 137 struct xfrm_replay_state_esn *rs; 138 139 if (!rt) { 140 if (p->flags & XFRM_STATE_ESN) { 141 NL_SET_ERR_MSG(extack, "Missing required attribute for ESN"); 142 return -EINVAL; 143 } 144 return 0; 145 } 146 147 rs = nla_data(rt); 148 149 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) { 150 NL_SET_ERR_MSG(extack, "ESN bitmap length must be <= 128"); 151 return -EINVAL; 152 } 153 154 if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) && 155 nla_len(rt) != sizeof(*rs)) { 156 NL_SET_ERR_MSG(extack, "ESN attribute is too short to fit the full bitmap length"); 157 return -EINVAL; 158 } 159 160 /* As only 
ESP and AH support ESN feature. */ 161 if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) { 162 NL_SET_ERR_MSG(extack, "ESN only supported for ESP and AH"); 163 return -EINVAL; 164 } 165 166 if (p->replay_window != 0) { 167 NL_SET_ERR_MSG(extack, "ESN not compatible with legacy replay_window"); 168 return -EINVAL; 169 } 170 171 return 0; 172 } 173 174 static int verify_newsa_info(struct xfrm_usersa_info *p, 175 struct nlattr **attrs, 176 struct netlink_ext_ack *extack) 177 { 178 int err; 179 180 err = -EINVAL; 181 switch (p->family) { 182 case AF_INET: 183 break; 184 185 case AF_INET6: 186 #if IS_ENABLED(CONFIG_IPV6) 187 break; 188 #else 189 err = -EAFNOSUPPORT; 190 NL_SET_ERR_MSG(extack, "IPv6 support disabled"); 191 goto out; 192 #endif 193 194 default: 195 NL_SET_ERR_MSG(extack, "Invalid address family"); 196 goto out; 197 } 198 199 switch (p->sel.family) { 200 case AF_UNSPEC: 201 break; 202 203 case AF_INET: 204 if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) { 205 NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)"); 206 goto out; 207 } 208 209 break; 210 211 case AF_INET6: 212 #if IS_ENABLED(CONFIG_IPV6) 213 if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) { 214 NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)"); 215 goto out; 216 } 217 218 break; 219 #else 220 NL_SET_ERR_MSG(extack, "IPv6 support disabled"); 221 err = -EAFNOSUPPORT; 222 goto out; 223 #endif 224 225 default: 226 NL_SET_ERR_MSG(extack, "Invalid address family in selector"); 227 goto out; 228 } 229 230 err = -EINVAL; 231 switch (p->id.proto) { 232 case IPPROTO_AH: 233 if (!attrs[XFRMA_ALG_AUTH] && 234 !attrs[XFRMA_ALG_AUTH_TRUNC]) { 235 NL_SET_ERR_MSG(extack, "Missing required attribute for AH: AUTH_TRUNC or AUTH"); 236 goto out; 237 } 238 239 if (attrs[XFRMA_ALG_AEAD] || 240 attrs[XFRMA_ALG_CRYPT] || 241 attrs[XFRMA_ALG_COMP] || 242 attrs[XFRMA_TFCPAD]) { 243 NL_SET_ERR_MSG(extack, "Invalid 
attributes for AH: AEAD, CRYPT, COMP, TFCPAD"); 244 goto out; 245 } 246 break; 247 248 case IPPROTO_ESP: 249 if (attrs[XFRMA_ALG_COMP]) { 250 NL_SET_ERR_MSG(extack, "Invalid attribute for ESP: COMP"); 251 goto out; 252 } 253 254 if (!attrs[XFRMA_ALG_AUTH] && 255 !attrs[XFRMA_ALG_AUTH_TRUNC] && 256 !attrs[XFRMA_ALG_CRYPT] && 257 !attrs[XFRMA_ALG_AEAD]) { 258 NL_SET_ERR_MSG(extack, "Missing required attribute for ESP: at least one of AUTH, AUTH_TRUNC, CRYPT, AEAD"); 259 goto out; 260 } 261 262 if ((attrs[XFRMA_ALG_AUTH] || 263 attrs[XFRMA_ALG_AUTH_TRUNC] || 264 attrs[XFRMA_ALG_CRYPT]) && 265 attrs[XFRMA_ALG_AEAD]) { 266 NL_SET_ERR_MSG(extack, "Invalid attribute combination for ESP: AEAD can't be used with AUTH, AUTH_TRUNC, CRYPT"); 267 goto out; 268 } 269 270 if (attrs[XFRMA_TFCPAD] && 271 p->mode != XFRM_MODE_TUNNEL) { 272 NL_SET_ERR_MSG(extack, "TFC padding can only be used in tunnel mode"); 273 goto out; 274 } 275 break; 276 277 case IPPROTO_COMP: 278 if (!attrs[XFRMA_ALG_COMP]) { 279 NL_SET_ERR_MSG(extack, "Missing required attribute for COMP: COMP"); 280 goto out; 281 } 282 283 if (attrs[XFRMA_ALG_AEAD] || 284 attrs[XFRMA_ALG_AUTH] || 285 attrs[XFRMA_ALG_AUTH_TRUNC] || 286 attrs[XFRMA_ALG_CRYPT] || 287 attrs[XFRMA_TFCPAD]) { 288 NL_SET_ERR_MSG(extack, "Invalid attributes for COMP: AEAD, AUTH, AUTH_TRUNC, CRYPT, TFCPAD"); 289 goto out; 290 } 291 292 if (ntohl(p->id.spi) >= 0x10000) { 293 NL_SET_ERR_MSG(extack, "SPI is too large for COMP (must be < 0x10000)"); 294 goto out; 295 } 296 break; 297 298 #if IS_ENABLED(CONFIG_IPV6) 299 case IPPROTO_DSTOPTS: 300 case IPPROTO_ROUTING: 301 if (attrs[XFRMA_ALG_COMP] || 302 attrs[XFRMA_ALG_AUTH] || 303 attrs[XFRMA_ALG_AUTH_TRUNC] || 304 attrs[XFRMA_ALG_AEAD] || 305 attrs[XFRMA_ALG_CRYPT] || 306 attrs[XFRMA_ENCAP] || 307 attrs[XFRMA_SEC_CTX] || 308 attrs[XFRMA_TFCPAD]) { 309 NL_SET_ERR_MSG(extack, "Invalid attributes for DSTOPTS/ROUTING"); 310 goto out; 311 } 312 313 if (!attrs[XFRMA_COADDR]) { 314 NL_SET_ERR_MSG(extack, 
"Missing required COADDR attribute for DSTOPTS/ROUTING"); 315 goto out; 316 } 317 break; 318 #endif 319 320 default: 321 NL_SET_ERR_MSG(extack, "Unsupported protocol"); 322 goto out; 323 } 324 325 if ((err = verify_aead(attrs, extack))) 326 goto out; 327 if ((err = verify_auth_trunc(attrs, extack))) 328 goto out; 329 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH, extack))) 330 goto out; 331 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT, extack))) 332 goto out; 333 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP, extack))) 334 goto out; 335 if ((err = verify_sec_ctx_len(attrs, extack))) 336 goto out; 337 if ((err = verify_replay(p, attrs, extack))) 338 goto out; 339 340 err = -EINVAL; 341 switch (p->mode) { 342 case XFRM_MODE_TRANSPORT: 343 case XFRM_MODE_TUNNEL: 344 case XFRM_MODE_ROUTEOPTIMIZATION: 345 case XFRM_MODE_BEET: 346 break; 347 348 default: 349 NL_SET_ERR_MSG(extack, "Unsupported mode"); 350 goto out; 351 } 352 353 err = 0; 354 355 if (attrs[XFRMA_MTIMER_THRESH]) { 356 if (!attrs[XFRMA_ENCAP]) { 357 NL_SET_ERR_MSG(extack, "MTIMER_THRESH attribute can only be set on ENCAP states"); 358 err = -EINVAL; 359 goto out; 360 } 361 } 362 363 out: 364 return err; 365 } 366 367 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, 368 struct xfrm_algo_desc *(*get_byname)(const char *, int), 369 struct nlattr *rta) 370 { 371 struct xfrm_algo *p, *ualg; 372 struct xfrm_algo_desc *algo; 373 374 if (!rta) 375 return 0; 376 377 ualg = nla_data(rta); 378 379 algo = get_byname(ualg->alg_name, 1); 380 if (!algo) 381 return -ENOSYS; 382 *props = algo->desc.sadb_alg_id; 383 384 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); 385 if (!p) 386 return -ENOMEM; 387 388 strcpy(p->alg_name, algo->name); 389 *algpp = p; 390 return 0; 391 } 392 393 static int attach_crypt(struct xfrm_state *x, struct nlattr *rta) 394 { 395 struct xfrm_algo *p, *ualg; 396 struct xfrm_algo_desc *algo; 397 398 if (!rta) 399 return 0; 400 401 ualg = nla_data(rta); 402 403 algo = 
xfrm_ealg_get_byname(ualg->alg_name, 1); 404 if (!algo) 405 return -ENOSYS; 406 x->props.ealgo = algo->desc.sadb_alg_id; 407 408 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); 409 if (!p) 410 return -ENOMEM; 411 412 strcpy(p->alg_name, algo->name); 413 x->ealg = p; 414 x->geniv = algo->uinfo.encr.geniv; 415 return 0; 416 } 417 418 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props, 419 struct nlattr *rta) 420 { 421 struct xfrm_algo *ualg; 422 struct xfrm_algo_auth *p; 423 struct xfrm_algo_desc *algo; 424 425 if (!rta) 426 return 0; 427 428 ualg = nla_data(rta); 429 430 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 431 if (!algo) 432 return -ENOSYS; 433 *props = algo->desc.sadb_alg_id; 434 435 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL); 436 if (!p) 437 return -ENOMEM; 438 439 strcpy(p->alg_name, algo->name); 440 p->alg_key_len = ualg->alg_key_len; 441 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; 442 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8); 443 444 *algpp = p; 445 return 0; 446 } 447 448 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props, 449 struct nlattr *rta) 450 { 451 struct xfrm_algo_auth *p, *ualg; 452 struct xfrm_algo_desc *algo; 453 454 if (!rta) 455 return 0; 456 457 ualg = nla_data(rta); 458 459 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 460 if (!algo) 461 return -ENOSYS; 462 if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) 463 return -EINVAL; 464 *props = algo->desc.sadb_alg_id; 465 466 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL); 467 if (!p) 468 return -ENOMEM; 469 470 strcpy(p->alg_name, algo->name); 471 if (!p->alg_trunc_len) 472 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; 473 474 *algpp = p; 475 return 0; 476 } 477 478 static int attach_aead(struct xfrm_state *x, struct nlattr *rta) 479 { 480 struct xfrm_algo_aead *p, *ualg; 481 struct xfrm_algo_desc *algo; 482 483 if (!rta) 484 return 0; 485 486 ualg = nla_data(rta); 487 488 algo = 
xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1); 489 if (!algo) 490 return -ENOSYS; 491 x->props.ealgo = algo->desc.sadb_alg_id; 492 493 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL); 494 if (!p) 495 return -ENOMEM; 496 497 strcpy(p->alg_name, algo->name); 498 x->aead = p; 499 x->geniv = algo->uinfo.aead.geniv; 500 return 0; 501 } 502 503 static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn, 504 struct nlattr *rp) 505 { 506 struct xfrm_replay_state_esn *up; 507 unsigned int ulen; 508 509 if (!replay_esn || !rp) 510 return 0; 511 512 up = nla_data(rp); 513 ulen = xfrm_replay_state_esn_len(up); 514 515 /* Check the overall length and the internal bitmap length to avoid 516 * potential overflow. */ 517 if (nla_len(rp) < (int)ulen || 518 xfrm_replay_state_esn_len(replay_esn) != ulen || 519 replay_esn->bmp_len != up->bmp_len) 520 return -EINVAL; 521 522 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) 523 return -EINVAL; 524 525 return 0; 526 } 527 528 static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn, 529 struct xfrm_replay_state_esn **preplay_esn, 530 struct nlattr *rta) 531 { 532 struct xfrm_replay_state_esn *p, *pp, *up; 533 unsigned int klen, ulen; 534 535 if (!rta) 536 return 0; 537 538 up = nla_data(rta); 539 klen = xfrm_replay_state_esn_len(up); 540 ulen = nla_len(rta) >= (int)klen ? 
klen : sizeof(*up); 541 542 p = kzalloc(klen, GFP_KERNEL); 543 if (!p) 544 return -ENOMEM; 545 546 pp = kzalloc(klen, GFP_KERNEL); 547 if (!pp) { 548 kfree(p); 549 return -ENOMEM; 550 } 551 552 memcpy(p, up, ulen); 553 memcpy(pp, up, ulen); 554 555 *replay_esn = p; 556 *preplay_esn = pp; 557 558 return 0; 559 } 560 561 static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx) 562 { 563 unsigned int len = 0; 564 565 if (xfrm_ctx) { 566 len += sizeof(struct xfrm_user_sec_ctx); 567 len += xfrm_ctx->ctx_len; 568 } 569 return len; 570 } 571 572 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 573 { 574 memcpy(&x->id, &p->id, sizeof(x->id)); 575 memcpy(&x->sel, &p->sel, sizeof(x->sel)); 576 memcpy(&x->lft, &p->lft, sizeof(x->lft)); 577 x->props.mode = p->mode; 578 x->props.replay_window = min_t(unsigned int, p->replay_window, 579 sizeof(x->replay.bitmap) * 8); 580 x->props.reqid = p->reqid; 581 x->props.family = p->family; 582 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); 583 x->props.flags = p->flags; 584 585 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC)) 586 x->sel.family = p->family; 587 } 588 589 /* 590 * someday when pfkey also has support, we could have the code 591 * somehow made shareable and move it to xfrm_state.c - JHS 592 * 593 */ 594 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, 595 int update_esn) 596 { 597 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 598 struct nlattr *re = update_esn ? 
attrs[XFRMA_REPLAY_ESN_VAL] : NULL; 599 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 600 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; 601 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; 602 struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH]; 603 604 if (re) { 605 struct xfrm_replay_state_esn *replay_esn; 606 replay_esn = nla_data(re); 607 memcpy(x->replay_esn, replay_esn, 608 xfrm_replay_state_esn_len(replay_esn)); 609 memcpy(x->preplay_esn, replay_esn, 610 xfrm_replay_state_esn_len(replay_esn)); 611 } 612 613 if (rp) { 614 struct xfrm_replay_state *replay; 615 replay = nla_data(rp); 616 memcpy(&x->replay, replay, sizeof(*replay)); 617 memcpy(&x->preplay, replay, sizeof(*replay)); 618 } 619 620 if (lt) { 621 struct xfrm_lifetime_cur *ltime; 622 ltime = nla_data(lt); 623 x->curlft.bytes = ltime->bytes; 624 x->curlft.packets = ltime->packets; 625 x->curlft.add_time = ltime->add_time; 626 x->curlft.use_time = ltime->use_time; 627 } 628 629 if (et) 630 x->replay_maxage = nla_get_u32(et); 631 632 if (rt) 633 x->replay_maxdiff = nla_get_u32(rt); 634 635 if (mt) 636 x->mapping_maxage = nla_get_u32(mt); 637 } 638 639 static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m) 640 { 641 if (attrs[XFRMA_SET_MARK]) { 642 m->v = nla_get_u32(attrs[XFRMA_SET_MARK]); 643 if (attrs[XFRMA_SET_MARK_MASK]) 644 m->m = nla_get_u32(attrs[XFRMA_SET_MARK_MASK]); 645 else 646 m->m = 0xffffffff; 647 } else { 648 m->v = m->m = 0; 649 } 650 } 651 652 static struct xfrm_state *xfrm_state_construct(struct net *net, 653 struct xfrm_usersa_info *p, 654 struct nlattr **attrs, 655 int *errp, 656 struct netlink_ext_ack *extack) 657 { 658 struct xfrm_state *x = xfrm_state_alloc(net); 659 int err = -ENOMEM; 660 661 if (!x) 662 goto error_no_put; 663 664 copy_from_user_state(x, p); 665 666 if (attrs[XFRMA_ENCAP]) { 667 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), 668 sizeof(*x->encap), GFP_KERNEL); 669 if (x->encap == NULL) 670 goto error; 671 } 672 673 if (attrs[XFRMA_COADDR]) { 674 
x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), 675 sizeof(*x->coaddr), GFP_KERNEL); 676 if (x->coaddr == NULL) 677 goto error; 678 } 679 680 if (attrs[XFRMA_SA_EXTRA_FLAGS]) 681 x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); 682 683 if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD]))) 684 goto error; 685 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo, 686 attrs[XFRMA_ALG_AUTH_TRUNC]))) 687 goto error; 688 if (!x->props.aalgo) { 689 if ((err = attach_auth(&x->aalg, &x->props.aalgo, 690 attrs[XFRMA_ALG_AUTH]))) 691 goto error; 692 } 693 if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT]))) 694 goto error; 695 if ((err = attach_one_algo(&x->calg, &x->props.calgo, 696 xfrm_calg_get_byname, 697 attrs[XFRMA_ALG_COMP]))) 698 goto error; 699 700 if (attrs[XFRMA_TFCPAD]) 701 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]); 702 703 xfrm_mark_get(attrs, &x->mark); 704 705 xfrm_smark_init(attrs, &x->props.smark); 706 707 if (attrs[XFRMA_IF_ID]) 708 x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); 709 710 err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]); 711 if (err) 712 goto error; 713 714 if (attrs[XFRMA_SEC_CTX]) { 715 err = security_xfrm_state_alloc(x, 716 nla_data(attrs[XFRMA_SEC_CTX])); 717 if (err) 718 goto error; 719 } 720 721 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn, 722 attrs[XFRMA_REPLAY_ESN_VAL]))) 723 goto error; 724 725 x->km.seq = p->seq; 726 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth; 727 /* sysctl_xfrm_aevent_etime is in 100ms units */ 728 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M; 729 730 if ((err = xfrm_init_replay(x))) 731 goto error; 732 733 /* override default values from above */ 734 xfrm_update_ae_params(x, attrs, 0); 735 736 /* configure the hardware if offload is requested */ 737 if (attrs[XFRMA_OFFLOAD_DEV]) { 738 err = xfrm_dev_state_add(net, x, 739 nla_data(attrs[XFRMA_OFFLOAD_DEV]), 740 extack); 741 if (err) 742 goto error; 743 } 744 745 return x; 746 747 
error: 748 x->km.state = XFRM_STATE_DEAD; 749 xfrm_state_put(x); 750 error_no_put: 751 *errp = err; 752 return NULL; 753 } 754 755 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 756 struct nlattr **attrs, struct netlink_ext_ack *extack) 757 { 758 struct net *net = sock_net(skb->sk); 759 struct xfrm_usersa_info *p = nlmsg_data(nlh); 760 struct xfrm_state *x; 761 int err; 762 struct km_event c; 763 764 err = verify_newsa_info(p, attrs, extack); 765 if (err) 766 return err; 767 768 x = xfrm_state_construct(net, p, attrs, &err, extack); 769 if (!x) 770 return err; 771 772 xfrm_state_hold(x); 773 if (nlh->nlmsg_type == XFRM_MSG_NEWSA) 774 err = xfrm_state_add(x); 775 else 776 err = xfrm_state_update(x); 777 778 xfrm_audit_state_add(x, err ? 0 : 1, true); 779 780 if (err < 0) { 781 x->km.state = XFRM_STATE_DEAD; 782 xfrm_dev_state_delete(x); 783 __xfrm_state_put(x); 784 goto out; 785 } 786 787 if (x->km.state == XFRM_STATE_VOID) 788 x->km.state = XFRM_STATE_VALID; 789 790 c.seq = nlh->nlmsg_seq; 791 c.portid = nlh->nlmsg_pid; 792 c.event = nlh->nlmsg_type; 793 794 km_state_notify(x, &c); 795 out: 796 xfrm_state_put(x); 797 return err; 798 } 799 800 static struct xfrm_state *xfrm_user_state_lookup(struct net *net, 801 struct xfrm_usersa_id *p, 802 struct nlattr **attrs, 803 int *errp) 804 { 805 struct xfrm_state *x = NULL; 806 struct xfrm_mark m; 807 int err; 808 u32 mark = xfrm_mark_get(attrs, &m); 809 810 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) { 811 err = -ESRCH; 812 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family); 813 } else { 814 xfrm_address_t *saddr = NULL; 815 816 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr); 817 if (!saddr) { 818 err = -EINVAL; 819 goto out; 820 } 821 822 err = -ESRCH; 823 x = xfrm_state_lookup_byaddr(net, mark, 824 &p->daddr, saddr, 825 p->proto, p->family); 826 } 827 828 out: 829 if (!x && errp) 830 *errp = err; 831 return x; 832 } 833 834 static int xfrm_del_sa(struct sk_buff *skb, 
struct nlmsghdr *nlh, 835 struct nlattr **attrs, struct netlink_ext_ack *extack) 836 { 837 struct net *net = sock_net(skb->sk); 838 struct xfrm_state *x; 839 int err = -ESRCH; 840 struct km_event c; 841 struct xfrm_usersa_id *p = nlmsg_data(nlh); 842 843 x = xfrm_user_state_lookup(net, p, attrs, &err); 844 if (x == NULL) 845 return err; 846 847 if ((err = security_xfrm_state_delete(x)) != 0) 848 goto out; 849 850 if (xfrm_state_kern(x)) { 851 err = -EPERM; 852 goto out; 853 } 854 855 err = xfrm_state_delete(x); 856 857 if (err < 0) 858 goto out; 859 860 c.seq = nlh->nlmsg_seq; 861 c.portid = nlh->nlmsg_pid; 862 c.event = nlh->nlmsg_type; 863 km_state_notify(x, &c); 864 865 out: 866 xfrm_audit_state_delete(x, err ? 0 : 1, true); 867 xfrm_state_put(x); 868 return err; 869 } 870 871 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) 872 { 873 memset(p, 0, sizeof(*p)); 874 memcpy(&p->id, &x->id, sizeof(p->id)); 875 memcpy(&p->sel, &x->sel, sizeof(p->sel)); 876 memcpy(&p->lft, &x->lft, sizeof(p->lft)); 877 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft)); 878 put_unaligned(x->stats.replay_window, &p->stats.replay_window); 879 put_unaligned(x->stats.replay, &p->stats.replay); 880 put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed); 881 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr)); 882 p->mode = x->props.mode; 883 p->replay_window = x->props.replay_window; 884 p->reqid = x->props.reqid; 885 p->family = x->props.family; 886 p->flags = x->props.flags; 887 p->seq = x->km.seq; 888 } 889 890 struct xfrm_dump_info { 891 struct sk_buff *in_skb; 892 struct sk_buff *out_skb; 893 u32 nlmsg_seq; 894 u16 nlmsg_flags; 895 }; 896 897 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) 898 { 899 struct xfrm_user_sec_ctx *uctx; 900 struct nlattr *attr; 901 int ctx_size = sizeof(*uctx) + s->ctx_len; 902 903 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size); 904 if (attr == NULL) 905 return -EMSGSIZE; 906 907 uctx = 
nla_data(attr); 908 uctx->exttype = XFRMA_SEC_CTX; 909 uctx->len = ctx_size; 910 uctx->ctx_doi = s->ctx_doi; 911 uctx->ctx_alg = s->ctx_alg; 912 uctx->ctx_len = s->ctx_len; 913 memcpy(uctx + 1, s->ctx_str, s->ctx_len); 914 915 return 0; 916 } 917 918 static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb) 919 { 920 struct xfrm_user_offload *xuo; 921 struct nlattr *attr; 922 923 attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo)); 924 if (attr == NULL) 925 return -EMSGSIZE; 926 927 xuo = nla_data(attr); 928 memset(xuo, 0, sizeof(*xuo)); 929 xuo->ifindex = xso->dev->ifindex; 930 if (xso->dir == XFRM_DEV_OFFLOAD_IN) 931 xuo->flags = XFRM_OFFLOAD_INBOUND; 932 933 return 0; 934 } 935 936 static bool xfrm_redact(void) 937 { 938 return IS_ENABLED(CONFIG_SECURITY) && 939 security_locked_down(LOCKDOWN_XFRM_SECRET); 940 } 941 942 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) 943 { 944 struct xfrm_algo *algo; 945 struct xfrm_algo_auth *ap; 946 struct nlattr *nla; 947 bool redact_secret = xfrm_redact(); 948 949 nla = nla_reserve(skb, XFRMA_ALG_AUTH, 950 sizeof(*algo) + (auth->alg_key_len + 7) / 8); 951 if (!nla) 952 return -EMSGSIZE; 953 algo = nla_data(nla); 954 strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name)); 955 956 if (redact_secret && auth->alg_key_len) 957 memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8); 958 else 959 memcpy(algo->alg_key, auth->alg_key, 960 (auth->alg_key_len + 7) / 8); 961 algo->alg_key_len = auth->alg_key_len; 962 963 nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth)); 964 if (!nla) 965 return -EMSGSIZE; 966 ap = nla_data(nla); 967 memcpy(ap, auth, sizeof(struct xfrm_algo_auth)); 968 if (redact_secret && auth->alg_key_len) 969 memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8); 970 else 971 memcpy(ap->alg_key, auth->alg_key, 972 (auth->alg_key_len + 7) / 8); 973 return 0; 974 } 975 976 static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct 
sk_buff *skb) 977 { 978 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead)); 979 struct xfrm_algo_aead *ap; 980 bool redact_secret = xfrm_redact(); 981 982 if (!nla) 983 return -EMSGSIZE; 984 985 ap = nla_data(nla); 986 memcpy(ap, aead, sizeof(*aead)); 987 988 if (redact_secret && aead->alg_key_len) 989 memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8); 990 else 991 memcpy(ap->alg_key, aead->alg_key, 992 (aead->alg_key_len + 7) / 8); 993 return 0; 994 } 995 996 static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb) 997 { 998 struct xfrm_algo *ap; 999 bool redact_secret = xfrm_redact(); 1000 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT, 1001 xfrm_alg_len(ealg)); 1002 if (!nla) 1003 return -EMSGSIZE; 1004 1005 ap = nla_data(nla); 1006 memcpy(ap, ealg, sizeof(*ealg)); 1007 1008 if (redact_secret && ealg->alg_key_len) 1009 memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8); 1010 else 1011 memcpy(ap->alg_key, ealg->alg_key, 1012 (ealg->alg_key_len + 7) / 8); 1013 1014 return 0; 1015 } 1016 1017 static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m) 1018 { 1019 int ret = 0; 1020 1021 if (m->v | m->m) { 1022 ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v); 1023 if (!ret) 1024 ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m); 1025 } 1026 return ret; 1027 } 1028 1029 /* Don't change this without updating xfrm_sa_len! 
*/ 1030 static int copy_to_user_state_extra(struct xfrm_state *x, 1031 struct xfrm_usersa_info *p, 1032 struct sk_buff *skb) 1033 { 1034 int ret = 0; 1035 1036 copy_to_user_state(x, p); 1037 1038 if (x->props.extra_flags) { 1039 ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS, 1040 x->props.extra_flags); 1041 if (ret) 1042 goto out; 1043 } 1044 1045 if (x->coaddr) { 1046 ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); 1047 if (ret) 1048 goto out; 1049 } 1050 if (x->lastused) { 1051 ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused, 1052 XFRMA_PAD); 1053 if (ret) 1054 goto out; 1055 } 1056 if (x->aead) { 1057 ret = copy_to_user_aead(x->aead, skb); 1058 if (ret) 1059 goto out; 1060 } 1061 if (x->aalg) { 1062 ret = copy_to_user_auth(x->aalg, skb); 1063 if (ret) 1064 goto out; 1065 } 1066 if (x->ealg) { 1067 ret = copy_to_user_ealg(x->ealg, skb); 1068 if (ret) 1069 goto out; 1070 } 1071 if (x->calg) { 1072 ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); 1073 if (ret) 1074 goto out; 1075 } 1076 if (x->encap) { 1077 ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 1078 if (ret) 1079 goto out; 1080 } 1081 if (x->tfcpad) { 1082 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad); 1083 if (ret) 1084 goto out; 1085 } 1086 ret = xfrm_mark_put(skb, &x->mark); 1087 if (ret) 1088 goto out; 1089 1090 ret = xfrm_smark_put(skb, &x->props.smark); 1091 if (ret) 1092 goto out; 1093 1094 if (x->replay_esn) 1095 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL, 1096 xfrm_replay_state_esn_len(x->replay_esn), 1097 x->replay_esn); 1098 else 1099 ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), 1100 &x->replay); 1101 if (ret) 1102 goto out; 1103 if(x->xso.dev) 1104 ret = copy_user_offload(&x->xso, skb); 1105 if (ret) 1106 goto out; 1107 if (x->if_id) { 1108 ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id); 1109 if (ret) 1110 goto out; 1111 } 1112 if (x->security) { 1113 ret = copy_sec_ctx(x->security, skb); 1114 if (ret) 1115 goto out; 1116 } 1117 if 
(x->mapping_maxage) 1118 ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage); 1119 out: 1120 return ret; 1121 } 1122 1123 static int dump_one_state(struct xfrm_state *x, int count, void *ptr) 1124 { 1125 struct xfrm_dump_info *sp = ptr; 1126 struct sk_buff *in_skb = sp->in_skb; 1127 struct sk_buff *skb = sp->out_skb; 1128 struct xfrm_translator *xtr; 1129 struct xfrm_usersa_info *p; 1130 struct nlmsghdr *nlh; 1131 int err; 1132 1133 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq, 1134 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); 1135 if (nlh == NULL) 1136 return -EMSGSIZE; 1137 1138 p = nlmsg_data(nlh); 1139 1140 err = copy_to_user_state_extra(x, p, skb); 1141 if (err) { 1142 nlmsg_cancel(skb, nlh); 1143 return err; 1144 } 1145 nlmsg_end(skb, nlh); 1146 1147 xtr = xfrm_get_translator(); 1148 if (xtr) { 1149 err = xtr->alloc_compat(skb, nlh); 1150 1151 xfrm_put_translator(xtr); 1152 if (err) { 1153 nlmsg_cancel(skb, nlh); 1154 return err; 1155 } 1156 } 1157 1158 return 0; 1159 } 1160 1161 static int xfrm_dump_sa_done(struct netlink_callback *cb) 1162 { 1163 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; 1164 struct sock *sk = cb->skb->sk; 1165 struct net *net = sock_net(sk); 1166 1167 if (cb->args[0]) 1168 xfrm_state_walk_done(walk, net); 1169 return 0; 1170 } 1171 1172 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) 1173 { 1174 struct net *net = sock_net(skb->sk); 1175 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; 1176 struct xfrm_dump_info info; 1177 1178 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) > 1179 sizeof(cb->args) - sizeof(cb->args[0])); 1180 1181 info.in_skb = cb->skb; 1182 info.out_skb = skb; 1183 info.nlmsg_seq = cb->nlh->nlmsg_seq; 1184 info.nlmsg_flags = NLM_F_MULTI; 1185 1186 if (!cb->args[0]) { 1187 struct nlattr *attrs[XFRMA_MAX+1]; 1188 struct xfrm_address_filter *filter = NULL; 1189 u8 proto = 0; 1190 int err; 1191 1192 err = 
		      nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX,
					     xfrma_policy, cb->extack);
		if (err < 0)
			return err;

		if (attrs[XFRMA_ADDRESS_FILTER]) {
			/* Copy the user-supplied address filter; ownership
			 * passes to the state-walk machinery via
			 * xfrm_state_walk_init() below.
			 * NOTE(review): filter->splen/dplen are not
			 * range-checked here - confirm downstream users
			 * tolerate oversized prefix lengths.
			 */
			filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
					 sizeof(*filter), GFP_KERNEL);
			if (filter == NULL)
				return -ENOMEM;
		}

		if (attrs[XFRMA_PROTO])
			proto = nla_get_u8(attrs[XFRMA_PROTO]);

		/* First pass of the dump: initialize the walk with the
		 * optional proto/address filter, and record in cb->args[0]
		 * that setup has been done so later passes skip it.
		 */
		xfrm_state_walk_init(walk, proto, filter);
		cb->args[0] = 1;
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}

/* Build a freshly allocated skb carrying a single SA dump message for @x,
 * suitable for unicasting back to the requester.  Returns the skb or an
 * ERR_PTR() on allocation or message-build failure.
 */
static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;	/* single reply, not NLM_F_MULTI */

	err = dump_one_state(x, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

/* A wrapper for nlmsg_multicast() checking that nlsk is still available.
 * Must be called with RCU read lock.
 * Consumes @skb on every error path.  If a compat translator is loaded,
 * a 32-bit shadow copy of the message is attached before sending.
 */
static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
				       u32 pid, unsigned int group)
{
	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
	struct xfrm_translator *xtr;

	if (!nlsk) {
		kfree_skb(skb);
		return -EPIPE;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		int err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(skb);
			return err;
		}
	}

	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
}

/* Worst-case payload size of an SPD info reply.  The NLMSG_ALIGN(4)
 * accounts for the leading u32 flags word stored as nlmsg data.
 */
static inline unsigned int xfrm_spdinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhthresh))
	       + nla_total_size(sizeof(struct xfrmu_spdhthresh));
}

/* Fill @skb with an XFRM_MSG_NEWSPDINFO message: policy counters, hash
 * table stats, and the per-family hash thresholds.
 */
static int build_spdinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct xfrmu_spdhthresh spt4, spt6;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;
	unsigned lseq;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(net, &si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	/* Seqlock read loop: retry until we get a consistent snapshot of
	 * the v4/v6 hash thresholds against a concurrent xfrm_set_spdinfo().
	 */
	do {
		lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		spt4.lbits = net->xfrm.policy_hthresh.lbits4;
		spt4.rbits = net->xfrm.policy_hthresh.rbits4;
		spt6.lbits = net->xfrm.policy_hthresh.lbits6;
		spt6.rbits = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));

	err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}

/* XFRM_MSG_NEWSPDINFO handler: update the SPD hash thresholds from the
 * optional per-family attributes, then trigger a policy hash rebuild.
 */
static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrmu_spdhthresh *thresh4 = NULL;
	struct xfrmu_spdhthresh *thresh6 = NULL;

	/* selector prefixlen thresholds to hash policies */
	if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh4))
			return -EINVAL;
		thresh4 = nla_data(rta);
		/* IPv4 prefix lengths are at most 32 bits */
		if (thresh4->lbits > 32 || thresh4->rbits > 32)
			return -EINVAL;
	}
	if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh6))
			return -EINVAL;
		thresh6 = nla_data(rta);
		/* IPv6 prefix lengths are at most 128 bits */
		if (thresh6->lbits > 128 ||
		    thresh6->rbits > 128)
			return -EINVAL;
	}

	if (thresh4 || thresh6) {
		/* Writer side of the seqlock paired with build_spdinfo()'s
		 * read loop; only the families actually supplied change.
		 */
		write_seqlock(&net->xfrm.policy_hthresh.lock);
		if (thresh4) {
			net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
			net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
		}
		if (thresh6) {
			net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
			net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
		}
		write_sequnlock(&net->xfrm.policy_hthresh.lock);

		xfrm_policy_hash_rebuild(net);
	}

	return 0;
}

/* XFRM_MSG_GETSPDINFO handler: build an SPD info reply and unicast it
 * back to the requester.
 */
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;
	int err;

	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	err = build_spdinfo(r_skb, net, sportid, seq, *flags);
	/* The skb was sized by xfrm_spdinfo_msgsize(), so a build failure
	 * here would be a programming error.
	 */
	BUG_ON(err < 0);

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}

/* Worst-case payload size of an SAD info reply. */
static inline unsigned int xfrm_sadinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
	       + nla_total_size(4); /* XFRMA_SAD_CNT */
}

/* Fill @skb with an XFRM_MSG_NEWSADINFO message: SA count plus hash
 * table statistics.
 */
static int build_sadinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_sadinfo si;
	struct xfrmu_sadhinfo sh;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_sad_getinfo(net, &si);

	sh.sadhmcnt = si.sadhmcnt;
	sh.sadhcnt = si.sadhcnt;

	err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
	if (!err)
		err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}

/* XFRM_MSG_GETSADINFO handler: build an SAD info reply and unicast it
 * back to the requester.
 */
static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;
	int err;

	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	err = build_sadinfo(r_skb, net, sportid, seq, *flags);
	/* Sized by xfrm_sadinfo_msgsize(); failure would be a bug. */
	BUG_ON(err < 0);

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}

/* XFRM_MSG_GETSA handler: look up the SA named by the request and
 * unicast a dump of it back to the requester.
 */
static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	struct sk_buff *resp_skb;
	int err = -ESRCH;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		goto out_noput;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
	} else {
		err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
	}
	xfrm_state_put(x);
out_noput:
	return err;
}

/* XFRM_MSG_ALLOCSPI handler: find (or revive by acquire-seq) the matching
 * larval SA, allocate an SPI in the requested [min, max] range, and
 * unicast the resulting SA back to the requester.
 */
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct xfrm_translator *xtr;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;
	u32 mark;
	struct xfrm_mark m;
	u32 if_id = 0;

	p = nlmsg_data(nlh);
	err = verify_spi_info(p->info.id.proto, p->min, p->max);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;

	mark = xfrm_mark_get(attrs, &m);

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	/* Prefer the acquire matching the caller's sequence number, but
	 * only if its destination address agrees with the request.
	 */
	if (p->info.seq) {
		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
		if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	if (!x)
		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
				  if_id, p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (x == NULL)
		goto out_noput;

	err = xfrm_alloc_spi(x, p->min, p->max);
	if (err)
		goto out;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		/* NOTE(review): the compat shadow is attached to the
		 * request skb here, not resp_skb - confirm this is the
		 * intended target.
		 */
		err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(resp_skb);
			goto out;
		}
	}

	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);

out:
	xfrm_state_put(x);
out_noput:
	return err;
}

/* Reject any policy direction other than IN/OUT/FWD. */
static int verify_policy_dir(u8 dir, struct netlink_ext_ack *extack)
{
	switch (dir) {
	case XFRM_POLICY_IN:
	case XFRM_POLICY_OUT:
	case XFRM_POLICY_FWD:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy direction");
		return -EINVAL;
	}

	return 0;
}

/* Reject any policy type other than MAIN (and SUB when configured in). */
static int verify_policy_type(u8 type, struct netlink_ext_ack *extack)
{
	switch (type) {
	case
	     XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
	case XFRM_POLICY_TYPE_SUB:
#endif
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy type");
		return -EINVAL;
	}

	return 0;
}

/* Validate a new userspace policy: share mode, action, selector family
 * and prefix lengths, direction, and index/direction agreement.
 */
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p,
				 struct netlink_ext_ack *extack)
{
	int ret;

	switch (p->share) {
	case XFRM_SHARE_ANY:
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy share");
		return -EINVAL;
	}

	switch (p->action) {
	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy action");
		return -EINVAL;
	}

	switch (p->sel.family) {
	case AF_INET:
		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
			return -EINVAL;
		}

		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
			return -EINVAL;
		}

		break;
#else
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		return -EAFNOSUPPORT;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid selector family");
		return -EINVAL;
	}

	ret = verify_policy_dir(p->dir, extack);
	if (ret)
		return ret;
	/* A caller-chosen index encodes the direction; they must agree. */
	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir)) {
		NL_SET_ERR_MSG(extack, "Policy index doesn't match direction");
		return -EINVAL;
	}

	return 0;
}

/* Attach the optional XFRMA_SEC_CTX attribute to @pol via the LSM.
 * The attribute length was already validated by verify_sec_ctx_len().
 */
static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
}

/* Copy @nr already-validated user templates into the policy's template
 * vector.
 */
static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
			   int nr)
{
	int i;

	xp->xfrm_nr = nr;
	for (i = 0; i < nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];

		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
		memcpy(&t->saddr, &ut->saddr,
		       sizeof(xfrm_address_t));
		t->reqid = ut->reqid;
		t->mode = ut->mode;
		t->share = ut->share;
		t->optional = ut->optional;
		t->aalgos = ut->aalgos;
		t->ealgos = ut->ealgos;
		t->calgos = ut->calgos;
		/* If all masks are ~0, then we allow all algorithms. */
		t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
		t->encap_family = ut->family;
	}
}

/* Validate @nr user templates against the policy family: bounded count,
 * valid mode/family/proto, and no family change in non-tunnel modes.
 * May normalize ut[i].family in place (zero means "inherit @family").
 */
static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
			 struct netlink_ext_ack *extack)
{
	u16 prev_family;
	int i;

	if (nr > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Template count must be <= XFRM_MAX_DEPTH (" __stringify(XFRM_MAX_DEPTH) ")");
		return -EINVAL;
	}

	prev_family = family;

	for (i = 0; i < nr; i++) {
		/* We never validated the ut->family value, so many
		 * applications simply leave it at zero. The check was
		 * never made and ut->family was ignored because all
		 * templates could be assumed to have the same family as
		 * the policy itself. Now that we will have ipv4-in-ipv6
		 * and ipv6-in-ipv4 tunnels, this is no longer true.
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			break;
		default:
			if (ut[i].family != prev_family) {
				NL_SET_ERR_MSG(extack, "Mode in template doesn't support a family change");
				return -EINVAL;
			}
			break;
		}
		if (ut[i].mode >= XFRM_MODE_MAX) {
			NL_SET_ERR_MSG(extack, "Mode in template must be < XFRM_MODE_MAX (" __stringify(XFRM_MODE_MAX) ")");
			return -EINVAL;
		}

		prev_family = ut[i].family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			break;
#endif
		default:
			NL_SET_ERR_MSG(extack, "Invalid family in template");
			return -EINVAL;
		}

		if (!xfrm_id_proto_valid(ut[i].id.proto)) {
			NL_SET_ERR_MSG(extack, "Invalid XFRM protocol in template");
			return -EINVAL;
		}
	}

	return 0;
}

/* Validate and copy the optional XFRMA_TMPL attribute into @pol.
 * Absent attribute means a policy with zero templates.
 */
static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_TMPL];

	if (!rt) {
		pol->xfrm_nr = 0;
	} else {
		struct xfrm_user_tmpl *utmpl = nla_data(rt);
		int nr = nla_len(rt) / sizeof(*utmpl);
		int err;

		err = validate_tmpl(nr, utmpl, pol->family, extack);
		if (err)
			return err;

		copy_templates(pol, utmpl, nr);
	}
	return 0;
}

/* Extract the policy type from the optional XFRMA_POLICY_TYPE attribute,
 * defaulting to MAIN, and validate it before storing in *tp.
 */
static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs,
				      struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
	struct xfrm_userpolicy_type *upt;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	if (rt) {
		upt = nla_data(rt);
		type = upt->type;
	}

	err = verify_policy_type(type, extack);
	if (err)
		return err;

	*tp = type;
	return 0;
}

static void
copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
	/* Note: the policy family is taken from the selector, not from a
	 * separate field in the userspace struct.
	 */
	xp->priority = p->priority;
	xp->index = p->index;
	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
	xp->action = p->action;
	xp->flags = p->flags;
	xp->family = p->sel.family;
	/* XXX xp->share = p->share; */
}

/* Inverse of copy_from_user_policy(): fill a userspace policy info
 * struct from kernel policy @xp for direction @dir.
 */
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
	p->priority = xp->priority;
	p->index = xp->index;
	p->sel.family = xp->family;
	p->dir = dir;
	p->action = xp->action;
	p->flags = xp->flags;
	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}

/* Allocate and populate a kernel policy from userspace attributes.
 * On failure returns NULL and stores the error in *errp; the partially
 * built policy is marked dead and destroyed.
 */
static struct xfrm_policy *xfrm_policy_construct(struct net *net,
						 struct xfrm_userpolicy_info *p,
						 struct nlattr **attrs,
						 int *errp,
						 struct netlink_ext_ack *extack)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs, extack);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs, extack)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if (err)
		goto error;

	xfrm_mark_get(attrs, &xp->mark);

	if (attrs[XFRMA_IF_ID])
		xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	return xp;
 error:
	*errp = err;
	/* Never inserted: mark dead so destroy doesn't complain. */
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}

/* XFRM_MSG_NEWPOLICY / XFRM_MSG_UPDPOLICY handler: validate, construct
 * and insert (or replace) a policy, then notify listeners.
 */
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;

	err = verify_newpolicy_info(p, extack);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs, extack);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err, extack);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha! this is anti-netlink really i.e more pfkey derived
	 * in netlink excl is a flag and you wouldn't need
	 * a type XFRM_MSG_UPDPOLICY - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	xfrm_audit_policy_add(xp, err ? 0 : 1, true);

	if (err) {
		/* Insert failed, so xp was never published; free the
		 * security ctx and the policy directly.
		 */
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	xfrm_pol_put(xp);

	return 0;
}

/* Emit the policy's template vector as an XFRMA_TMPL attribute.
 * The on-stack vec[] is safe: xfrm_nr was bounded by XFRM_MAX_DEPTH
 * when the policy was created.
 */
static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
	int i;

	if (xp->xfrm_nr == 0)
		return 0;

	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

		memset(up, 0, sizeof(*up));
		memcpy(&up->id, &kp->id, sizeof(up->id));
		up->family = kp->encap_family;
		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
		up->reqid = kp->reqid;
		up->mode = kp->mode;
		up->share = kp->share;
		up->optional = kp->optional;
		up->aalgos = kp->aalgos;
		up->ealgos = kp->ealgos;
		up->calgos = kp->calgos;
	}

	return nla_put(skb, XFRMA_TMPL,
		       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}

/* Emit the SA's security context, if any, into @skb. */
static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
	if
	   (x->security) {
		return copy_sec_ctx(x->security, skb);
	}
	return 0;
}

/* Emit the policy's security context, if any, into @skb. */
static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
	if (xp->security)
		return copy_sec_ctx(xp->security, skb);
	return 0;
}

/* Attribute space needed for XFRMA_POLICY_TYPE; zero when sub-policies
 * are compiled out (the attribute is then never emitted).
 */
static inline unsigned int userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	return 0;
#endif
}

#ifdef CONFIG_XFRM_SUB_POLICY
/* Emit the policy type attribute (sub-policy builds only). */
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt;

	/* Sadly there are two holes in struct xfrm_userpolicy_type */
	memset(&upt, 0, sizeof(upt));
	upt.type = type;

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}

#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
#endif

/* Walk callback: serialize one policy as an XFRM_MSG_NEWPOLICY message
 * into the dump skb carried in @ptr (a struct xfrm_dump_info).
 */
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_translator *xtr;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);
	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);

	/* Attach a 32-bit compat shadow of the finished message when a
	 * translator is loaded; cancel the message if that fails.
	 */
	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlh);

		xfrm_put_translator(xtr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}

	return 0;
}

/* Netlink dump teardown: finish the policy walk stored in cb->args. */
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
	struct net *net = sock_net(cb->skb->sk);

	xfrm_policy_walk_done(walk, net);
	return 0;
}

/* Netlink dump setup: the walk state lives inside cb->args. */
static int xfrm_dump_policy_start(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;

	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));

	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
	return 0;
}

/* Netlink dump step: emit as many policies as fit into @skb. */
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
	struct xfrm_dump_info info;

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);

	return skb->len;
}

/* Build a freshly allocated skb containing a single policy dump message
 * for @xp, for unicasting back to the requester.
 */
static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					   struct xfrm_policy *xp,
					   int dir, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_policy(xp, dir, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}

/* Multicast the current default-policy settings (XFRM_MSG_GETDEFAULT)
 * to the XFRMNLGRP_POLICY group.
 */
static int xfrm_notify_userpolicy(struct net *net)
{
	struct xfrm_userpolicy_default *up;
	int len = NLMSG_ALIGN(sizeof(*up));
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	up = nlmsg_data(nlh);
	up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];

	nlmsg_end(skb, nlh);

	rcu_read_lock();
	err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
	rcu_read_unlock();

	return err;
}

/* Only BLOCK and ACCEPT are meaningful default-policy values. */
static bool xfrm_userpolicy_is_valid(__u8 policy)
{
	return policy == XFRM_USERPOLICY_BLOCK ||
	       policy == XFRM_USERPOLICY_ACCEPT;
}

/* XFRM_MSG_SETDEFAULT handler: update the per-direction default policy.
 * Fields carrying any other value are silently left unchanged.
 */
static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *up = nlmsg_data(nlh);

	if (xfrm_userpolicy_is_valid(up->in))
		net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;

	if (xfrm_userpolicy_is_valid(up->fwd))
		net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;

	if (xfrm_userpolicy_is_valid(up->out))
		net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;

	/* Invalidate cached routes so the new defaults take effect. */
	rt_genid_bump_all(net);

	xfrm_notify_userpolicy(net);
	return 0;
}

/* XFRM_MSG_GETDEFAULT handler: unicast the current default policies
 * back to the requester.
 */
static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct sk_buff *r_skb;
	struct nlmsghdr *r_nlh;
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *r_up;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
	u32 portid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(len, GFP_ATOMIC);
	if (!r_skb)
		return -ENOMEM;

	r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0);
	if (!r_nlh) {
		kfree_skb(r_skb);
		return -EMSGSIZE;
	}

	r_up = nlmsg_data(r_nlh);
	r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
	nlmsg_end(r_skb, r_nlh);

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
}

/* XFRM_MSG_GETPOLICY / XFRM_MSG_DELPOLICY handler: look up the policy
 * by index or by (selector, security context), then either unicast it
 * back (get) or delete it and notify listeners (delete).
 */
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct km_event c;
	int delete;
	struct xfrm_mark m;
	u32 if_id = 0;

	p = nlmsg_data(nlh);
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = verify_policy_dir(p->dir, extack);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	if (p->index)
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
				      p->index, delete, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs, extack);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, delete, &err);
		/* ctx was only needed for the lookup */
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
					    NETLINK_CB(skb).portid);
		}
	} else {
		xfrm_audit_policy_delete(xp, err ? 0 : 1, true);

		if (err != 0)
			goto out;

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.portid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
	}

out:
	xfrm_pol_put(xp);
	return err;
}

/* XFRM_MSG_FLUSHSA handler: flush all SAs of the given proto and notify
 * listeners.  An already-empty table (-ESRCH) is treated as success.
 */
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct nlattr **attrs,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
	int err;

	err = xfrm_state_flush(net, p->proto, true, false);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_state_notify(NULL, &c);

	return 0;
}

/* Worst-case payload size of an async-event (AE) message for @x; the
 * replay attribute size depends on whether ESN is in use.
 */
static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
{
	unsigned int replay_size = x->replay_esn ?
2289 xfrm_replay_state_esn_len(x->replay_esn) : 2290 sizeof(struct xfrm_replay_state); 2291 2292 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id)) 2293 + nla_total_size(replay_size) 2294 + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur)) 2295 + nla_total_size(sizeof(struct xfrm_mark)) 2296 + nla_total_size(4) /* XFRM_AE_RTHR */ 2297 + nla_total_size(4); /* XFRM_AE_ETHR */ 2298 } 2299 2300 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c) 2301 { 2302 struct xfrm_aevent_id *id; 2303 struct nlmsghdr *nlh; 2304 int err; 2305 2306 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); 2307 if (nlh == NULL) 2308 return -EMSGSIZE; 2309 2310 id = nlmsg_data(nlh); 2311 memset(&id->sa_id, 0, sizeof(id->sa_id)); 2312 memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr)); 2313 id->sa_id.spi = x->id.spi; 2314 id->sa_id.family = x->props.family; 2315 id->sa_id.proto = x->id.proto; 2316 memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr)); 2317 id->reqid = x->props.reqid; 2318 id->flags = c->data.aevent; 2319 2320 if (x->replay_esn) { 2321 err = nla_put(skb, XFRMA_REPLAY_ESN_VAL, 2322 xfrm_replay_state_esn_len(x->replay_esn), 2323 x->replay_esn); 2324 } else { 2325 err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), 2326 &x->replay); 2327 } 2328 if (err) 2329 goto out_cancel; 2330 err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft, 2331 XFRMA_PAD); 2332 if (err) 2333 goto out_cancel; 2334 2335 if (id->flags & XFRM_AE_RTHR) { 2336 err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff); 2337 if (err) 2338 goto out_cancel; 2339 } 2340 if (id->flags & XFRM_AE_ETHR) { 2341 err = nla_put_u32(skb, XFRMA_ETIMER_THRESH, 2342 x->replay_maxage * 10 / HZ); 2343 if (err) 2344 goto out_cancel; 2345 } 2346 err = xfrm_mark_put(skb, &x->mark); 2347 if (err) 2348 goto out_cancel; 2349 2350 err = xfrm_if_id_put(skb, x->if_id); 2351 if (err) 2352 goto out_cancel; 2353 2354 nlmsg_end(skb, 
nlh); 2355 return 0; 2356 2357 out_cancel: 2358 nlmsg_cancel(skb, nlh); 2359 return err; 2360 } 2361 2362 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 2363 struct nlattr **attrs, struct netlink_ext_ack *extack) 2364 { 2365 struct net *net = sock_net(skb->sk); 2366 struct xfrm_state *x; 2367 struct sk_buff *r_skb; 2368 int err; 2369 struct km_event c; 2370 u32 mark; 2371 struct xfrm_mark m; 2372 struct xfrm_aevent_id *p = nlmsg_data(nlh); 2373 struct xfrm_usersa_id *id = &p->sa_id; 2374 2375 mark = xfrm_mark_get(attrs, &m); 2376 2377 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family); 2378 if (x == NULL) 2379 return -ESRCH; 2380 2381 r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC); 2382 if (r_skb == NULL) { 2383 xfrm_state_put(x); 2384 return -ENOMEM; 2385 } 2386 2387 /* 2388 * XXX: is this lock really needed - none of the other 2389 * gets lock (the concern is things getting updated 2390 * while we are still reading) - jhs 2391 */ 2392 spin_lock_bh(&x->lock); 2393 c.data.aevent = p->flags; 2394 c.seq = nlh->nlmsg_seq; 2395 c.portid = nlh->nlmsg_pid; 2396 2397 err = build_aevent(r_skb, x, &c); 2398 BUG_ON(err < 0); 2399 2400 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid); 2401 spin_unlock_bh(&x->lock); 2402 xfrm_state_put(x); 2403 return err; 2404 } 2405 2406 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, 2407 struct nlattr **attrs, struct netlink_ext_ack *extack) 2408 { 2409 struct net *net = sock_net(skb->sk); 2410 struct xfrm_state *x; 2411 struct km_event c; 2412 int err = -EINVAL; 2413 u32 mark = 0; 2414 struct xfrm_mark m; 2415 struct xfrm_aevent_id *p = nlmsg_data(nlh); 2416 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 2417 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; 2418 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 2419 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; 2420 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; 2421 2422 if (!lt && !rp && !re && !et && 
!rt) 2423 return err; 2424 2425 /* pedantic mode - thou shalt sayeth replaceth */ 2426 if (!(nlh->nlmsg_flags&NLM_F_REPLACE)) 2427 return err; 2428 2429 mark = xfrm_mark_get(attrs, &m); 2430 2431 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family); 2432 if (x == NULL) 2433 return -ESRCH; 2434 2435 if (x->km.state != XFRM_STATE_VALID) 2436 goto out; 2437 2438 err = xfrm_replay_verify_len(x->replay_esn, re); 2439 if (err) 2440 goto out; 2441 2442 spin_lock_bh(&x->lock); 2443 xfrm_update_ae_params(x, attrs, 1); 2444 spin_unlock_bh(&x->lock); 2445 2446 c.event = nlh->nlmsg_type; 2447 c.seq = nlh->nlmsg_seq; 2448 c.portid = nlh->nlmsg_pid; 2449 c.data.aevent = XFRM_AE_CU; 2450 km_state_notify(x, &c); 2451 err = 0; 2452 out: 2453 xfrm_state_put(x); 2454 return err; 2455 } 2456 2457 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, 2458 struct nlattr **attrs, 2459 struct netlink_ext_ack *extack) 2460 { 2461 struct net *net = sock_net(skb->sk); 2462 struct km_event c; 2463 u8 type = XFRM_POLICY_TYPE_MAIN; 2464 int err; 2465 2466 err = copy_from_user_policy_type(&type, attrs, extack); 2467 if (err) 2468 return err; 2469 2470 err = xfrm_policy_flush(net, type, true); 2471 if (err) { 2472 if (err == -ESRCH) /* empty table */ 2473 return 0; 2474 return err; 2475 } 2476 2477 c.data.type = type; 2478 c.event = nlh->nlmsg_type; 2479 c.seq = nlh->nlmsg_seq; 2480 c.portid = nlh->nlmsg_pid; 2481 c.net = net; 2482 km_policy_notify(NULL, 0, &c); 2483 return 0; 2484 } 2485 2486 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, 2487 struct nlattr **attrs, 2488 struct netlink_ext_ack *extack) 2489 { 2490 struct net *net = sock_net(skb->sk); 2491 struct xfrm_policy *xp; 2492 struct xfrm_user_polexpire *up = nlmsg_data(nlh); 2493 struct xfrm_userpolicy_info *p = &up->pol; 2494 u8 type = XFRM_POLICY_TYPE_MAIN; 2495 int err = -ENOENT; 2496 struct xfrm_mark m; 2497 u32 if_id = 0; 2498 2499 err = 
copy_from_user_policy_type(&type, attrs, extack); 2500 if (err) 2501 return err; 2502 2503 err = verify_policy_dir(p->dir, extack); 2504 if (err) 2505 return err; 2506 2507 if (attrs[XFRMA_IF_ID]) 2508 if_id = nla_get_u32(attrs[XFRMA_IF_ID]); 2509 2510 xfrm_mark_get(attrs, &m); 2511 2512 if (p->index) 2513 xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index, 2514 0, &err); 2515 else { 2516 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 2517 struct xfrm_sec_ctx *ctx; 2518 2519 err = verify_sec_ctx_len(attrs, extack); 2520 if (err) 2521 return err; 2522 2523 ctx = NULL; 2524 if (rt) { 2525 struct xfrm_user_sec_ctx *uctx = nla_data(rt); 2526 2527 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); 2528 if (err) 2529 return err; 2530 } 2531 xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, 2532 &p->sel, ctx, 0, &err); 2533 security_xfrm_policy_free(ctx); 2534 } 2535 if (xp == NULL) 2536 return -ENOENT; 2537 2538 if (unlikely(xp->walk.dead)) 2539 goto out; 2540 2541 err = 0; 2542 if (up->hard) { 2543 xfrm_policy_delete(xp, p->dir); 2544 xfrm_audit_policy_delete(xp, 1, true); 2545 } 2546 km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid); 2547 2548 out: 2549 xfrm_pol_put(xp); 2550 return err; 2551 } 2552 2553 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, 2554 struct nlattr **attrs, 2555 struct netlink_ext_ack *extack) 2556 { 2557 struct net *net = sock_net(skb->sk); 2558 struct xfrm_state *x; 2559 int err; 2560 struct xfrm_user_expire *ue = nlmsg_data(nlh); 2561 struct xfrm_usersa_info *p = &ue->state; 2562 struct xfrm_mark m; 2563 u32 mark = xfrm_mark_get(attrs, &m); 2564 2565 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); 2566 2567 err = -ENOENT; 2568 if (x == NULL) 2569 return err; 2570 2571 spin_lock_bh(&x->lock); 2572 err = -EINVAL; 2573 if (x->km.state != XFRM_STATE_VALID) 2574 goto out; 2575 km_state_expired(x, ue->hard, nlh->nlmsg_pid); 2576 2577 if (ue->hard) { 2578 
__xfrm_state_delete(x);
		xfrm_audit_state_delete(x, 1, true);
	}
	err = 0;
out:
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

/* XFRM_MSG_ACQUIRE from userspace: build a temporary state and policy
 * from the request and run km_query() on every template so key managers
 * can start negotiating.  Neither object is inserted into the SAD/SPD;
 * both are freed before returning.
 */
static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_tmpl *ut;
	int i;
	struct nlattr *rt = attrs[XFRMA_TMPL];
	struct xfrm_mark mark;

	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto nomem;

	xfrm_mark_get(attrs, &mark);

	err = verify_newpolicy_info(&ua->policy, extack);
	if (err)
		goto free_state;
	err = verify_sec_ctx_len(attrs, extack);
	if (err)
		goto free_state;

	/* build an XP */
	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err, extack);
	if (!xp)
		goto free_state;

	memcpy(&x->id, &ua->id, sizeof(ua->id));
	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
	xp->mark.m = x->mark.m = mark.m;
	xp->mark.v = x->mark.v = mark.v;
	ut = nla_data(rt);
	/* extract the templates and for each call km_key */
	for (i = 0; i < xp->xfrm_nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
		memcpy(&x->id, &t->id, sizeof(x->id));
		x->props.mode = t->mode;
		x->props.reqid = t->reqid;
		x->props.family = ut->family;
		t->aalgos = ua->aalgos;
		t->ealgos = ua->ealgos;
		t->calgos = ua->calgos;
		/* NOTE(review): km_query() errors are not propagated; the
		 * function returns 0 once the policy was built.
		 */
		err = km_query(x, t, xp);

	}

	xfrm_state_free(x);
	kfree(xp);

	return 0;

free_state:
	xfrm_state_free(x);
nomem:
	return err;
}

#ifdef CONFIG_XFRM_MIGRATE
/* Unpack the XFRMA_MIGRATE attribute array (and optional XFRMA_KMADDRESS)
 * into kernel structures; *num receives the migrate-entry count.
 */
static int copy_from_user_migrate(struct xfrm_migrate *ma,
				  struct xfrm_kmaddress *k,
struct nlattr **attrs, int *num)
{
	struct nlattr *rt = attrs[XFRMA_MIGRATE];
	struct xfrm_user_migrate *um;
	int i, num_migrate;

	/* k != NULL means the caller saw an XFRMA_KMADDRESS attribute. */
	if (k != NULL) {
		struct xfrm_user_kmaddress *uk;

		uk = nla_data(attrs[XFRMA_KMADDRESS]);
		memcpy(&k->local, &uk->local, sizeof(k->local));
		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
		k->family = uk->family;
		k->reserved = uk->reserved;
	}

	um = nla_data(rt);
	num_migrate = nla_len(rt) / sizeof(*um);

	/* Reject empty requests and cap the entry count so the caller's
	 * fixed-size xfrm_migrate array cannot be overrun.
	 */
	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++, um++, ma++) {
		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));

		ma->proto = um->proto;
		ma->mode = um->mode;
		ma->reqid = um->reqid;

		ma->old_family = um->old_family;
		ma->new_family = um->new_family;
	}

	*num = i;
	return 0;
}

/* XFRM_MSG_MIGRATE: move existing states/policies to new endpoints
 * (e.g. MIPv6 / mobility), optionally with a new encapsulation template.
 */
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
	struct xfrm_migrate m[XFRM_MAX_DEPTH];
	struct xfrm_kmaddress km, *kmp;
	u8 type;
	int err;
	int n = 0;
	struct net *net = sock_net(skb->sk);
	struct xfrm_encap_tmpl *encap = NULL;
	u32 if_id = 0;

	if (attrs[XFRMA_MIGRATE] == NULL)
		return -EINVAL;

	kmp = attrs[XFRMA_KMADDRESS] ?
&km : NULL;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
	if (err)
		return err;

	/* Nothing to migrate is not an error. */
	if (!n)
		return 0;

	if (attrs[XFRMA_ENCAP]) {
		encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				sizeof(*encap), GFP_KERNEL);
		if (!encap)
			return -ENOMEM;
	}

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id);

	/* encap was only a local copy for xfrm_migrate(); always free it. */
	kfree(encap);

	return err;
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	return -ENOPROTOOPT;
}
#endif

#ifdef CONFIG_XFRM_MIGRATE
/* Serialize one xfrm_migrate entry as an XFRMA_MIGRATE attribute. */
static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
{
	struct xfrm_user_migrate um;

	/* Zero first so padding never leaks kernel memory to userspace. */
	memset(&um, 0, sizeof(um));
	um.proto = m->proto;
	um.mode = m->mode;
	um.reqid = m->reqid;
	um.old_family = m->old_family;
	memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
	memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
	um.new_family = m->new_family;
	memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
	memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));

	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}

/* Serialize the key-manager address as an XFRMA_KMADDRESS attribute. */
static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
{
	struct xfrm_user_kmaddress uk;

	memset(&uk, 0, sizeof(uk));
	uk.family = k->family;
	uk.reserved = k->reserved;
	memcpy(&uk.local, &k->local, sizeof(uk.local));
	memcpy(&uk.remote, &k->remote, sizeof(uk.remote));

	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
}

/* Worst-case message size for an XFRM_MSG_MIGRATE notification. */
static inline unsigned int xfrm_migrate_msgsize(int
num_migrate, int with_kma, 2780 int with_encp) 2781 { 2782 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id)) 2783 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0) 2784 + (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0) 2785 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate) 2786 + userpolicy_type_attrsize(); 2787 } 2788 2789 static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m, 2790 int num_migrate, const struct xfrm_kmaddress *k, 2791 const struct xfrm_selector *sel, 2792 const struct xfrm_encap_tmpl *encap, u8 dir, u8 type) 2793 { 2794 const struct xfrm_migrate *mp; 2795 struct xfrm_userpolicy_id *pol_id; 2796 struct nlmsghdr *nlh; 2797 int i, err; 2798 2799 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0); 2800 if (nlh == NULL) 2801 return -EMSGSIZE; 2802 2803 pol_id = nlmsg_data(nlh); 2804 /* copy data from selector, dir, and type to the pol_id */ 2805 memset(pol_id, 0, sizeof(*pol_id)); 2806 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); 2807 pol_id->dir = dir; 2808 2809 if (k != NULL) { 2810 err = copy_to_user_kmaddress(k, skb); 2811 if (err) 2812 goto out_cancel; 2813 } 2814 if (encap) { 2815 err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap); 2816 if (err) 2817 goto out_cancel; 2818 } 2819 err = copy_to_user_policy_type(type, skb); 2820 if (err) 2821 goto out_cancel; 2822 for (i = 0, mp = m ; i < num_migrate; i++, mp++) { 2823 err = copy_to_user_migrate(mp, skb); 2824 if (err) 2825 goto out_cancel; 2826 } 2827 2828 nlmsg_end(skb, nlh); 2829 return 0; 2830 2831 out_cancel: 2832 nlmsg_cancel(skb, nlh); 2833 return err; 2834 } 2835 2836 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2837 const struct xfrm_migrate *m, int num_migrate, 2838 const struct xfrm_kmaddress *k, 2839 const struct xfrm_encap_tmpl *encap) 2840 { 2841 struct net *net = &init_net; 2842 struct sk_buff *skb; 2843 int err; 2844 2845 skb = 
nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap), 2846 GFP_ATOMIC); 2847 if (skb == NULL) 2848 return -ENOMEM; 2849 2850 /* build migrate */ 2851 err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type); 2852 BUG_ON(err < 0); 2853 2854 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE); 2855 } 2856 #else 2857 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2858 const struct xfrm_migrate *m, int num_migrate, 2859 const struct xfrm_kmaddress *k, 2860 const struct xfrm_encap_tmpl *encap) 2861 { 2862 return -ENOPROTOOPT; 2863 } 2864 #endif 2865 2866 #define XMSGSIZE(type) sizeof(struct type) 2867 2868 const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { 2869 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), 2870 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 2871 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 2872 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), 2873 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 2874 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 2875 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info), 2876 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), 2877 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), 2878 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), 2879 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), 2880 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), 2881 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), 2882 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0, 2883 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), 2884 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), 2885 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), 2886 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 2887 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] 
= sizeof(u32), 2888 [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32), 2889 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32), 2890 [XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default), 2891 [XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default), 2892 }; 2893 EXPORT_SYMBOL_GPL(xfrm_msg_min); 2894 2895 #undef XMSGSIZE 2896 2897 const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { 2898 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)}, 2899 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)}, 2900 [XFRMA_LASTUSED] = { .type = NLA_U64}, 2901 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)}, 2902 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) }, 2903 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) }, 2904 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) }, 2905 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, 2906 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, 2907 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, 2908 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, 2909 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, 2910 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, 2911 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, 2912 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 }, 2913 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) }, 2914 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) }, 2915 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)}, 2916 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) }, 2917 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) }, 2918 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) }, 2919 [XFRMA_TFCPAD] = { .type = NLA_U32 }, 2920 [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) }, 2921 [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 }, 2922 [XFRMA_PROTO] = { .type = NLA_U8 }, 2923 [XFRMA_ADDRESS_FILTER] = { .len = 
sizeof(struct xfrm_address_filter) }, 2924 [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) }, 2925 [XFRMA_SET_MARK] = { .type = NLA_U32 }, 2926 [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, 2927 [XFRMA_IF_ID] = { .type = NLA_U32 }, 2928 }; 2929 EXPORT_SYMBOL_GPL(xfrma_policy); 2930 2931 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { 2932 [XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, 2933 [XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, 2934 }; 2935 2936 static const struct xfrm_link { 2937 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **, 2938 struct netlink_ext_ack *); 2939 int (*start)(struct netlink_callback *); 2940 int (*dump)(struct sk_buff *, struct netlink_callback *); 2941 int (*done)(struct netlink_callback *); 2942 const struct nla_policy *nla_pol; 2943 int nla_max; 2944 } xfrm_dispatch[XFRM_NR_MSGTYPES] = { 2945 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 2946 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, 2947 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, 2948 .dump = xfrm_dump_sa, 2949 .done = xfrm_dump_sa_done }, 2950 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 2951 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, 2952 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, 2953 .start = xfrm_dump_policy_start, 2954 .dump = xfrm_dump_policy, 2955 .done = xfrm_dump_policy_done }, 2956 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, 2957 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire }, 2958 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire }, 2959 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 2960 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 2961 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire}, 2962 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = 
xfrm_flush_sa }, 2963 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy }, 2964 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae }, 2965 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, 2966 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate }, 2967 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo }, 2968 [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo, 2969 .nla_pol = xfrma_spd_policy, 2970 .nla_max = XFRMA_SPD_MAX }, 2971 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo }, 2972 [XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_set_default }, 2973 [XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_get_default }, 2974 }; 2975 2976 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 2977 struct netlink_ext_ack *extack) 2978 { 2979 struct net *net = sock_net(skb->sk); 2980 struct nlattr *attrs[XFRMA_MAX+1]; 2981 const struct xfrm_link *link; 2982 struct nlmsghdr *nlh64 = NULL; 2983 int type, err; 2984 2985 type = nlh->nlmsg_type; 2986 if (type > XFRM_MSG_MAX) 2987 return -EINVAL; 2988 2989 type -= XFRM_MSG_BASE; 2990 link = &xfrm_dispatch[type]; 2991 2992 /* All operations require privileges, even GET */ 2993 if (!netlink_net_capable(skb, CAP_NET_ADMIN)) 2994 return -EPERM; 2995 2996 if (in_compat_syscall()) { 2997 struct xfrm_translator *xtr = xfrm_get_translator(); 2998 2999 if (!xtr) 3000 return -EOPNOTSUPP; 3001 3002 nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max, 3003 link->nla_pol, extack); 3004 xfrm_put_translator(xtr); 3005 if (IS_ERR(nlh64)) 3006 return PTR_ERR(nlh64); 3007 if (nlh64) 3008 nlh = nlh64; 3009 } 3010 3011 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || 3012 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && 3013 (nlh->nlmsg_flags & NLM_F_DUMP)) { 3014 struct netlink_dump_control c = { 3015 .start = link->start, 3016 .dump = link->dump, 3017 .done = link->done, 3018 }; 3019 3020 if (link->dump == NULL) { 3021 err = 
-EINVAL;
			goto err;
		}

		err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
		goto err;
	}

	/* Per-message policy/attr-count override, else the global table. */
	err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs,
				     link->nla_max ? : XFRMA_MAX,
				     link->nla_pol ? : xfrma_policy, extack);
	if (err < 0)
		goto err;

	if (link->doit == NULL) {
		err = -EINVAL;
		goto err;
	}

	err = link->doit(skb, nlh, attrs, extack);

	/* We need to free skb allocated in xfrm_alloc_compat() before
	 * returning from this function, because consume_skb() won't take
	 * care of frag_list since netlink destructor sets
	 * sbk->head to NULL. (see netlink_skb_destructor())
	 */
	if (skb_has_frag_list(skb)) {
		kfree_skb(skb_shinfo(skb)->frag_list);
		skb_shinfo(skb)->frag_list = NULL;
	}

err:
	kvfree(nlh64);
	return err;
}

/* Netlink input callback: serialize all xfrm configuration under the
 * per-netns cfg mutex.
 */
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);

	mutex_lock(&net->xfrm.xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
}

/* Message size for an XFRM_MSG_EXPIRE notification (payload + mark). */
static inline unsigned int xfrm_expire_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
	       + nla_total_size(sizeof(struct xfrm_mark));
}

/* Fill skb with an XFRM_MSG_EXPIRE message for state x. */
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ?
1 : 0; 3085 /* clear the padding bytes */ 3086 memset_after(ue, 0, hard); 3087 3088 err = xfrm_mark_put(skb, &x->mark); 3089 if (err) 3090 return err; 3091 3092 err = xfrm_if_id_put(skb, x->if_id); 3093 if (err) 3094 return err; 3095 3096 nlmsg_end(skb, nlh); 3097 return 0; 3098 } 3099 3100 static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c) 3101 { 3102 struct net *net = xs_net(x); 3103 struct sk_buff *skb; 3104 3105 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC); 3106 if (skb == NULL) 3107 return -ENOMEM; 3108 3109 if (build_expire(skb, x, c) < 0) { 3110 kfree_skb(skb); 3111 return -EMSGSIZE; 3112 } 3113 3114 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE); 3115 } 3116 3117 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c) 3118 { 3119 struct net *net = xs_net(x); 3120 struct sk_buff *skb; 3121 int err; 3122 3123 skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC); 3124 if (skb == NULL) 3125 return -ENOMEM; 3126 3127 err = build_aevent(skb, x, c); 3128 BUG_ON(err < 0); 3129 3130 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS); 3131 } 3132 3133 static int xfrm_notify_sa_flush(const struct km_event *c) 3134 { 3135 struct net *net = c->net; 3136 struct xfrm_usersa_flush *p; 3137 struct nlmsghdr *nlh; 3138 struct sk_buff *skb; 3139 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush)); 3140 3141 skb = nlmsg_new(len, GFP_ATOMIC); 3142 if (skb == NULL) 3143 return -ENOMEM; 3144 3145 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); 3146 if (nlh == NULL) { 3147 kfree_skb(skb); 3148 return -EMSGSIZE; 3149 } 3150 3151 p = nlmsg_data(nlh); 3152 p->proto = c->data.proto; 3153 3154 nlmsg_end(skb, nlh); 3155 3156 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA); 3157 } 3158 3159 static inline unsigned int xfrm_sa_len(struct xfrm_state *x) 3160 { 3161 unsigned int l = 0; 3162 if (x->aead) 3163 l += nla_total_size(aead_len(x->aead)); 3164 if (x->aalg) { 3165 l 
+= nla_total_size(sizeof(struct xfrm_algo) + 3166 (x->aalg->alg_key_len + 7) / 8); 3167 l += nla_total_size(xfrm_alg_auth_len(x->aalg)); 3168 } 3169 if (x->ealg) 3170 l += nla_total_size(xfrm_alg_len(x->ealg)); 3171 if (x->calg) 3172 l += nla_total_size(sizeof(*x->calg)); 3173 if (x->encap) 3174 l += nla_total_size(sizeof(*x->encap)); 3175 if (x->tfcpad) 3176 l += nla_total_size(sizeof(x->tfcpad)); 3177 if (x->replay_esn) 3178 l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn)); 3179 else 3180 l += nla_total_size(sizeof(struct xfrm_replay_state)); 3181 if (x->security) 3182 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) + 3183 x->security->ctx_len); 3184 if (x->coaddr) 3185 l += nla_total_size(sizeof(*x->coaddr)); 3186 if (x->props.extra_flags) 3187 l += nla_total_size(sizeof(x->props.extra_flags)); 3188 if (x->xso.dev) 3189 l += nla_total_size(sizeof(struct xfrm_user_offload)); 3190 if (x->props.smark.v | x->props.smark.m) { 3191 l += nla_total_size(sizeof(x->props.smark.v)); 3192 l += nla_total_size(sizeof(x->props.smark.m)); 3193 } 3194 if (x->if_id) 3195 l += nla_total_size(sizeof(x->if_id)); 3196 3197 /* Must count x->lastused as it may become non-zero behind our back. 
*/ 3198 l += nla_total_size_64bit(sizeof(u64)); 3199 3200 if (x->mapping_maxage) 3201 l += nla_total_size(sizeof(x->mapping_maxage)); 3202 3203 return l; 3204 } 3205 3206 static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c) 3207 { 3208 struct net *net = xs_net(x); 3209 struct xfrm_usersa_info *p; 3210 struct xfrm_usersa_id *id; 3211 struct nlmsghdr *nlh; 3212 struct sk_buff *skb; 3213 unsigned int len = xfrm_sa_len(x); 3214 unsigned int headlen; 3215 int err; 3216 3217 headlen = sizeof(*p); 3218 if (c->event == XFRM_MSG_DELSA) { 3219 len += nla_total_size(headlen); 3220 headlen = sizeof(*id); 3221 len += nla_total_size(sizeof(struct xfrm_mark)); 3222 } 3223 len += NLMSG_ALIGN(headlen); 3224 3225 skb = nlmsg_new(len, GFP_ATOMIC); 3226 if (skb == NULL) 3227 return -ENOMEM; 3228 3229 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); 3230 err = -EMSGSIZE; 3231 if (nlh == NULL) 3232 goto out_free_skb; 3233 3234 p = nlmsg_data(nlh); 3235 if (c->event == XFRM_MSG_DELSA) { 3236 struct nlattr *attr; 3237 3238 id = nlmsg_data(nlh); 3239 memset(id, 0, sizeof(*id)); 3240 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); 3241 id->spi = x->id.spi; 3242 id->family = x->props.family; 3243 id->proto = x->id.proto; 3244 3245 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p)); 3246 err = -EMSGSIZE; 3247 if (attr == NULL) 3248 goto out_free_skb; 3249 3250 p = nla_data(attr); 3251 } 3252 err = copy_to_user_state_extra(x, p, skb); 3253 if (err) 3254 goto out_free_skb; 3255 3256 nlmsg_end(skb, nlh); 3257 3258 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA); 3259 3260 out_free_skb: 3261 kfree_skb(skb); 3262 return err; 3263 } 3264 3265 static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c) 3266 { 3267 3268 switch (c->event) { 3269 case XFRM_MSG_EXPIRE: 3270 return xfrm_exp_state_notify(x, c); 3271 case XFRM_MSG_NEWAE: 3272 return xfrm_aevent_state_notify(x, c); 3273 case XFRM_MSG_DELSA: 3274 case XFRM_MSG_UPDSA: 3275 case 
XFRM_MSG_NEWSA: 3276 return xfrm_notify_sa(x, c); 3277 case XFRM_MSG_FLUSHSA: 3278 return xfrm_notify_sa_flush(c); 3279 default: 3280 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n", 3281 c->event); 3282 break; 3283 } 3284 3285 return 0; 3286 3287 } 3288 3289 static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x, 3290 struct xfrm_policy *xp) 3291 { 3292 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire)) 3293 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 3294 + nla_total_size(sizeof(struct xfrm_mark)) 3295 + nla_total_size(xfrm_user_sec_ctx_size(x->security)) 3296 + userpolicy_type_attrsize(); 3297 } 3298 3299 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, 3300 struct xfrm_tmpl *xt, struct xfrm_policy *xp) 3301 { 3302 __u32 seq = xfrm_get_acqseq(); 3303 struct xfrm_user_acquire *ua; 3304 struct nlmsghdr *nlh; 3305 int err; 3306 3307 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0); 3308 if (nlh == NULL) 3309 return -EMSGSIZE; 3310 3311 ua = nlmsg_data(nlh); 3312 memcpy(&ua->id, &x->id, sizeof(ua->id)); 3313 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); 3314 memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); 3315 copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT); 3316 ua->aalgos = xt->aalgos; 3317 ua->ealgos = xt->ealgos; 3318 ua->calgos = xt->calgos; 3319 ua->seq = x->km.seq = seq; 3320 3321 err = copy_to_user_tmpl(xp, skb); 3322 if (!err) 3323 err = copy_to_user_state_sec_ctx(x, skb); 3324 if (!err) 3325 err = copy_to_user_policy_type(xp->type, skb); 3326 if (!err) 3327 err = xfrm_mark_put(skb, &xp->mark); 3328 if (!err) 3329 err = xfrm_if_id_put(skb, xp->if_id); 3330 if (err) { 3331 nlmsg_cancel(skb, nlh); 3332 return err; 3333 } 3334 3335 nlmsg_end(skb, nlh); 3336 return 0; 3337 } 3338 3339 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, 3340 struct xfrm_policy *xp) 3341 { 3342 struct net *net = xs_net(x); 3343 struct sk_buff *skb; 3344 int err; 3345 3346 skb = 
nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	err = build_acquire(skb, x, xt, xp);
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
}

/* User gives us xfrm_user_policy_info followed by an array of 0
 * or more templates.
 *
 * Parses a socket-option policy (IP_XFRM_POLICY / IPV6_XFRM_POLICY)
 * into a newly allocated xfrm_policy.  On failure *dir carries a
 * negative errno and NULL is returned; on success *dir is the policy
 * direction and the caller owns the returned policy.
 */
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
					       u8 *data, int len, int *dir)
{
	struct net *net = sock_net(sk);
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	/* The socket option must match the socket's address family. */
	switch (sk->sk_family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	*dir = -EINVAL;

	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p, NULL))
		return NULL;

	/* Whatever trails the header is treated as a template array. */
	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (validate_tmpl(nr, ut, p->sel.family, NULL))
		return NULL;

	/* Only IN/OUT/FWD are valid for per-socket policies. */
	if (p->dir > XFRM_POLICY_OUT)
		return NULL;

	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	xp->type = XFRM_POLICY_TYPE_MAIN;
	copy_templates(xp, ut, nr);

	*dir = p->dir;

	return xp;
}

/* Worst-case message size for an XFRM_MSG_POLEXPIRE notification. */
static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       +
userpolicy_type_attrsize(); 3423 } 3424 3425 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, 3426 int dir, const struct km_event *c) 3427 { 3428 struct xfrm_user_polexpire *upe; 3429 int hard = c->data.hard; 3430 struct nlmsghdr *nlh; 3431 int err; 3432 3433 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); 3434 if (nlh == NULL) 3435 return -EMSGSIZE; 3436 3437 upe = nlmsg_data(nlh); 3438 copy_to_user_policy(xp, &upe->pol, dir); 3439 err = copy_to_user_tmpl(xp, skb); 3440 if (!err) 3441 err = copy_to_user_sec_ctx(xp, skb); 3442 if (!err) 3443 err = copy_to_user_policy_type(xp->type, skb); 3444 if (!err) 3445 err = xfrm_mark_put(skb, &xp->mark); 3446 if (!err) 3447 err = xfrm_if_id_put(skb, xp->if_id); 3448 if (err) { 3449 nlmsg_cancel(skb, nlh); 3450 return err; 3451 } 3452 upe->hard = !!hard; 3453 3454 nlmsg_end(skb, nlh); 3455 return 0; 3456 } 3457 3458 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 3459 { 3460 struct net *net = xp_net(xp); 3461 struct sk_buff *skb; 3462 int err; 3463 3464 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC); 3465 if (skb == NULL) 3466 return -ENOMEM; 3467 3468 err = build_polexpire(skb, xp, dir, c); 3469 BUG_ON(err < 0); 3470 3471 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE); 3472 } 3473 3474 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c) 3475 { 3476 unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 3477 struct net *net = xp_net(xp); 3478 struct xfrm_userpolicy_info *p; 3479 struct xfrm_userpolicy_id *id; 3480 struct nlmsghdr *nlh; 3481 struct sk_buff *skb; 3482 unsigned int headlen; 3483 int err; 3484 3485 headlen = sizeof(*p); 3486 if (c->event == XFRM_MSG_DELPOLICY) { 3487 len += nla_total_size(headlen); 3488 headlen = sizeof(*id); 3489 } 3490 len += userpolicy_type_attrsize(); 3491 len += nla_total_size(sizeof(struct xfrm_mark)); 3492 len += 
NLMSG_ALIGN(headlen); 3493 3494 skb = nlmsg_new(len, GFP_ATOMIC); 3495 if (skb == NULL) 3496 return -ENOMEM; 3497 3498 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); 3499 err = -EMSGSIZE; 3500 if (nlh == NULL) 3501 goto out_free_skb; 3502 3503 p = nlmsg_data(nlh); 3504 if (c->event == XFRM_MSG_DELPOLICY) { 3505 struct nlattr *attr; 3506 3507 id = nlmsg_data(nlh); 3508 memset(id, 0, sizeof(*id)); 3509 id->dir = dir; 3510 if (c->data.byid) 3511 id->index = xp->index; 3512 else 3513 memcpy(&id->sel, &xp->selector, sizeof(id->sel)); 3514 3515 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p)); 3516 err = -EMSGSIZE; 3517 if (attr == NULL) 3518 goto out_free_skb; 3519 3520 p = nla_data(attr); 3521 } 3522 3523 copy_to_user_policy(xp, p, dir); 3524 err = copy_to_user_tmpl(xp, skb); 3525 if (!err) 3526 err = copy_to_user_policy_type(xp->type, skb); 3527 if (!err) 3528 err = xfrm_mark_put(skb, &xp->mark); 3529 if (!err) 3530 err = xfrm_if_id_put(skb, xp->if_id); 3531 if (err) 3532 goto out_free_skb; 3533 3534 nlmsg_end(skb, nlh); 3535 3536 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); 3537 3538 out_free_skb: 3539 kfree_skb(skb); 3540 return err; 3541 } 3542 3543 static int xfrm_notify_policy_flush(const struct km_event *c) 3544 { 3545 struct net *net = c->net; 3546 struct nlmsghdr *nlh; 3547 struct sk_buff *skb; 3548 int err; 3549 3550 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC); 3551 if (skb == NULL) 3552 return -ENOMEM; 3553 3554 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); 3555 err = -EMSGSIZE; 3556 if (nlh == NULL) 3557 goto out_free_skb; 3558 err = copy_to_user_policy_type(c->data.type, skb); 3559 if (err) 3560 goto out_free_skb; 3561 3562 nlmsg_end(skb, nlh); 3563 3564 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); 3565 3566 out_free_skb: 3567 kfree_skb(skb); 3568 return err; 3569 } 3570 3571 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 3572 { 
3573 3574 switch (c->event) { 3575 case XFRM_MSG_NEWPOLICY: 3576 case XFRM_MSG_UPDPOLICY: 3577 case XFRM_MSG_DELPOLICY: 3578 return xfrm_notify_policy(xp, dir, c); 3579 case XFRM_MSG_FLUSHPOLICY: 3580 return xfrm_notify_policy_flush(c); 3581 case XFRM_MSG_POLEXPIRE: 3582 return xfrm_exp_policy_notify(xp, dir, c); 3583 default: 3584 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", 3585 c->event); 3586 } 3587 3588 return 0; 3589 3590 } 3591 3592 static inline unsigned int xfrm_report_msgsize(void) 3593 { 3594 return NLMSG_ALIGN(sizeof(struct xfrm_user_report)); 3595 } 3596 3597 static int build_report(struct sk_buff *skb, u8 proto, 3598 struct xfrm_selector *sel, xfrm_address_t *addr) 3599 { 3600 struct xfrm_user_report *ur; 3601 struct nlmsghdr *nlh; 3602 3603 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0); 3604 if (nlh == NULL) 3605 return -EMSGSIZE; 3606 3607 ur = nlmsg_data(nlh); 3608 ur->proto = proto; 3609 memcpy(&ur->sel, sel, sizeof(ur->sel)); 3610 3611 if (addr) { 3612 int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr); 3613 if (err) { 3614 nlmsg_cancel(skb, nlh); 3615 return err; 3616 } 3617 } 3618 nlmsg_end(skb, nlh); 3619 return 0; 3620 } 3621 3622 static int xfrm_send_report(struct net *net, u8 proto, 3623 struct xfrm_selector *sel, xfrm_address_t *addr) 3624 { 3625 struct sk_buff *skb; 3626 int err; 3627 3628 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC); 3629 if (skb == NULL) 3630 return -ENOMEM; 3631 3632 err = build_report(skb, proto, sel, addr); 3633 BUG_ON(err < 0); 3634 3635 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT); 3636 } 3637 3638 static inline unsigned int xfrm_mapping_msgsize(void) 3639 { 3640 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping)); 3641 } 3642 3643 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x, 3644 xfrm_address_t *new_saddr, __be16 new_sport) 3645 { 3646 struct xfrm_user_mapping *um; 3647 struct nlmsghdr *nlh; 3648 3649 nlh = nlmsg_put(skb, 0, 0, 
XFRM_MSG_MAPPING, sizeof(*um), 0); 3650 if (nlh == NULL) 3651 return -EMSGSIZE; 3652 3653 um = nlmsg_data(nlh); 3654 3655 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr)); 3656 um->id.spi = x->id.spi; 3657 um->id.family = x->props.family; 3658 um->id.proto = x->id.proto; 3659 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr)); 3660 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr)); 3661 um->new_sport = new_sport; 3662 um->old_sport = x->encap->encap_sport; 3663 um->reqid = x->props.reqid; 3664 3665 nlmsg_end(skb, nlh); 3666 return 0; 3667 } 3668 3669 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, 3670 __be16 sport) 3671 { 3672 struct net *net = xs_net(x); 3673 struct sk_buff *skb; 3674 int err; 3675 3676 if (x->id.proto != IPPROTO_ESP) 3677 return -EINVAL; 3678 3679 if (!x->encap) 3680 return -EINVAL; 3681 3682 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC); 3683 if (skb == NULL) 3684 return -ENOMEM; 3685 3686 err = build_mapping(skb, x, ipaddr, sport); 3687 BUG_ON(err < 0); 3688 3689 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING); 3690 } 3691 3692 static bool xfrm_is_alive(const struct km_event *c) 3693 { 3694 return (bool)xfrm_acquire_is_on(c->net); 3695 } 3696 3697 static struct xfrm_mgr netlink_mgr = { 3698 .notify = xfrm_send_state_notify, 3699 .acquire = xfrm_send_acquire, 3700 .compile_policy = xfrm_compile_policy, 3701 .notify_policy = xfrm_send_policy_notify, 3702 .report = xfrm_send_report, 3703 .migrate = xfrm_send_migrate, 3704 .new_mapping = xfrm_send_mapping, 3705 .is_alive = xfrm_is_alive, 3706 }; 3707 3708 static int __net_init xfrm_user_net_init(struct net *net) 3709 { 3710 struct sock *nlsk; 3711 struct netlink_kernel_cfg cfg = { 3712 .groups = XFRMNLGRP_MAX, 3713 .input = xfrm_netlink_rcv, 3714 }; 3715 3716 nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg); 3717 if (nlsk == NULL) 3718 return -ENOMEM; 3719 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */ 3720 
rcu_assign_pointer(net->xfrm.nlsk, nlsk); 3721 return 0; 3722 } 3723 3724 static void __net_exit xfrm_user_net_pre_exit(struct net *net) 3725 { 3726 RCU_INIT_POINTER(net->xfrm.nlsk, NULL); 3727 } 3728 3729 static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list) 3730 { 3731 struct net *net; 3732 3733 list_for_each_entry(net, net_exit_list, exit_list) 3734 netlink_kernel_release(net->xfrm.nlsk_stash); 3735 } 3736 3737 static struct pernet_operations xfrm_user_net_ops = { 3738 .init = xfrm_user_net_init, 3739 .pre_exit = xfrm_user_net_pre_exit, 3740 .exit_batch = xfrm_user_net_exit, 3741 }; 3742 3743 static int __init xfrm_user_init(void) 3744 { 3745 int rv; 3746 3747 printk(KERN_INFO "Initializing XFRM netlink socket\n"); 3748 3749 rv = register_pernet_subsys(&xfrm_user_net_ops); 3750 if (rv < 0) 3751 return rv; 3752 xfrm_register_km(&netlink_mgr); 3753 return 0; 3754 } 3755 3756 static void __exit xfrm_user_exit(void) 3757 { 3758 xfrm_unregister_km(&netlink_mgr); 3759 unregister_pernet_subsys(&xfrm_user_net_ops); 3760 } 3761 3762 module_init(xfrm_user_init); 3763 module_exit(xfrm_user_exit); 3764 MODULE_LICENSE("GPL"); 3765 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM); 3766