/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
        unsigned int cur;
        struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
        struct af_alg_sgl sgl;
        struct list_head list;
};

struct aead_async_req {
        struct scatterlist *tsgl;
        struct aead_async_rsgl first_rsgl;
        struct list_head list;
        struct kiocb *iocb;
        unsigned int tsgls;
        char iv[];
};

struct aead_ctx {
        struct aead_sg_list tsgl;
        struct aead_async_rsgl first_rsgl;
        struct list_head list;

        void *iv;

        struct af_alg_completion completion;

        unsigned long used;

        unsigned int len;
        bool more;
        bool merge;
        bool enc;

        size_t aead_assoclen;
        struct aead_request aead_req;
};

static inline int aead_sndbuf(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;

        return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
                          ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
        return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
        unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

        /*
         * The minimum amount of memory needed for an AEAD cipher is
         * the AAD and in case of decryption the tag.
         */
        return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
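
/*
 * For orientation, a minimal user-space sketch of driving this interface.
 * This is illustrative only and not part of this file: it assumes a kernel
 * exposing AF_ALG together with <linux/if_alg.h>, an available "gcm(aes)"
 * implementation and a placeholder all-zero key; error handling is omitted.
 *
 *	#include <linux/if_alg.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_alg sa = {
 *			.salg_family = AF_ALG,
 *			.salg_type = "aead",
 *			.salg_name = "gcm(aes)",
 *		};
 *		unsigned char key[16] = { 0 };
 *		int tfmfd, opfd;
 *
 *		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *		opfd = accept(tfmfd, NULL, 0);
 *		close(opfd);
 *		close(tfmfd);
 *		return 0;
 *	}
 *
 * A sendmsg() on opfd then carries ALG_SET_OP, ALG_SET_IV and
 * ALG_SET_AEAD_ASSOCLEN control messages together with the AAD-prefixed
 * payload, and read()/recvmsg() returns the result produced by the
 * handlers below.
 */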

static void aead_reset_ctx(struct aead_ctx *ctx)
{
        struct aead_sg_list *sgl = &ctx->tsgl;

        sg_init_table(sgl->sg, ALG_MAX_PAGES);
        sgl->cur = 0;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
}

static void aead_put_sgl(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct scatterlist *sg = sgl->sg;
        unsigned int i;

        for (i = 0; i < sgl->cur; i++) {
                if (!sg_page(sg + i))
                        continue;

                put_page(sg_page(sg + i));
                sg_assign_page(sg + i, NULL);
        }
        aead_reset_ctx(ctx);
}

static void aead_wmem_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        if (!aead_writable(sk))
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        long timeout;
        int err = -ERESTARTSYS;

        if (flags & MSG_DONTWAIT)
                return -EAGAIN;

        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        add_wait_queue(sk_sleep(sk), &wait);
        for (;;) {
                if (signal_pending(current))
                        break;
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
                        err = 0;
                        break;
                }
        }
        remove_wait_queue(sk_sleep(sk), &wait);

        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

        return err;
}

static void aead_data_wakeup(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct socket_wq *wq;

        if (ctx->more)
                return;
        if (!ctx->used)
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}
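
/*
 * Transmit path: aead_sendmsg() below parses the ALG_SET_OP, ALG_SET_IV and
 * ALG_SET_AEAD_ASSOCLEN control messages via af_alg_cmsg_send() and then
 * copies the payload into the tx scatterlist. A partially filled page left
 * over from a previous call is topped up first (ctx->merge) before fresh
 * pages are allocated, bounded by ALG_MAX_PAGES. Unless MSG_MORE announces
 * further data, the buffered data must already pass aead_sufficient_data();
 * if not, the entire tx buffer is dropped and -EMSGSIZE returned.
 */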

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned ivsize =
                crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct af_alg_control con = {};
        long copied = 0;
        bool enc = 0;
        bool init = 0;
        int err = -EINVAL;

        if (msg->msg_controllen) {
                err = af_alg_cmsg_send(msg, &con);
                if (err)
                        return err;

                init = 1;
                switch (con.op) {
                case ALG_OP_ENCRYPT:
                        enc = 1;
                        break;
                case ALG_OP_DECRYPT:
                        enc = 0;
                        break;
                default:
                        return -EINVAL;
                }

                if (con.iv && con.iv->ivlen != ivsize)
                        return -EINVAL;
        }

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (init) {
                ctx->enc = enc;
                if (con.iv)
                        memcpy(ctx->iv, con.iv->iv, ivsize);

                ctx->aead_assoclen = con.aead_assoclen;
        }

        while (size) {
                size_t len = size;
                struct scatterlist *sg = NULL;

                /* use the existing memory in an allocated page */
                if (ctx->merge) {
                        sg = sgl->sg + sgl->cur - 1;
                        len = min_t(unsigned long, len,
                                    PAGE_SIZE - sg->offset - sg->length);
                        err = memcpy_from_msg(page_address(sg_page(sg)) +
                                              sg->offset + sg->length,
                                              msg, len);
                        if (err)
                                goto unlock;

                        sg->length += len;
                        ctx->merge = (sg->offset + sg->length) &
                                     (PAGE_SIZE - 1);

                        ctx->used += len;
                        copied += len;
                        size -= len;
                        continue;
                }

                if (!aead_writable(sk)) {
                        /* user space sent too much data */
                        aead_put_sgl(sk);
                        err = -EMSGSIZE;
                        goto unlock;
                }

                /* allocate a new page */
                len = min_t(unsigned long, size, aead_sndbuf(sk));
                while (len) {
                        size_t plen = 0;

                        if (sgl->cur >= ALG_MAX_PAGES) {
                                aead_put_sgl(sk);
                                err = -E2BIG;
                                goto unlock;
                        }

                        sg = sgl->sg + sgl->cur;
                        plen = min_t(size_t, len, PAGE_SIZE);

                        sg_assign_page(sg, alloc_page(GFP_KERNEL));
                        err = -ENOMEM;
                        if (!sg_page(sg))
                                goto unlock;

                        err = memcpy_from_msg(page_address(sg_page(sg)),
                                              msg, plen);
                        if (err) {
                                __free_page(sg_page(sg));
                                sg_assign_page(sg, NULL);
                                goto unlock;
                        }

                        sg->offset = 0;
                        sg->length = plen;
                        len -= plen;
                        ctx->used += plen;
                        copied += plen;
                        sgl->cur++;
                        size -= plen;
                        ctx->merge = plen & (PAGE_SIZE - 1);
                }
        }

        err = 0;

        ctx->more = msg->msg_flags & MSG_MORE;
        if (!ctx->more && !aead_sufficient_data(ctx)) {
                aead_put_sgl(sk);
                err = -EMSGSIZE;
        }

unlock:
        aead_data_wakeup(sk);
        release_sock(sk);

        return err ?: copied;
}

static ssize_t aead_sendpage(struct socket *sock, struct page *page,
                             int offset, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct aead_sg_list *sgl = &ctx->tsgl;
        int err = -EINVAL;

        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;

        if (sgl->cur >= ALG_MAX_PAGES)
                return -E2BIG;

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (!size)
                goto done;

        if (!aead_writable(sk)) {
                /* user space sent too much data */
                aead_put_sgl(sk);
                err = -EMSGSIZE;
                goto unlock;
        }

        ctx->merge = 0;

        get_page(page);
        sg_set_page(sgl->sg + sgl->cur, page, size, offset);
        sgl->cur++;
        ctx->used += size;

        err = 0;

done:
        ctx->more = flags & MSG_MORE;
        if (!ctx->more && !aead_sufficient_data(ctx)) {
                aead_put_sgl(sk);
                err = -EMSGSIZE;
        }

unlock:
        aead_data_wakeup(sk);
        release_sock(sk);

        return err ?: size;
}

#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
                ((char *)req + sizeof(struct aead_request) + \
                 crypto_aead_reqsize(tfm))

#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
        crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
        sizeof(struct aead_request)
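
/*
 * The two helpers above address a single sock_kmalloc() allocation that the
 * async path lays out as four consecutive regions:
 *
 *	req
 *	 |- struct aead_request
 *	 |- tfm-private request context (crypto_aead_reqsize(tfm) bytes)
 *	 |- struct aead_async_req        <- GET_ASYM_REQ(req, tfm)
 *	 `- iv[crypto_aead_ivsize(tfm)]  (the areq->iv flexible array)
 *
 * GET_REQ_SIZE(tfm) yields the total size of this blob.
 */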

static void aead_async_cb(struct crypto_async_request *_req, int err)
{
        struct sock *sk = _req->data;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
        struct aead_request *req = aead_request_cast(_req);
        struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
        struct scatterlist *sg = areq->tsgl;
        struct aead_async_rsgl *rsgl;
        struct kiocb *iocb = areq->iocb;
        unsigned int i, reqlen = GET_REQ_SIZE(tfm);

        list_for_each_entry(rsgl, &areq->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &areq->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
        }

        for (i = 0; i < areq->tsgls; i++)
                put_page(sg_page(sg + i));

        sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
        sock_kfree_s(sk, req, reqlen);
        __sock_put(sk);
        iocb->ki_complete(iocb, err, err);
}
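
/*
 * Asynchronous receive path: the request below is self-contained, so the tx
 * pages are handed over from ctx to the request and the socket buffer can be
 * refilled while the cipher operation is in flight. If the cipher returns
 * -EINPROGRESS, a socket reference is taken, -EIOCBQUEUED is reported to the
 * caller, and aead_async_cb() above later frees the rx/tx resources, drops
 * the reference and completes the iocb.
 */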

static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
                              int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
        struct aead_async_req *areq;
        struct aead_request *req = NULL;
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
        unsigned int as = crypto_aead_authsize(tfm);
        unsigned int i, reqlen = GET_REQ_SIZE(tfm);
        int err = -ENOMEM;
        unsigned long used;
        size_t outlen = 0;
        size_t usedpages = 0;

        lock_sock(sk);
        if (ctx->more) {
                err = aead_wait_for_data(sk, flags);
                if (err)
                        goto unlock;
        }

        if (!aead_sufficient_data(ctx))
                goto unlock;

        used = ctx->used;
        if (ctx->enc)
                outlen = used + as;
        else
                outlen = used - as;

        req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
        if (unlikely(!req))
                goto unlock;

        areq = GET_ASYM_REQ(req, tfm);
        memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
        INIT_LIST_HEAD(&areq->list);
        areq->iocb = msg->msg_iocb;
        memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
        aead_request_set_tfm(req, tfm);
        aead_request_set_ad(req, ctx->aead_assoclen);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  aead_async_cb, sk);
        used -= ctx->aead_assoclen;

        /* take over all tx sgls from ctx */
        areq->tsgl = sock_kmalloc(sk,
                                  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
                                  GFP_KERNEL);
        if (unlikely(!areq->tsgl))
                goto free;

        sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
        for (i = 0; i < sgl->cur; i++)
                sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
                            sgl->sg[i].length, sgl->sg[i].offset);

        areq->tsgls = sgl->cur;

        /* create rx sgls */
        while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
                size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
                                      (outlen - usedpages));

                if (list_empty(&areq->list)) {
                        rsgl = &areq->first_rsgl;
                } else {
                        rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
                        if (unlikely(!rsgl)) {
                                err = -ENOMEM;
                                goto free;
                        }
                }
                rsgl->sgl.npages = 0;
                list_add_tail(&rsgl->list, &areq->list);

                /* make one iovec available as scatterlist */
                err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
                if (err < 0)
                        goto free;

                usedpages += err;

                /* chain the new scatterlist with previous one */
                if (last_rsgl)
                        af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

                last_rsgl = rsgl;

                iov_iter_advance(&msg->msg_iter, err);
        }

        /* ensure output buffer is sufficiently large */
        if (usedpages < outlen) {
                err = -EINVAL;
                goto unlock;
        }

        aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
                               areq->iv);
        err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
        if (err) {
                if (err == -EINPROGRESS) {
                        sock_hold(sk);
                        err = -EIOCBQUEUED;
                        aead_reset_ctx(ctx);
                        goto unlock;
                } else if (err == -EBADMSG) {
                        aead_put_sgl(sk);
                }
                goto free;
        }
        aead_put_sgl(sk);

free:
        list_for_each_entry(rsgl, &areq->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &areq->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
        }
        if (areq->tsgl)
                sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
        if (req)
                sock_kfree_s(sk, req, reqlen);
unlock:
        aead_wmem_wakeup(sk);
        release_sock(sk);
        return err ? err : outlen;
}

static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct aead_async_rsgl *last_rsgl = NULL;
        struct aead_async_rsgl *rsgl, *tmp;
        int err = -EINVAL;
        unsigned long used = 0;
        size_t outlen = 0;
        size_t usedpages = 0;

        lock_sock(sk);

        /*
         * Please see documentation of aead_request_set_crypt for the
         * description of the AEAD memory structure expected from the caller.
         */

        if (ctx->more) {
                err = aead_wait_for_data(sk, flags);
                if (err)
                        goto unlock;
        }

        /* data length provided by caller via sendmsg/sendpage */
        used = ctx->used;

        /*
         * Make sure sufficient data is present -- note, the same check is
         * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
         * shall provide information to the data sender that something is
         * wrong, but they are irrelevant for maintaining kernel integrity.
         * We need this check here too in case user space decides not to
         * honor the error from sendmsg/sendpage and still calls recvmsg.
         * The check here protects the kernel integrity.
         */
        if (!aead_sufficient_data(ctx))
                goto unlock;

        /*
         * Calculate the minimum output buffer size holding the result of the
         * cipher operation. When encrypting data, the receiving buffer is
         * larger by the tag length compared to the input buffer as the
         * encryption operation generates the tag. For decryption, the input
         * buffer provides the tag which is consumed resulting in only the
         * plaintext without a buffer for the tag returned to the caller.
         */
        if (ctx->enc)
                outlen = used + as;
        else
                outlen = used - as;
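
        /*
         * For example, with gcm(aes) and its default 16 byte tag, 20 bytes
         * of AAD and 128 bytes of plaintext give used = 148 on encryption
         * and thus outlen = 148 + 16 = 164; decrypting those 164 bytes
         * yields outlen = 164 - 16 = 148 (AAD plus plaintext).
         */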

        /*
         * The cipher operation input data is reduced by the associated data
         * length as this data is processed separately later on.
         */
        used -= ctx->aead_assoclen;

        /* convert iovecs of output buffers into scatterlists */
        while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
                size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
                                      (outlen - usedpages));

                if (list_empty(&ctx->list)) {
                        rsgl = &ctx->first_rsgl;
                } else {
                        rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
                        if (unlikely(!rsgl)) {
                                err = -ENOMEM;
                                goto unlock;
                        }
                }
                rsgl->sgl.npages = 0;
                list_add_tail(&rsgl->list, &ctx->list);

                /* make one iovec available as scatterlist */
                err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
                if (err < 0)
                        goto unlock;
                usedpages += err;
                /* chain the new scatterlist with previous one */
                if (last_rsgl)
                        af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

                last_rsgl = rsgl;

                iov_iter_advance(&msg->msg_iter, err);
        }

        /* ensure output buffer is sufficiently large */
        if (usedpages < outlen) {
                err = -EINVAL;
                goto unlock;
        }

        sg_mark_end(sgl->sg + sgl->cur - 1);
        aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
                               used, ctx->iv);
        aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

        err = af_alg_wait_for_completion(ctx->enc ?
                                         crypto_aead_encrypt(&ctx->aead_req) :
                                         crypto_aead_decrypt(&ctx->aead_req),
                                         &ctx->completion);

        if (err) {
                /* EBADMSG implies a valid cipher operation took place */
                if (err == -EBADMSG)
                        aead_put_sgl(sk);

                goto unlock;
        }

        aead_put_sgl(sk);
        err = 0;

unlock:
        list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &ctx->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
                list_del(&rsgl->list);
        }
        INIT_LIST_HEAD(&ctx->list);
        aead_wmem_wakeup(sk);
        release_sock(sk);

        return err ? err : outlen;
}
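
/*
 * recvmsg() entry point: an AIO read that supplies a kiocb which is not
 * synchronous selects the asynchronous path; ordinary read()/recvmsg()
 * calls fall back to the synchronous variant.
 */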

static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
                        int flags)
{
        return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
               aead_recvmsg_async(sock, msg, flags) :
               aead_recvmsg_sync(sock, msg, flags);
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
                              poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        if (!ctx->more)
                mask |= POLLIN | POLLRDNORM;

        if (aead_writable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

static struct proto_ops algif_aead_ops = {
        .family         = PF_ALG,

        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .getname        = sock_no_getname,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .getsockopt     = sock_no_getsockopt,
        .mmap           = sock_no_mmap,
        .bind           = sock_no_bind,
        .accept         = sock_no_accept,
        .setsockopt     = sock_no_setsockopt,

        .release        = af_alg_release,
        .sendmsg        = aead_sendmsg,
        .sendpage       = aead_sendpage,
        .recvmsg        = aead_recvmsg,
        .poll           = aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
        return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
        crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
        return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
        return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned int ivlen = crypto_aead_ivsize(
                                crypto_aead_reqtfm(&ctx->aead_req));

        WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
        aead_put_sgl(sk);
        sock_kzfree_s(sk, ctx->iv, ivlen);
        sock_kfree_s(sk, ctx, ctx->len);
        af_alg_release_parent(sk);
}

static int aead_accept_parent(void *private, struct sock *sk)
{
        struct aead_ctx *ctx;
        struct alg_sock *ask = alg_sk(sk);
        unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
        unsigned int ivlen = crypto_aead_ivsize(private);

        ctx = sock_kmalloc(sk, len, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        memset(ctx, 0, len);

        ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
        if (!ctx->iv) {
                sock_kfree_s(sk, ctx, len);
                return -ENOMEM;
        }
        memset(ctx->iv, 0, ivlen);

        ctx->len = len;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
        ctx->tsgl.cur = 0;
        ctx->aead_assoclen = 0;
        af_alg_init_completion(&ctx->completion);
        sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
        INIT_LIST_HEAD(&ctx->list);

        ask->private = ctx;

        aead_request_set_tfm(&ctx->aead_req, private);
        aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  af_alg_complete, &ctx->completion);

        sk->sk_destruct = aead_sock_destruct;

        return 0;
}

static const struct af_alg_type algif_type_aead = {
        .bind           = aead_bind,
        .release        = aead_release,
        .setkey         = aead_setkey,
        .setauthsize    = aead_setauthsize,
        .accept         = aead_accept_parent,
        .ops            = &algif_aead_ops,
        .name           = "aead",
        .owner          = THIS_MODULE
};

static int __init algif_aead_init(void)
{
        return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
        int err = af_alg_unregister_type(&algif_type_aead);
        BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");