/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
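
/*
 * Illustrative user-space sketch (an assumption for documentation purposes,
 * not part of this file): driving this interface through an AF_ALG socket.
 * The algorithm name "gcm(aes)", the all-zero key and the sizes below are
 * arbitrary example values.
 *
 *	int tfmfd, opfd;
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)",
 *	};
 *	static const __u8 key[16];
 *
 *	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *	opfd = accept(tfmfd, NULL, 0);
 *
 * A sendmsg() on opfd then carries the ALG_SET_OP, ALG_SET_IV and
 * ALG_SET_AEAD_ASSOCLEN control messages together with the input data,
 * sized as described in the memory-structure comment in
 * aead_recvmsg_sync() below; a subsequent read()/recvmsg() returns the
 * result of the cipher operation.
 */
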
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct aead_async_req {
	struct scatterlist *tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;
	struct kiocb *iocb;
	unsigned int tsgls;
	char iv[];
};

struct aead_ctx {
	struct aead_sg_list tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	return ctx->used >= ctx->aead_assoclen + as;
}
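
/*
 * Worked example for the helpers above (illustrative numbers, assuming
 * 4 KiB pages): with sk_sndbuf = 65536 and 8192 bytes already queued,
 * aead_sndbuf() returns 65536 - 8192 = 57344, so aead_writable() still
 * accepts at least one more page of data. With a 16-byte authentication
 * tag and 8 bytes of associated data, aead_sufficient_data() requires
 * ctx->used >= 8 + 16 = 24 before a cipher operation may start.
 */
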
static void aead_reset_ctx(struct aead_ctx *ctx)
{
	struct aead_sg_list *sgl = &ctx->tsgl;

	sg_init_table(sgl->sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	aead_reset_ctx(ctx);
}

static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}

static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
		((char *)req + sizeof(struct aead_request) + \
		 crypto_aead_reqsize(tfm))

#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
	sizeof(struct aead_request)
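
/*
 * The async path below works on one sock_kmalloc() buffer whose layout
 * follows from the two macros above:
 *
 *	struct aead_request || tfm-private request context ||
 *	struct aead_async_req || IV (crypto_aead_ivsize(tfm) bytes)
 *
 * GET_REQ_SIZE() computes the total size of that buffer and
 * GET_ASYM_REQ() locates the embedded struct aead_async_req, whose
 * trailing flexible iv[] member addresses the per-request IV copy at
 * the end.
 */
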
static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct sock *sk = _req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_request *req = aead_request_cast(_req);
	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
	struct scatterlist *sg = areq->tsgl;
	struct aead_async_rsgl *rsgl;
	struct kiocb *iocb = areq->iocb;
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);

	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	for (i = 0; i < areq->tsgls; i++)
		put_page(sg_page(sg + i));

	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	sock_kfree_s(sk, req, reqlen);
	__sock_put(sk);
	iocb->ki_complete(iocb, err, err);
}

static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
			      int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_async_req *areq;
	struct aead_request *req = NULL;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
	int err = -ENOMEM;
	unsigned long used;
	size_t outlen;
	size_t usedpages = 0;

	lock_sock(sk);
	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	used = ctx->used;
	outlen = used;

	if (!aead_sufficient_data(ctx))
		goto unlock;

	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	areq = GET_ASYM_REQ(req, tfm);
	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
	INIT_LIST_HEAD(&areq->list);
	areq->iocb = msg->msg_iocb;
	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ctx->aead_assoclen);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  aead_async_cb, sk);
	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);

	/* take over all tx sgls from ctx */
	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
				  GFP_KERNEL);
	if (unlikely(!areq->tsgl))
		goto free;

	sg_init_table(areq->tsgl, sgl->cur);
	for (i = 0; i < sgl->cur; i++)
		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
			    sgl->sg[i].length, sgl->sg[i].offset);

	areq->tsgls = sgl->cur;

	/* create rx sgls */
	while (iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&areq->list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		/* we do not need more iovecs as we have sufficient memory */
		if (outlen <= usedpages)
			break;

		iov_iter_advance(&msg->msg_iter, err);
	}
	err = -EINVAL;
	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen)
		goto free;

	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
			       areq->iv);
	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
	if (err) {
		if (err == -EINPROGRESS) {
			sock_hold(sk);
			err = -EIOCBQUEUED;
			aead_reset_ctx(ctx);
			goto unlock;
		} else if (err == -EBADMSG) {
			aead_put_sgl(sk);
		}
		goto free;
	}
	aead_put_sgl(sk);

free:
	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	if (areq->tsgl)
		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	if (req)
		sock_kfree_s(sk, req, reqlen);
unlock:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return err ? err : outlen;
}
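
/*
 * Return semantics of the async path above: -EINPROGRESS from the cipher
 * means the request was queued, so the tx pages now belong to the request
 * (they are released in aead_async_cb()), the context is reset to accept
 * new data, a socket reference is held until the callback runs, and
 * -EIOCBQUEUED tells the AIO core that completion arrives via the iocb.
 * -EBADMSG indicates an authentication failure of an otherwise valid
 * cipher operation and also consumes the queued data.
 */
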
static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL;
	struct aead_async_rsgl *rsgl, *tmp;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);

	/*
	 * AEAD memory structure: For encryption, the tag is appended to the
	 * ciphertext, which implies that the memory allocated for the
	 * ciphertext must be increased by the tag length. For decryption, the
	 * tag is expected to be concatenated to the ciphertext. The plaintext
	 * therefore has a memory size of the ciphertext minus the tag length.
	 *
	 * The memory structure for the cipher operation is as follows:
	 *	AEAD encryption input:  assoc data || plaintext
	 *	AEAD encryption output: ciphertext || auth tag
	 *	AEAD decryption input:  assoc data || ciphertext || auth tag
	 *	AEAD decryption output: plaintext
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
	 * inform the data sender that something is wrong, but they are
	 * irrelevant for maintaining kernel integrity. We need this check
	 * here too in case user space decides to not honor the error from
	 * sendmsg/sendpage and still calls recvmsg. The check here protects
	 * the kernel's integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	outlen = used;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);

	/* convert iovecs of output buffers into scatterlists */
	while (iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&ctx->list)) {
			rsgl = &ctx->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto unlock;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &ctx->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		/* we do not need more iovecs as we have sufficient memory */
		if (outlen <= usedpages)
			break;
		iov_iter_advance(&msg->msg_iter, err);
	}

	err = -EINVAL;
	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen)
		goto unlock;

	sg_mark_end(sgl->sg + sgl->cur - 1);
	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);

		goto unlock;
	}

	aead_put_sgl(sk);
	err = 0;

unlock:
	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &ctx->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
		list_del(&rsgl->list);
	}
	INIT_LIST_HEAD(&ctx->list);
	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
			int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		aead_recvmsg_async(sock, msg, flags) :
		aead_recvmsg_sync(sock, msg, flags);
}
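
/*
 * recvmsg() dispatch: the asynchronous path is only taken when the
 * caller supplied an iocb that is not synchronous, i.e. for AIO
 * submissions; ordinary read(2)/recvmsg(2) callers are served by
 * aead_recvmsg_sync().
 */
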
static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
	INIT_LIST_HEAD(&ctx->list);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");