/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};

struct skcipher_tfm {
	struct crypto_skcipher *skcipher;
	bool has_key;
};

struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	atomic_t inflight;
	size_t used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct skcipher_request req;
};

struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
	atomic_t *inflight;
	struct skcipher_request req;
};

#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)

static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i)
		put_page(sg_page(sg));

	kfree(sreq->tsg);
}

static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct skcipher_async_req *sreq = req->data;
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(sreq->inflight);
	skcipher_free_async_sgls(sreq);
	kzfree(sreq);
	iocb->ki_complete(iocb, err, err);
}

static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}

static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}
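
/*
 * Release the first @used bytes of queued data from the head of
 * ctx->tsgl.  When @put is non-zero the underlying pages are released
 * as well; the asynchronous receive path passes 0 because the page
 * references have already been handed over to the request's private
 * tx list (sreq->tsg) and are dropped later in
 * skcipher_free_async_sgls().
 */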
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}

static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
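
/*
 * skcipher_sendmsg() queues data from user space and records the
 * operation and IV passed as ancillary data.  An illustrative sketch
 * (hypothetical caller code, error handling omitted) of the
 * control-message layout that selects encryption:
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32))];
 *	struct msghdr msg = {
 *		.msg_control	= cbuf,
 *		.msg_controllen	= sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_ALG;
 *	cmsg->cmsg_type  = ALG_SET_OP;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 * af_alg_cmsg_send() parses ALG_SET_OP and ALG_SET_IV items of this
 * form into struct af_alg_control.
 */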
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		size_t plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);
		do {
			i = sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
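
/*
 * Zero-copy variant of the send path: instead of copying user data
 * into freshly allocated pages, reference the caller's page directly
 * in the tx scatterlist.  MSG_SENDPAGE_NOTLAST is mapped to MSG_MORE
 * so that splice()-driven writes keep the request open until the
 * final chunk arrives.
 */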
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int nents = 0;

	list_for_each_entry(sgl, &ctx->tsgl, list) {
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		nents += sg_nents(sg);
	}
	return nents;
}
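
/*
 * Asynchronous receive: the transform may still be running when
 * recvmsg() returns.  The queued tx scatterlist is copied into a
 * private array owned by the request (sreq->tsg) so the socket's own
 * list can be released immediately; completion is reported through
 * the iocb from skcipher_async_cb(), and -EIOCBQUEUED is returned to
 * the caller while the request is in flight.
 */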
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct skcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents;
	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	int err = -ENOMEM;
	bool mark = false;
	char *iv;

	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
	if (unlikely(!sreq))
		goto out;

	req = &sreq->req;
	iv = (char *)(req + 1) + reqsize;
	sreq->iocb = msg->msg_iocb;
	INIT_LIST_HEAD(&sreq->list);
	sreq->inflight = &ctx->inflight;

	lock_sock(sk);
	tx_nents = skcipher_all_sg_nents(ctx);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg))
		goto unlock;
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(iv, ctx->iv, ivsize);
	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      skcipher_async_cb, sreq);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;

			/* Ran out of tx slots in async request,
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp) {
				err = -ENOMEM;
				goto free;
			}

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				   len, iv);
	err = ctx->enc ? crypto_skcipher_encrypt(req) :
			 crypto_skcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		sreq = NULL;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	kzfree(sreq);
out:
	return err;
}

static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned bs = crypto_skcipher_blocksize(tfm);
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
					   ctx->iv);

		err = af_alg_wait_for_completion(
			ctx->enc ?
				crypto_skcipher_encrypt(&ctx->req) :
				crypto_skcipher_decrypt(&ctx->req),
			&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
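
/*
 * recvmsg() entry point: requests issued through an asynchronous iocb
 * (e.g. io_submit(2)) take the async path, everything else is handled
 * synchronously.  Note that the synchronous path rounds the amount
 * processed down to the cipher block size whenever more data is
 * expected or still queued, so partial blocks are never pushed
 * through the transform prematurely.
 */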
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		skcipher_recvmsg_async(sock, msg, flags) :
		skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		= PF_ALG,

	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.getname	= sock_no_getname,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.getsockopt	= sock_no_getsockopt,
	.mmap		= sock_no_mmap,
	.bind		= sock_no_bind,
	.accept		= sock_no_accept,
	.setsockopt	= sock_no_setsockopt,

	.release	= af_alg_release,
	.sendmsg	= skcipher_sendmsg,
	.sendpage	= skcipher_sendpage,
	.recvmsg	= skcipher_recvmsg,
	.poll		= skcipher_poll,
};

static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
				       int offset, size_t size, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
	.family		= PF_ALG,

	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.getname	= sock_no_getname,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.getsockopt	= sock_no_getsockopt,
	.mmap		= sock_no_mmap,
	.bind		= sock_no_bind,
	.accept		= sock_no_accept,
	.setsockopt	= sock_no_setsockopt,

	.release	= af_alg_release,
	.sendmsg	= skcipher_sendmsg_nokey,
	.sendpage	= skcipher_sendpage_nokey,
	.recvmsg	= skcipher_recvmsg_nokey,
	.poll		= skcipher_poll,
};
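
/*
 * af_alg type operations: skcipher_bind() allocates the transform
 * when user space binds a sockaddr_alg, and skcipher_setkey() records
 * whether a key has been supplied so that the *_nokey ops above can
 * keep returning -ENOKEY from skcipher_check_key() until has_key is
 * set.
 */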
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	struct skcipher_tfm *tfm;
	struct crypto_skcipher *skcipher;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	skcipher = crypto_alloc_skcipher(name, type, mask);
	if (IS_ERR(skcipher)) {
		kfree(tfm);
		return ERR_CAST(skcipher);
	}

	tfm->skcipher = skcipher;

	return tfm;
}

static void skcipher_release(void *private)
{
	struct skcipher_tfm *tfm = private;

	crypto_free_skcipher(tfm->skcipher);
	kfree(tfm);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *tfm = private;
	int err;

	err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
	tfm->has_key = !err;

	return err;
}

static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	skcipher_request_set_tfm(&ctx->req, skcipher);
	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						 CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_tfm *tfm = private;

	if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		= skcipher_bind,
	.release	= skcipher_release,
	.setkey		= skcipher_setkey,
	.accept		= skcipher_accept_parent,
	.accept_nokey	= skcipher_accept_parent_nokey,
	.ops		= &algif_skcipher_ops,
	.ops_nokey	= &algif_skcipher_ops_nokey,
	.name		= "skcipher",
	.owner		= THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);

	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");
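
/*
 * Illustrative user-space usage of this interface (a sketch, not part
 * of the module; error handling omitted and the algorithm and key are
 * arbitrary examples):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)",
 *	};
 *	unsigned char key[16] = { 0 };
 *	int tfmfd, opfd;
 *
 *	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *	opfd = accept(tfmfd, NULL, 0);
 *
 * The caller then issues sendmsg() on opfd with ALG_SET_OP/ALG_SET_IV
 * ancillary data plus the plaintext, and read()s the ciphertext back
 * from the same descriptor.
 */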