/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
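/*
 * A minimal user-space sketch of this interface (illustrative only, not part
 * of the kernel build).  The ALG_* constants and struct af_alg_iv come from
 * <linux/if_alg.h>; "cbc(aes)", the all-zero key/IV and the single 16-byte
 * block are just example choices, and error handling is omitted:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_alg.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_alg sa = {
 *			.salg_family = AF_ALG,
 *			.salg_type   = "skcipher",
 *			.salg_name   = "cbc(aes)",
 *		};
 *		char key[16] = { 0 }, iv[16] = { 0 };
 *		char pt[16] = "single block!!!", ct[16];
 *		char cbuf[CMSG_SPACE(4) +
 *			  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
 *		struct iovec io = { .iov_base = pt, .iov_len = sizeof(pt) };
 *		struct msghdr msg = {
 *			.msg_control    = cbuf,
 *			.msg_controllen = sizeof(cbuf),
 *			.msg_iov        = &io,
 *			.msg_iovlen     = 1,
 *		};
 *		struct cmsghdr *cmsg;
 *		struct af_alg_iv *ivp;
 *		int tfmfd, opfd;
 *
 *		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *		opfd = accept(tfmfd, NULL, 0);
 *
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type  = ALG_SET_OP;
 *		cmsg->cmsg_len   = CMSG_LEN(4);
 *		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 *		cmsg = CMSG_NXTHDR(&msg, cmsg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type  = ALG_SET_IV;
 *		cmsg->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
 *		ivp = (struct af_alg_iv *)CMSG_DATA(cmsg);
 *		ivp->ivlen = 16;
 *		memcpy(ivp->iv, iv, 16);
 *
 *		sendmsg(opfd, &msg, 0);
 *		read(opfd, ct, sizeof(ct));
 *		return 0;
 *	}
 */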
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};

struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	unsigned used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct ablkcipher_request req;
};

#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)

static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}
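/*
 * Data queued by sendmsg()/sendpage() lives in ctx->tsgl, a list of
 * skcipher_sg_list blocks.  Each block is one sock_kmalloc() allocation
 * holding MAX_SGL_ENTS data entries plus one spare entry; MAX_SGL_ENTS is
 * sized so that the header and the entries together fit in roughly a page.
 * When a block fills up, skcipher_alloc_sgl() below appends a new one and
 * uses the spare entry to chain it to the previous block with
 * scatterwalk_sg_chain(), so the whole queue can later be handed to the
 * cipher as a single scatterlist.
 */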
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}

static void skcipher_pull_sgl(struct sock *sk, int used)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			int plen = min_t(int, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;

			put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used);
}

static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
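/*
 * sendmsg() only queues data for a request.  The operation (ALG_SET_OP) and
 * IV (ALG_SET_IV) arrive as SOL_ALG control messages and are latched into
 * the context only while the queue is empty, i.e. at the start of a request.
 * Plaintext/ciphertext is copied into freshly allocated pages; while the
 * last page is not yet full (ctx->merge), subsequent sends keep appending to
 * it.  MSG_MORE marks the request as incomplete, which makes recvmsg()
 * process only whole cipher blocks until the final chunk has been queued.
 */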
static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
			    struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!ctx->used) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		int plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_fromiovec(page_address(sg_page(sg)) +
					       sg->offset + sg->length,
					       msg->msg_iov, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		do {
			i = sgl->cur;
			plen = min_t(int, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_fromiovec(page_address(sg_page(sg + i)),
					       msg->msg_iov, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !list_empty(&ctx->tsgl))
		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !list_empty(&ctx->tsgl))
		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
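/*
 * recvmsg() performs the actual cipher operation: for each piece of the user
 * buffer it maps the destination pages with af_alg_make_sg(), runs the
 * queued source scatterlist through encrypt or decrypt with the current IV,
 * waits for completion and then releases the consumed source pages via
 * skcipher_pull_sgl().  Unless this is the final part of the request, the
 * amount processed is rounded down to a multiple of the cipher block size.
 */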
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
			    struct msghdr *msg, size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
		&ctx->req));
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	unsigned long iovlen;
	struct iovec *iov;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
	     iovlen--, iov++) {
		unsigned long seglen = iov->iov_len;
		char __user *from = iov->iov_base;

		while (seglen) {
			sgl = list_first_entry(&ctx->tsgl,
					       struct skcipher_sg_list, list);
			sg = sgl->sg;

			while (!sg->length)
				sg++;

			used = ctx->used;
			if (!used) {
				err = skcipher_wait_for_data(sk, flags);
				if (err)
					goto unlock;
			}

			used = min_t(unsigned long, used, seglen);

			used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
			err = used;
			if (err < 0)
				goto unlock;

			if (ctx->more || used < ctx->used)
				used -= used % bs;

			err = -EINVAL;
			if (!used)
				goto free;

			ablkcipher_request_set_crypt(&ctx->req, sg,
						     ctx->rsgl.sg, used,
						     ctx->iv);

			err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_ablkcipher_encrypt(&ctx->req) :
					crypto_ablkcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
			af_alg_free_sg(&ctx->rsgl);

			if (err)
				goto unlock;

			copied += used;
			from += used;
			seglen -= used;
			skcipher_pull_sgl(sk, used);
		}
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}


static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_ablkcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
	crypto_free_ablkcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_ablkcipher_setkey(private, key, keylen);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);

	skcipher_free_sgl(sk);
	sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	ablkcipher_request_set_tfm(&ctx->req, private);
	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.ops		=	&algif_skcipher_ops,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");