/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
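/*
 * Minimal user-space usage sketch (a rough illustration only; it assumes the
 * AF_ALG uapi definitions from linux/if_alg.h, uses placeholder key/keylen
 * variables and omits error handling):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen);
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * The cipher operation itself is then driven through opfd: sendmsg() with
 * ALG_SET_OP, ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN control messages plus the
 * input data as payload, followed by read()/recvmsg() to collect the result.
 */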
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_ctx {
	struct aead_sg_list tsgl;
	/*
	 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
	 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
	 * pages
	 */
#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
	struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	return ctx->used >= ctx->aead_assoclen + as;
}

static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	return err;
}

static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		unsigned long len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			int plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(int, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}
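/*
 * Zero-copy counterpart of aead_sendmsg(): instead of copying the payload
 * into freshly allocated pages, take a reference on the caller-supplied page
 * and link it directly into the transmit scatterlist.
 */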
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
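/*
 * Perform the actual cipher operation: wait for the sender to finish if more
 * data was announced, verify that at least the associated data and the
 * authentication tag have been queued, map the caller's iovecs into
 * scatterlists for the output, run the encryption or decryption synchronously
 * and return the number of bytes produced.
 */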
static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	unsigned int i = 0;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;
	unsigned int cnt = 0;

	/* Limit number of IOV blocks to be accessed below */
	if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
		return -ENOMSG;

	lock_sock(sk);

	/*
	 * AEAD memory structure: For encryption, the tag is appended to the
	 * ciphertext, which implies that the memory allocated for the
	 * ciphertext must be increased by the tag length. For decryption, the
	 * tag is expected to be concatenated to the ciphertext. The plaintext
	 * therefore has a memory size of the ciphertext minus the tag length.
	 *
	 * The memory structure for the cipher operation is as follows:
	 *	AEAD encryption input:  assoc data || plaintext
	 *	AEAD encryption output: ciphertext || auth tag
	 *	AEAD decryption input:  assoc data || ciphertext || auth tag
	 *	AEAD decryption output: plaintext
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendmsg/sendpage
	 * inform the data sender that something is wrong, but they are not
	 * needed to maintain kernel integrity. We need the check here too in
	 * case user space ignores the error returned by sendmsg/sendpage and
	 * still calls recvmsg; this check protects kernel integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	outlen = used;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);

	/* convert iovecs of output buffers into scatterlists */
	while (iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
				     seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (cnt)
			af_alg_link_sg(&ctx->rsgl[cnt - 1], &ctx->rsgl[cnt]);

		/* we do not need more iovecs as we have sufficient memory */
		if (outlen <= usedpages)
			break;
		iov_iter_advance(&msg->msg_iter, err);
		cnt++;
	}

	err = -EINVAL;
	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen)
		goto unlock;

	sg_mark_end(sgl->sg + sgl->cur - 1);

	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);
		goto unlock;
	}

	aead_put_sgl(sk);

	err = 0;

unlock:
	for (i = 0; i < cnt; i++)
		af_alg_free_sg(&ctx->rsgl[i]);

	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family = PF_ALG,

	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.getname = sock_no_getname,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.getsockopt = sock_no_getsockopt,
	.mmap = sock_no_mmap,
	.bind = sock_no_bind,
	.accept = sock_no_accept,
	.setsockopt = sock_no_setsockopt,

	.release = af_alg_release,
	.sendmsg = aead_sendmsg,
	.sendpage = aead_sendpage,
	.recvmsg = aead_recvmsg,
	.poll = aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type | CRYPTO_ALG_AEAD_NEW,
				 mask | CRYPTO_ALG_AEAD_NEW);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}
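/*
 * Set up the per-socket operation state: allocate the request context and IV
 * from socket memory, initialize the AEAD request and completion, and install
 * the socket destructor that releases everything again.
 */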
static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind = aead_bind,
	.release = aead_release,
	.setkey = aead_setkey,
	.setauthsize = aead_setauthsize,
	.accept = aead_accept_parent,
	.ops = &algif_aead_ops,
	.name = "aead",
	.owner = THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);

	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");