/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The following memory management concept is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data will only be
 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together
 * with the RX SGL release.
 */

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_tfm {
        struct crypto_aead *aead;
        struct crypto_sync_skcipher *null_tfm;
};

static inline bool aead_sufficient_data(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct sock *psk = ask->parent;
        struct alg_sock *pask = alg_sk(psk);
        struct af_alg_ctx *ctx = ask->private;
        struct aead_tfm *aeadc = pask->private;
        struct crypto_aead *tfm = aeadc->aead;
        unsigned int as = crypto_aead_authsize(tfm);

        /*
         * The minimum amount of memory needed for an AEAD cipher is
         * the AAD and, in case of decryption, the tag.
         */
        return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
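
/*
 * Illustrative user-space sketch of the interface described above (not part
 * of the kernel build): it assumes the "gcm(aes)" AEAD with its default
 * 16-byte tag is available; key, IV and buffer contents are placeholder
 * values and error handling is omitted for brevity.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_alg.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_alg sa = {
 *			.salg_family = AF_ALG,
 *			.salg_type   = "aead",
 *			.salg_name   = "gcm(aes)",
 *		};
 *		__u8 key[16] = { 0 }, iv[12] = { 0 };
 *		__u8 in[32] = { 0 };	// 16 bytes AAD || 16 bytes plaintext
 *		__u8 out[48];		// AAD || ciphertext || 16-byte tag
 *		char cbuf[CMSG_SPACE(sizeof(__u32)) +
 *			  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv)) +
 *			  CMSG_SPACE(sizeof(__u32))] = { 0 };
 *		struct iovec iov = { .iov_base = in, .iov_len = sizeof(in) };
 *		struct msghdr msg = {
 *			.msg_control	= cbuf,
 *			.msg_controllen	= sizeof(cbuf),
 *			.msg_iov	= &iov,
 *			.msg_iovlen	= 1,
 *		};
 *		struct af_alg_iv *alg_iv;
 *		struct cmsghdr *cmsg;
 *		int tfmfd, opfd;
 *
 *		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *		opfd = accept(tfmfd, NULL, 0);
 *
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_OP;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
 *		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 *		cmsg = CMSG_NXTHDR(&msg, cmsg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_IV;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(*alg_iv) + sizeof(iv));
 *		alg_iv = (void *)CMSG_DATA(cmsg);
 *		alg_iv->ivlen = sizeof(iv);
 *		memcpy(alg_iv->iv, iv, sizeof(iv));
 *
 *		cmsg = CMSG_NXTHDR(&msg, cmsg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
 *		*(__u32 *)CMSG_DATA(cmsg) = 16;
 *
 *		sendmsg(opfd, &msg, 0);		// fills the TX SGL
 *		read(opfd, out, sizeof(out));	// RX SGL; triggers the cipher
 *
 *		close(opfd);
 *		close(tfmfd);
 *		return 0;
 *	}
 */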

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct sock *psk = ask->parent;
        struct alg_sock *pask = alg_sk(psk);
        struct aead_tfm *aeadc = pask->private;
        struct crypto_aead *tfm = aeadc->aead;
        unsigned int ivsize = crypto_aead_ivsize(tfm);

        return af_alg_sendmsg(sock, msg, size, ivsize);
}

static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
                                struct scatterlist *src,
                                struct scatterlist *dst, unsigned int len)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);

        skcipher_request_set_sync_tfm(skreq, null_tfm);
        skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      NULL, NULL);
        skcipher_request_set_crypt(skreq, src, dst, len, NULL);

        return crypto_skcipher_encrypt(skreq);
}

static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                         size_t ignored, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct sock *psk = ask->parent;
        struct alg_sock *pask = alg_sk(psk);
        struct af_alg_ctx *ctx = ask->private;
        struct aead_tfm *aeadc = pask->private;
        struct crypto_aead *tfm = aeadc->aead;
        struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
        unsigned int i, as = crypto_aead_authsize(tfm);
        struct af_alg_async_req *areq;
        struct af_alg_tsgl *tsgl, *tmp;
        struct scatterlist *rsgl_src, *tsgl_src = NULL;
        int err = 0;
        size_t used = 0;        /* [in]  TX bufs to be en/decrypted */
        size_t outlen = 0;      /* [out] RX bufs produced by kernel */
        size_t usedpages = 0;   /* [in]  RX bufs to be used from user */
        size_t processed = 0;   /* [in]  TX bufs to be consumed */

        if (!ctx->used) {
                err = af_alg_wait_for_data(sk, flags);
                if (err)
                        return err;
        }

        /*
         * Data length provided by caller via sendmsg/sendpage that has not
         * yet been processed.
         */
        used = ctx->used;

        /*
         * Make sure sufficient data is present -- note, the same check is
         * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
         * inform the data sender that something is wrong, but they are not
         * essential for maintaining kernel integrity. We need the check here
         * too in case user space decides not to honor the error returned by
         * sendmsg/sendpage and still calls recvmsg. This check here protects
         * the kernel integrity.
         */
        if (!aead_sufficient_data(sk))
                return -EINVAL;

        /*
         * Calculate the minimum output buffer size holding the result of the
         * cipher operation. When encrypting data, the receiving buffer is
         * larger by the tag length compared to the input buffer as the
         * encryption operation generates the tag. For decryption, the input
         * buffer provides the tag which is consumed, so that only the
         * plaintext without a buffer for the tag is returned to the caller.
         */
        if (ctx->enc)
                outlen = used + as;
        else
                outlen = used - as;

        /*
         * The cipher operation input data is reduced by the associated data
         * length as this data is processed separately later on.
         */
        used -= ctx->aead_assoclen;
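
        /*
         * Worked example of the bookkeeping above (illustrative numbers, not
         * mandated by the API): with a 16-byte tag (as = 16), 16 bytes of
         * AAD and 32 bytes of plaintext, encryption enters with
         * ctx->used = 48, yields outlen = 48 + 16 = 64 (AAD || CT || tag)
         * and reduces used to 48 - 16 = 32 bytes of plaintext for the
         * cipher. Decrypting that result enters with ctx->used = 64, yields
         * outlen = 64 - 16 = 48 (AAD || PT) and used = 64 - 16 = 48 bytes
         * of CT || tag.
         */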

        /* Allocate cipher request for current operation. */
        areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
                                     crypto_aead_reqsize(tfm));
        if (IS_ERR(areq))
                return PTR_ERR(areq);

        /* convert iovecs of output buffers into RX SGL */
        err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
        if (err)
                goto free;

        /*
         * Ensure output buffer is sufficiently large. If the caller provides
         * less buffer space, only process as much input data as fits into
         * the provided output buffer. This allows AIO operation where the
         * caller sent all data to be processed at once and each AIO call
         * performs the operation on a different chunk of the input data.
         */
        if (usedpages < outlen) {
                size_t less = outlen - usedpages;

                if (used < less) {
                        err = -EINVAL;
                        goto free;
                }
                used -= less;
                outlen -= less;
        }

        processed = used + ctx->aead_assoclen;
        list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
                for (i = 0; i < tsgl->cur; i++) {
                        struct scatterlist *process_sg = tsgl->sg + i;

                        if (!(process_sg->length) || !sg_page(process_sg))
                                continue;
                        tsgl_src = process_sg;
                        break;
                }
                if (tsgl_src)
                        break;
        }
        if (processed && !tsgl_src) {
                err = -EFAULT;
                goto free;
        }

        /*
         * Copy of AAD from source to destination
         *
         * The AAD is copied to the destination buffer without change. Even
         * when user space uses an in-place cipher operation, the kernel
         * will copy the data as it cannot tell whether such an in-place
         * operation was requested.
         *
         * For efficiency, the following implementation ensures that the
         * cipher is invoked to perform the crypto operation in-place. This
         * is achieved by the memory management described below.
         */

        /* Use the RX SGL as source (and destination) for crypto op. */
        rsgl_src = areq->first_rsgl.sgl.sg;

        if (ctx->enc) {
                /*
                 * Encryption operation - The in-place cipher operation is
                 * achieved by the following operation:
                 *
                 * TX SGL: AAD || PT
                 *          |     |
                 *          | copy|
                 *          v     v
                 * RX SGL: AAD || PT || Tag
                 */
                err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
                                           areq->first_rsgl.sgl.sg,
                                           processed);
                if (err)
                        goto free;
                af_alg_pull_tsgl(sk, processed, NULL, 0);
        } else {
                /*
                 * Decryption operation - To achieve an in-place cipher
                 * operation, the following SGL structure is used:
                 *
                 * TX SGL: AAD || CT || Tag
                 *          |     |      ^
                 *          | copy|      | Create SGL link.
                 *          v     v      |
                 * RX SGL: AAD || CT ----+
                 */

                /* Copy AAD || CT to RX SGL buffer for in-place operation. */
                err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
                                           areq->first_rsgl.sgl.sg, outlen);
                if (err)
                        goto free;

                /* Create TX SGL for tag and chain it to RX SGL. */
                areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
                                                       processed - as);
                if (!areq->tsgl_entries)
                        areq->tsgl_entries = 1;
                areq->tsgl = sock_kmalloc(sk,
                                          array_size(sizeof(*areq->tsgl),
                                                     areq->tsgl_entries),
                                          GFP_KERNEL);
                if (!areq->tsgl) {
                        err = -ENOMEM;
                        goto free;
                }
                sg_init_table(areq->tsgl, areq->tsgl_entries);
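
                /*
                 * Note on the helpers used here and below (semantics as
                 * documented in crypto/af_alg.c): the af_alg_count_tsgl()
                 * call above counts the TX SG entries covering the last
                 * "as" bytes of the processed data, i.e. the tag, while
                 * the af_alg_pull_tsgl() call below releases the first
                 * "processed - as" bytes (AAD || CT, already copied into
                 * the RX SGL) and reassigns the entries covering the tag
                 * to areq->tsgl so they can be chained to the RX SGL.
                 */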

                /* Release TX SGL, except for tag data and reassign tag data. */
                af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);

                /* chain the areq TX SGL holding the tag with RX SGL */
                if (usedpages) {
                        /* RX SGL present */
                        struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;

                        sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
                        sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
                                 areq->tsgl);
                } else
                        /* no RX SGL present (e.g. authentication only) */
                        rsgl_src = areq->tsgl;
        }

        /* Initialize the crypto operation */
        aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
                               areq->first_rsgl.sgl.sg, used, ctx->iv);
        aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
        aead_request_set_tfm(&areq->cra_u.aead_req, tfm);

        if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
                /* AIO operation */
                sock_hold(sk);
                areq->iocb = msg->msg_iocb;

                /* Remember output size that will be generated. */
                areq->outlen = outlen;

                aead_request_set_callback(&areq->cra_u.aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          af_alg_async_cb, areq);
                err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
                                 crypto_aead_decrypt(&areq->cra_u.aead_req);

                /* AIO operation in progress */
                if (err == -EINPROGRESS || err == -EBUSY)
                        return -EIOCBQUEUED;

                sock_put(sk);
        } else {
                /* Synchronous operation */
                aead_request_set_callback(&areq->cra_u.aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &ctx->wait);
                err = crypto_wait_req(ctx->enc ?
                                crypto_aead_encrypt(&areq->cra_u.aead_req) :
                                crypto_aead_decrypt(&areq->cra_u.aead_req),
                                &ctx->wait);
        }

free:
        af_alg_free_resources(areq);

        return err ? err : outlen;
}
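
/*
 * Illustrative user-space sketch of the AIO path above (not part of the
 * kernel build, error handling omitted): it assumes libaio and reuses the
 * "opfd" operation fd and "out" buffer from the sketch near the top of this
 * file, after the TX data and control messages have been submitted via
 * sendmsg(). Each read iocb queues exactly one cipher operation, so several
 * parallel operations require several iocbs.
 *
 *	#include <libaio.h>
 *
 *	io_context_t aio_ctx = 0;
 *	struct iocb cb, *cbs[1] = { &cb };
 *	struct io_event ev;
 *
 *	io_setup(8, &aio_ctx);
 *	io_prep_pread(&cb, opfd, out, sizeof(out), 0);
 *	io_submit(aio_ctx, 1, cbs);		// queues the cipher operation
 *	io_getevents(aio_ctx, 1, 1, &ev, NULL);	// ev.res is outlen or -error
 *	io_destroy(aio_ctx);
 */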

static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
                        size_t ignored, int flags)
{
        struct sock *sk = sock->sk;
        int ret = 0;

        lock_sock(sk);
        while (msg_data_left(msg)) {
                int err = _aead_recvmsg(sock, msg, ignored, flags);

                /*
                 * This error covers -EIOCBQUEUED which implies that we can
                 * only handle one AIO request. If the caller wants to have
                 * multiple AIO requests in parallel, it must issue separate
                 * AIO calls.
                 *
                 * Also return the error if no data has been processed so
                 * far.
                 */
                if (err <= 0) {
                        if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
                                ret = err;
                        goto out;
                }

                ret += err;
        }

out:
        af_alg_wmem_wakeup(sk);
        release_sock(sk);
        return ret;
}

static struct proto_ops algif_aead_ops = {
        .family         = PF_ALG,

        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .getname        = sock_no_getname,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .getsockopt     = sock_no_getsockopt,
        .mmap           = sock_no_mmap,
        .bind           = sock_no_bind,
        .accept         = sock_no_accept,
        .setsockopt     = sock_no_setsockopt,

        .release        = af_alg_release,
        .sendmsg        = aead_sendmsg,
        .sendpage       = af_alg_sendpage,
        .recvmsg        = aead_recvmsg,
        .poll           = af_alg_poll,
};

static int aead_check_key(struct socket *sock)
{
        int err = 0;
        struct sock *psk;
        struct alg_sock *pask;
        struct aead_tfm *tfm;
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);

        lock_sock(sk);
        if (ask->refcnt)
                goto unlock_child;

        psk = ask->parent;
        pask = alg_sk(ask->parent);
        tfm = pask->private;

        err = -ENOKEY;
        lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
        if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
                goto unlock;

        if (!pask->refcnt++)
                sock_hold(psk);

        ask->refcnt = 1;
        sock_put(psk);

        err = 0;

unlock:
        release_sock(psk);
unlock_child:
        release_sock(sk);

        return err;
}

static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
                              size_t size)
{
        int err;

        err = aead_check_key(sock);
        if (err)
                return err;

        return aead_sendmsg(sock, msg, size);
}

static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
                                   int offset, size_t size, int flags)
{
        int err;

        err = aead_check_key(sock);
        if (err)
                return err;

        return af_alg_sendpage(sock, page, offset, size, flags);
}

static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
                              size_t ignored, int flags)
{
        int err;

        err = aead_check_key(sock);
        if (err)
                return err;

        return aead_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_aead_ops_nokey = {
        .family         = PF_ALG,

        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .getname        = sock_no_getname,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .getsockopt     = sock_no_getsockopt,
        .mmap           = sock_no_mmap,
        .bind           = sock_no_bind,
        .accept         = sock_no_accept,
        .setsockopt     = sock_no_setsockopt,

        .release        = af_alg_release,
        .sendmsg        = aead_sendmsg_nokey,
        .sendpage       = aead_sendpage_nokey,
        .recvmsg        = aead_recvmsg_nokey,
        .poll           = af_alg_poll,
};
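
/*
 * bind() on the parent AF_ALG socket allocates the aead_tfm below. The
 * nokey ops above remain in effect for accept()ed request sockets until
 * aead_check_key() observes that CRYPTO_TFM_NEED_KEY is clear, i.e. that a
 * key has been installed via setsockopt(ALG_SET_KEY) on the parent socket.
 */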
static void *aead_bind(const char *name, u32 type, u32 mask)
{
        struct aead_tfm *tfm;
        struct crypto_aead *aead;
        struct crypto_sync_skcipher *null_tfm;

        tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
        if (!tfm)
                return ERR_PTR(-ENOMEM);

        aead = crypto_alloc_aead(name, type, mask);
        if (IS_ERR(aead)) {
                kfree(tfm);
                return ERR_CAST(aead);
        }

        null_tfm = crypto_get_default_null_skcipher();
        if (IS_ERR(null_tfm)) {
                crypto_free_aead(aead);
                kfree(tfm);
                return ERR_CAST(null_tfm);
        }

        tfm->aead = aead;
        tfm->null_tfm = null_tfm;

        return tfm;
}

static void aead_release(void *private)
{
        struct aead_tfm *tfm = private;

        crypto_free_aead(tfm->aead);
        crypto_put_default_null_skcipher();
        kfree(tfm);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
        struct aead_tfm *tfm = private;

        return crypto_aead_setauthsize(tfm->aead, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
        struct aead_tfm *tfm = private;

        return crypto_aead_setkey(tfm->aead, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct af_alg_ctx *ctx = ask->private;
        struct sock *psk = ask->parent;
        struct alg_sock *pask = alg_sk(psk);
        struct aead_tfm *aeadc = pask->private;
        struct crypto_aead *tfm = aeadc->aead;
        unsigned int ivlen = crypto_aead_ivsize(tfm);

        af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
        sock_kzfree_s(sk, ctx->iv, ivlen);
        sock_kfree_s(sk, ctx, ctx->len);
        af_alg_release_parent(sk);
}

static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
        struct af_alg_ctx *ctx;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_tfm *tfm = private;
        struct crypto_aead *aead = tfm->aead;
        unsigned int len = sizeof(*ctx);
        unsigned int ivlen = crypto_aead_ivsize(aead);

        ctx = sock_kmalloc(sk, len, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        memset(ctx, 0, len);

        ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
        if (!ctx->iv) {
                sock_kfree_s(sk, ctx, len);
                return -ENOMEM;
        }
        memset(ctx->iv, 0, ivlen);

        INIT_LIST_HEAD(&ctx->tsgl_list);
        ctx->len = len;
        ctx->used = 0;
        atomic_set(&ctx->rcvused, 0);
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
        ctx->aead_assoclen = 0;
        crypto_init_wait(&ctx->wait);

        ask->private = ctx;

        sk->sk_destruct = aead_sock_destruct;

        return 0;
}

static int aead_accept_parent(void *private, struct sock *sk)
{
        struct aead_tfm *tfm = private;

        if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;

        return aead_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_aead = {
        .bind           = aead_bind,
        .release        = aead_release,
        .setkey         = aead_setkey,
        .setauthsize    = aead_setauthsize,
        .accept         = aead_accept_parent,
        .accept_nokey   = aead_accept_parent_nokey,
        .ops            = &algif_aead_ops,
        .ops_nokey      = &algif_aead_ops_nokey,
        .name           = "aead",
        .owner          = THIS_MODULE
};

static int __init algif_aead_init(void)
{
        return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
        int err = af_alg_unregister_type(&algif_type_aead);

        BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");