/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE

static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct sk_buff *skb,
			     gfp_t flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	struct aead_request *aead_req;

	int ret;
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(ctx->aead_recv);

	aead_req = kzalloc(req_size, flags);
	if (!aead_req)
		return -ENOMEM;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);

	if (ret < 0)
		goto out;

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);

	ctx->decrypted = true;

	ctx->saved_data_ready(sk);

out:
	kfree(aead_req);
	return ret;
}

static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}
static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context *ctx, size_t data_len,
			     gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(ctx->aead_send);
	struct aead_request *aead_req;
	int rc;

	aead_req = kzalloc(req_size, flags);
	if (!aead_req)
		return -ENOMEM;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	kfree(aead_req);
	return rc;
}

static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
			       sk->sk_allocation);
	if (rc < 0) {
		/* If we are called from write_space and we fail, set
		 * SOCK_NOSPACE so that another write_space is triggered
		 * in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		return rc;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
	return rc;
}
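/*
 * Layout note (informational; it follows from the sizes configured in
 * tls_set_sw_offload() below): for AES-GCM-128 a record pushed by
 * tls_push_record() looks like
 *
 *	byte  0      record type (e.g. 23 for application data)
 *	bytes 1-2    protocol version
 *	bytes 3-4    length of the remainder, big endian
 *	bytes 5-12   explicit nonce (8 bytes, so prepend_size == 13)
 *	...          ciphertext
 *	last 16      GCM authentication tag (tag_size)
 *
 * i.e. overhead_size == prepend_size + tag_size == 29 bytes per record.
 */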
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}
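/*
 * Userspace sketch (an assumption for illustration, not part of this
 * file): tls_sw_sendmsg() runs once the socket has the "tls" ULP
 * attached and TX key material installed, roughly as below; the struct
 * and constants are from include/uapi/linux/tls.h.
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	// fill ci.key, ci.iv, ci.salt, ci.rec_seq from the handshake
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	send(fd, buf, len, 0);		// transparently encrypted here
 */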
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due to the
			 * max sg elements limit.
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		       struct scatterlist *sgout)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE];
	struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
	struct scatterlist *sgin = &sgin_arr[0];
	struct strp_msg *rxm = strp_msg(skb);
	int ret, nsg = ARRAY_SIZE(sgin_arr);
	char aad_recv[TLS_AAD_SPACE_SIZE];
	struct sk_buff *unused;

	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (ret < 0)
		return ret;

	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	if (!sgout) {
		/* Decrypt in place; the allocation can fail, so check it
		 * before building the sg table below.
		 */
		nsg = skb_cow_data(skb, 0, &unused) + 1;
		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
		if (!sgin)
			return -ENOMEM;
		sgout = sgin;
	}

	sg_init_table(sgin, nsg);
	sg_set_buf(&sgin[0], aad_recv, sizeof(aad_recv));

	nsg = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);

	tls_make_aad(aad_recv,
		     rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq,
		     tls_ctx->rx.rec_seq_size,
		     ctx->control);

	ret = tls_do_decryption(sk, sgin, sgout, iv,
				rxm->full_len - tls_ctx->rx.overhead_size,
				skb, sk->sk_allocation);

	if (sgin != &sgin_arr[0])
		kfree(sgin);

	return ret;
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	strp_unpause(&ctx->strp);

	return true;
}

int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int err = 0;
	long timeo;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int page_count;
			int to_copy;

			page_count = iov_iter_npages(&msg->msg_iter,
						     MAX_SKB_FRAGS);
			to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
			if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
			    likely(!(flags & MSG_PEEK))) {
				struct scatterlist sgin[MAX_SKB_FRAGS + 1];
				char unused[21];
				int pages = 0;

				zc = true;
				sg_init_table(sgin, MAX_SKB_FRAGS + 1);
				/* The first entry absorbs the AAD that
				 * tls_do_decryption() expects ahead of
				 * the payload.
				 */
				sg_set_buf(&sgin[0], unused,
					   TLS_AAD_SPACE_SIZE);

				err = zerocopy_from_iter(sk, &msg->msg_iter,
							 to_copy, &pages,
							 &chunk, &sgin[1],
							 MAX_SKB_FRAGS, false);
				if (err < 0)
					goto fallback_to_reg_recv;

				err = decrypt_skb(sk, skb, sgin);
				for (; pages > 0; pages--)
					put_page(sg_page(&sgin[pages]));
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			} else {
fallback_to_reg_recv:
				err = decrypt_skb(sk, skb, NULL);
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		}
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb(sk, skb, NULL);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}
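/*
 * Receive-side sketch (an assumption for illustration): the record type
 * that tls_sw_recvmsg() exports via put_cmsg() above can be read back in
 * userspace roughly as below; handle_record_type() is a hypothetical
 * helper.
 *
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	int n = recvmsg(fd, &msg, 0);
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	if (cmsg && cmsg->cmsg_level == SOL_TLS &&
 *	    cmsg->cmsg_type == TLS_GET_RECORD_TYPE)
 *		handle_record_type(*CMSG_DATA(cmsg), buf, n);
 */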
unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}

static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	char header[tls_ctx->rx.prepend_size];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	/* The record length is a 16-bit big-endian field at bytes 3-4 */
	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct strp_msg *rxm;

	rxm = strp_msg(skb);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	strp->sk->sk_state_change(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);
	if (ctx->aead_recv) {
		if (ctx->recv_pkt) {
			kfree_skb(ctx->recv_pkt);
			ctx->recv_pkt = NULL;
		}
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}

	tls_free_both_sg(sk);

	kfree(ctx);
	kfree(tls_ctx);
}

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context *sw_ctx;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx->priv_ctx) {
		sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL);
		if (!sw_ctx) {
			rc = -ENOMEM;
			goto out;
		}
		crypto_init_wait(&sw_ctx->async_wait);
	} else {
		sw_ctx = ctx->priv_ctx;
	}

	ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;

	if (tx) {
		crypto_info = &ctx->crypto_send;
		cctx = &ctx->tx;
		aead = &sw_ctx->aead_send;
	} else {
		crypto_info = &ctx->crypto_recv;
		cctx = &ctx->rx;
		aead = &sw_ctx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(cctx->rec_seq, rec_seq, rec_seq_size);

	if (tx) {
		sg_init_table(sw_ctx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx->sg_encrypted_data));
		sg_init_table(sw_ctx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx->sg_plaintext_data));

		sg_init_table(sw_ctx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space,
			   sizeof(sw_ctx->aad_space));
		sg_unmark_end(&sw_ctx->sg_aead_in[1]);
		sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data);
		sg_init_table(sw_ctx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space,
			   sizeof(sw_ctx->aad_space));
		sg_unmark_end(&sw_ctx->sg_aead_out[1]);
		sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(*aead, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (!tx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	/* Free the IV for whichever direction was being configured */
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	kfree(ctx->priv_ctx);
	ctx->priv_ctx = NULL;
out:
	return rc;
}
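/*
 * Caller sketch (an assumption for illustration; the setsockopt plumbing
 * lives outside this file, in net/tls/tls_main.c): tls_set_sw_offload()
 * is reached when userspace installs key material on a socket that
 * already has the "tls" ULP attached, e.g.
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = { ... };
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));	// tx == 0
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));	// tx == 1
 */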