/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
#include <trace/events/sock.h>

#include "tls.h"

struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;
	bool async;
	u8 tail;
	);

	struct sk_buff *skb;
};

struct tls_decrypt_ctx {
	struct sock *sk;
	u8 iv[MAX_IV_SIZE];
	u8 aad[TLS_MAX_AAD_SIZE];
	u8 tail;
	bool free_sgout;
	struct scatterlist sg[];
};
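
/* Abort the connection: record err (a negative errno) on the socket and wake
 * up anyone polling or reading.  Illustrative use, as seen in the error paths
 * below:
 *
 *	if (decrypt_failed)
 *		tls_err_abort(sk, -EBADMSG);
 *
 * Readers then observe the positive error via sk->sk_err / sock_error(sk).
 */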
noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code. */
	WRITE_ONCE(sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(sk);
}

static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}

static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
			      struct tls_decrypt_arg *darg)
{
	struct strp_msg *rxm = strp_msg(skb);
	struct tls_msg *tlm = tls_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		char content_type = darg->zc ? darg->tail : 0;
		int err;

		while (content_type == 0) {
			if (offset < prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb, rxm->offset + offset,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			offset--;
		}
		tlm->control = content_type;
	}
	return sub;
}

static void tls_decrypt_done(void *data, int err)
{
	struct aead_request *aead_req = data;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct scatterlist *sgout = aead_req->dst;
	struct tls_sw_context_rx *ctx;
	struct tls_decrypt_ctx *dctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	unsigned int pages;
	struct sock *sk;
	int aead_size;

	/* If requests get too backlogged crypto API returns -EBUSY and calls
	 * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
	 * to make waiting for backlog to flush with crypto_wait_req() easier.
	 * First wait converts -EBUSY -> -EINPROGRESS, and the second one
	 * -EINPROGRESS -> 0.
	 * We have a single struct crypto_async_request per direction, this
	 * scheme doesn't help us, so just ignore the first ->complete().
	 */
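	/* Illustrative sequence for a single backlogged request:
	 *   crypto_aead_decrypt()          -> -EBUSY
	 *   tls_decrypt_done(-EINPROGRESS) -> ignored below
	 *   tls_decrypt_done(err)          -> normal completion handling
	 */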
	if (err == -EINPROGRESS)
		return;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	dctx = (void *)((u8 *)aead_req + aead_size);

	sk = dctx->sk;
	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);

	/* Propagate if there was an err */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(sk, err);
	}

	/* Free the destination pages if skb was not decrypted inplace */
	if (dctx->free_sgout) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	if (atomic_dec_and_test(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
}

static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
{
	if (!atomic_dec_and_test(&ctx->decrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->decrypt_pending);

	return ctx->async_wait.err;
}

static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (darg->async) {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, aead_req);
		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EBUSY) {
		ret = tls_decrypt_async_wait(ctx);
		ret = ret ?: -EINPROGRESS;
	}
	if (ret == -EINPROGRESS) {
		if (darg->async)
			return 0;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}
	darg->async = false;

	return ret;
}

static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	rec->sk = sk;

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}

int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, -EBADMSG);

	return rc;
}

static void tls_encrypt_done(void *data, int err)
{
	struct tls_sw_context_tx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct tls_rec *rec = data;
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct sock *sk;

	if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
		return;

	msg_en = &rec->msg_encrypted;

	sk = rec->sk;
	tls_ctx = tls_get_ctx(sk);
	prot = &tls_ctx->prot_info;
	ctx = tls_sw_ctx_tx(tls_ctx);

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = -sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec) {
			/* Schedule the transmission */
			if (!test_and_set_bit(BIT_TX_SCHEDULED,
					      &ctx->tx_bitmask))
				schedule_delayed_work(&ctx->tx_work.work, 1);
		}
	}

	if (atomic_dec_and_test(&ctx->encrypt_pending))
		complete(&ctx->async_wait.completion);
}

static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
{
	if (!atomic_dec_and_test(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->encrypt_pending);

	return ctx->async_wait.err;
}

static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
			    tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;
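
	/* The adjustment above excludes the TLS record header from the AEAD
	 * input/output; the offset and length are restored either in
	 * tls_encrypt_done() (async completion) or further down on a
	 * synchronous result or error.
	 */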

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, rec);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (rc == -EBUSY) {
		rc = tls_encrypt_async_wait(ctx);
		rc = rc ?: -EINPROGRESS;
	}
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}
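
/* Undo a previous tls_split_open_record(): if the scatterlist entries that
 * meet at the split point share a page and are contiguous, coalesce them,
 * then re-extend the open record's plaintext to its original end, take over
 * the split-off record's encrypted buffer, and free the temporary record.
 */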
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
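		/* TLS 1.3 inner plaintext layout (RFC 8446):
		 *   TLSInnerPlaintext = content || ContentType || zero padding
		 * The single tail byte carrying the real record type is
		 * appended via rec->sg_content_type; this implementation
		 * never adds padding on transmit.
		 */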
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy, redir_ingress;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
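		/* Drop this socket's lock while the data is handed to the
		 * redirect target: tcp_bpf_sendmsg_redir() operates on
		 * sk_redir and may itself lock that socket or sleep.
		 */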
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
				 struct sk_msg *msg_pl, size_t try_to_copy,
				 ssize_t *copied)
{
	struct page *page = NULL, **pages = &page;

	do {
		ssize_t part;
		size_t off;

		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
					      try_to_copy, 1, 0, &off);
		if (part <= 0)
			return part ?: -EIO;

		if (WARN_ON_ONCE(!sendpage_ok(page))) {
			iov_iter_revert(&msg->msg_iter, part);
			return -EIO;
		}

		sk_msg_page_add(msg_pl, page, part, off);
		msg_pl->sg.copybreak = 0;
		msg_pl->sg.curr = msg_pl->sg.end;
		sk_mem_charge(sk, part);
		*copied += part;
		try_to_copy -= part;
	} while (try_to_copy && !sk_msg_full(msg_pl));

	return 0;
}

static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
				 size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (!eor && (msg->msg_flags & MSG_EOR))
		return -EINVAL;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_process_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
						    try_to_copy, &copied);
			if (ret < 0)
				goto send_end;
			tls_ctx->pending_open_record_frags = true;

			if (sk_msg_full(msg_pl))
				full_record = true;

			if (full_record || eor)
				goto copied;
			continue;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* An open record is only defined if data was successfully
		 * copied; otherwise we would trim the sg but not reset the
		 * open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
copied:
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		int err;

		/* Wait for pending encryptions to get completed */
		err = tls_encrypt_async_wait(ctx);
		if (err) {
			ret = err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);
	return copied > 0 ? copied : ret;
}

int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
			       MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);
	ret = tls_sw_sendmsg_locked(sk, msg, size);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

/*
 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
 */
void tls_sw_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	struct sk_msg *msg_pl;
	ssize_t copied = 0;
	bool retrying = false;
	int ret = 0;

	if (!ctx->open_rec)
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

retry:
	/* same checks as in tls_sw_push_pending_record() */
	rec = ctx->open_rec;
	if (!rec)
		goto unlock;

	msg_pl = &rec->msg_plaintext;
	if (msg_pl->sg.size == 0)
		goto unlock;

	/* Check the BPF advisor and perform transmission. */
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
	switch (ret) {
	case 0:
	case -EAGAIN:
		if (retrying)
			goto unlock;
		retrying = true;
		goto retry;
	case -EINPROGRESS:
		break;
	default:
		goto unlock;
	}

	/* Wait for pending encryptions to get completed */
	if (tls_encrypt_async_wait(ctx))
		goto unlock;

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	long timeo;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		if (ret < 0)
			return ret;

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		ret = sk_wait_event(sk, &timeo,
				    tls_strp_msg_ready(ctx) ||
				    !sk_psock_queue_empty(psock),
				    &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	tls_strp_msg_load(&ctx->strp, released);

	return 1;
}

static int tls_setup_from_iter(struct iov_iter *from,
			       int length, int *pages_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = 0;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages2(from, pages,
					     length,
					     maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
	*pages_used = num_elem;

	return rc;
}

static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
		     unsigned int full_len)
{
	struct strp_msg *clr_rxm;
	struct sk_buff *clr_skb;
	int err;

	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
	clr_rxm->offset = 0;

	return clr_skb;
}

/* Decrypt handlers
 *
 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
 *       |          Input            |         Output
 * -------------------------------------------------------------------
 *    zc | Zero-copy decrypt allowed | Zero-copy performed
 * async | Async decrypt allowed     | Async crypto used / in progress
 *   skb |            *              | Output skb
 *
 * If ZC decryption was performed darg.skb will point to the input skb.
 */

/* This function decrypts the input skb into either out_iov, out_sg, or the
 * skb's own buffers. The input parameter 'darg->zc' indicates whether
 * zero-copy mode should be tried; in zero-copy mode, either out_iov or
 * out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
 * decryption happens inside the skb buffers themselves, i.e. zero-copy
 * gets disabled and 'darg->zc' is updated.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 * aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
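	/* Resulting layout (illustrative):
	 *
	 *   mem: [ aead_request + crypto req ctx | pad | tls_decrypt_ctx | sg[n_sgin + n_sgout] ]
	 *                                          ^ ALIGN() to __alignof__(*dctx)
	 */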
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}
	dctx->free_sgout = !!pages;

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err)
		goto exit_free_pages;

	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err)
			__skb_queue_tail(&ctx->async_hold, darg->skb);
		return err;
	}

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}
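
/* Software decrypt handler (see the table above): decrypts the current
 * strparser message via tls_decrypt_sg(), retries without zero-copy if an
 * opportunistically zero-copied TLS 1.3 record turns out not to be
 * application data, and strips the TLS 1.3 padding from the result.
 */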
static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}

static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}

static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return 0;
}

int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}

static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}

static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}

/* This function traverses the rx_list in the TLS receive context and copies
 * the decrypted records into the buffer provided by the caller when zero-copy
 * is not used. Further, the records are removed from the rx_list if it is not
 * a peek case and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek,
			   bool *more)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto more;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from the record in the non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in the first record in
		 * rx_list, so from the second record onwards 'skip' is 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}
	err = 0;

out:
	return copied ? : err;
more:
	if (more)
		*more = true;
	goto out;
}

static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
		       size_t len_left, size_t decrypted, ssize_t done,
		       size_t *flushed_at)
{
	size_t max_rec;

	if (len_left <= decrypted)
		return false;

	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
		return false;

	*flushed_at = done;
	return sk_flush_backlog(sk);
}

static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
				 bool nonblock)
{
	long timeo;
	int ret;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (unlikely(ctx->reader_present)) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		ctx->reader_contended = 1;

		add_wait_queue(&ctx->wq, &wait);
		ret = sk_wait_event(sk, &timeo,
				    !READ_ONCE(ctx->reader_present), &wait);
		remove_wait_queue(&ctx->wq, &wait);

		if (timeo <= 0)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		if (ret < 0)
			return ret;
	}

	WRITE_ONCE(ctx->reader_present, 1);

	return 0;
}

static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
			      bool nonblock)
{
	int err;

	lock_sock(sk);
	err = tls_rx_reader_acquire(sk, ctx, nonblock);
	if (err)
		release_sock(sk);
	return err;
}

static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	if (unlikely(ctx->reader_contended)) {
		if (wq_has_sleeper(&ctx->wq))
			wake_up(&ctx->wq);
		else
			ctx->reader_contended = 0;

		WARN_ON_ONCE(!ctx->reader_present);
	}

	WRITE_ONCE(ctx->reader_present, 0);
}

static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	tls_rx_reader_release(sk, ctx);
	release_sock(sk);
}

int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	ssize_t decrypted = 0, async_copy_bytes = 0;
	struct sk_psock *psock;
	unsigned char control = 0;
	size_t flushed_at = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	bool async = false;
	int target, err;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	bool rx_more = false;
	bool released = true;
	bool bpf_strp_enabled;
	bool zc_capable;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	psock = sk_psock_get(sk);
	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
	if (err < 0)
		return err;
	bpf_strp_enabled = sk_psock_strp_enabled(psock);

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto end;

	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
	if (err < 0)
		goto end;

	copied = err;
	if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
		goto end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;

	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
		     ctx->zc_capable;
	decrypted = 0;
	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
		struct tls_decrypt_arg darg;
		int to_decrypt, chunk;

		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
				      released);
		if (err <= 0) {
			if (psock) {
				chunk = sk_msg_recvmsg(sk, psock, msg, len,
						       flags);
				if (chunk > 0) {
					decrypted += chunk;
					len -= chunk;
					continue;
				}
			}
			goto recv_end;
		}

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		rxm = strp_msg(tls_strp_msg(ctx));
		tlm = tls_msg(tls_strp_msg(ctx));

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (zc_capable && to_decrypt <= len &&
		    tlm->control == TLS_RECORD_TYPE_DATA)
			darg.zc = true;

		/* Do not use async mode if record is non-data */
		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
			darg.async = ctx->async_capable;
		else
			darg.async = false;

		err = tls_rx_one_record(sk, msg, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto recv_end;
		}

		async |= darg.async;

		/* If the type of records being processed is not known yet,
		 * set it to the record type just dequeued. If it is already
		 * known, but does not match the record type just dequeued,
		 * go to the end. We always get the record type here since for
		 * TLS 1.2 the record type is known right after the record is
		 * dequeued from the stream parser. For TLS 1.3, we disable
		 * async.
		 */
		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
		if (err <= 0) {
			DEBUG_NET_WARN_ON_ONCE(darg.zc);
			tls_rx_rec_done(ctx);
put_on_rx_list_err:
			__skb_queue_tail(&ctx->rx_list, darg.skb);
			goto recv_end;
		}

		/* periodically flush backlog, and feed strparser */
		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
						  decrypted + copied,
						  &flushed_at);

		/* TLS 1.3 may have updated the length by more than overhead */
		rxm = strp_msg(darg.skb);
		chunk = rxm->full_len;
		tls_rx_rec_done(ctx);

		if (!darg.zc) {
			bool partially_consumed = chunk > len;
			struct sk_buff *skb = darg.skb;

			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);

			if (async) {
				/* TLS 1.2-only, to_decrypt must be text len */
				chunk = min_t(int, to_decrypt, len);
				async_copy_bytes += chunk;
put_on_rx_list:
				decrypted += chunk;
				len -= chunk;
				__skb_queue_tail(&ctx->rx_list, skb);
				if (unlikely(control != TLS_RECORD_TYPE_DATA))
					break;
				continue;
			}

			if (bpf_strp_enabled) {
				released = true;
				err = sk_psock_tls_strp_read(psock, skb);
				if (err != __SK_PASS) {
					rxm->offset = rxm->offset + rxm->full_len;
					rxm->full_len = 0;
					if (err == __SK_DROP)
						consume_skb(skb);
					continue;
				}
			}

			if (partially_consumed)
				chunk = len;

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto put_on_rx_list_err;

			if (is_peek)
				goto put_on_rx_list;

			if (partially_consumed) {
				rxm->offset += chunk;
				rxm->full_len -= chunk;
				goto put_on_rx_list;
			}

			consume_skb(skb);
		}

		decrypted += chunk;
		len -= chunk;

		/* Return full control message to userspace before trying
		 * to parse another message type
		 */
		msg->msg_flags |= MSG_EOR;
		if (control != TLS_RECORD_TYPE_DATA)
			break;
	}

recv_end:
	if (async) {
		int ret;

		/* Wait for all previously submitted records to be decrypted */
		ret = tls_decrypt_async_wait(ctx);
		__skb_queue_purge(&ctx->async_hold);

		if (ret) {
			if (err >= 0 || err == -EINPROGRESS)
				err = ret;
			decrypted = 0;
			goto end;
		}

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
			err = process_rx_list(ctx, msg, &control, copied,
					      decrypted, is_peek, NULL);
		else
			err = process_rx_list(ctx, msg, &control, 0,
					      async_copy_bytes, is_peek, NULL);
	}

	copied += decrypted;

end:
	tls_rx_reader_unlock(sk, ctx);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}
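
/* Splice decrypted record data into a pipe. Only application-data records
 * can be spliced; if a control record is encountered it is put back on
 * rx_list and -EINVAL is returned.
 */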

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int chunk;
	int err;

	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
	if (err < 0)
		return err;

	if (!skb_queue_empty(&ctx->rx_list)) {
		skb = __skb_dequeue(&ctx->rx_list);
	} else {
		struct tls_decrypt_arg darg;

		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
				      true);
		if (err <= 0)
			goto splice_read_end;

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		err = tls_rx_one_record(sk, NULL, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto splice_read_end;
		}

		tls_rx_rec_done(ctx);
		skb = darg.skb;
	}

	rxm = strp_msg(skb);
	tlm = tls_msg(skb);

	/* splice does not support reading control messages */
	if (tlm->control != TLS_RECORD_TYPE_DATA) {
		err = -EINVAL;
		goto splice_requeue;
	}

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_requeue;

	if (chunk < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;
		goto splice_requeue;
	}

	consume_skb(skb);

splice_read_end:
	tls_rx_reader_unlock(sk, ctx);
	return copied ? : err;

splice_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto splice_read_end;
}
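
/* tls_sw_read_sock() below feeds decrypted records to a read_actor callback
 * (the tcp_read_sock()-style interface). It bails out with -EINVAL when a
 * psock is attached or when a control record is encountered, and requeues a
 * record that the actor did not fully consume at the head of rx_list so a
 * later call resumes where this one stopped.
 */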

int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
		     sk_read_actor_t read_actor)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = NULL;
	struct sk_buff *skb = NULL;
	struct sk_psock *psock;
	size_t flushed_at = 0;
	bool released = true;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t decrypted;
	int err, used;

	psock = sk_psock_get(sk);
	if (psock) {
		sk_psock_put(sk, psock);
		return -EINVAL;
	}
	err = tls_rx_reader_acquire(sk, ctx, true);
	if (err < 0)
		return err;

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto read_sock_end;

	decrypted = 0;
	do {
		if (!skb_queue_empty(&ctx->rx_list)) {
			skb = __skb_dequeue(&ctx->rx_list);
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
		} else {
			struct tls_decrypt_arg darg;

			err = tls_rx_rec_wait(sk, NULL, true, released);
			if (err <= 0)
				goto read_sock_end;

			memset(&darg.inargs, 0, sizeof(darg.inargs));

			err = tls_rx_one_record(sk, NULL, &darg);
			if (err < 0) {
				tls_err_abort(sk, -EBADMSG);
				goto read_sock_end;
			}

			released = tls_read_flush_backlog(sk, prot, INT_MAX,
							  0, decrypted,
							  &flushed_at);
			skb = darg.skb;
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
			decrypted += rxm->full_len;

			tls_rx_rec_done(ctx);
		}

		/* read_sock does not support reading control messages */
		if (tlm->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto read_sock_requeue;
		}

		used = read_actor(desc, skb, rxm->offset, rxm->full_len);
		if (used <= 0) {
			if (!copied)
				err = used;
			goto read_sock_requeue;
		}
		copied += used;
		if (used < rxm->full_len) {
			rxm->offset += used;
			rxm->full_len -= used;
			if (!desc->count)
				goto read_sock_requeue;
		} else {
			consume_skb(skb);
			if (!desc->count)
				skb = NULL;
		}
	} while (skb);

read_sock_end:
	tls_rx_reader_release(sk, ctx);
	return copied ? : err;

read_sock_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto read_sock_end;
}

bool tls_sw_sock_is_readable(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || tls_strp_msg_ready(ctx) ||
		!skb_queue_empty(&ctx->rx_list);
}
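
/* For reference, record parsing below relies on the 5-byte TLS record header
 * layout (RFC 5246 / RFC 8446):
 *
 *	byte 0      content type (0x17 application_data, 0x15 alert, ...)
 *	bytes 1-2   legacy protocol version, 0x03 0x03 for TLS 1.2 and 1.3
 *	bytes 3-4   payload length, big endian
 *
 * which is why tls_rx_msg_size() checks header[1]/header[2] against the
 * TLS 1.2 version bytes and builds data_len from header[3] and header[4].
 */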

int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (strp->stm.offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	strp->mark = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

void tls_rx_msg_ready(struct tls_strparser *strp)
{
	struct tls_sw_context_rx *ctx;

	ctx = container_of(strp, struct tls_sw_context_rx, strp);
	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;
	gfp_t alloc_save;

	trace_sk_data_ready(sk);

	alloc_save = sk->sk_allocation;
	sk->sk_allocation = GFP_ATOMIC;
	tls_strp_data_ready(&ctx->strp);
	sk->sk_allocation = alloc_save;

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}

void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}

void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	tls_encrypt_async_wait(ctx);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}

void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}
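
/* Rx teardown: tls_sw_release_resources_rx() drops the cipher state and, if
 * an aead was ever allocated, purges rx_list, stops the strparser and
 * restores the original sk_data_ready callback under sk_callback_lock.
 * tls_sw_strparser_done() and tls_sw_free_ctx_rx() then finish releasing the
 * strparser and the context itself.
 */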

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		__skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		tls_strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}

void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_strp_done(&ctx->strp);
}

void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}
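
/* Tx worker scheduling protocol: whoever wants the worker to run sets
 * BIT_TX_SCHEDULED (tls_sw_write_space() below does this) and queues
 * tx_work; the handler only proceeds if it can clear that bit again, and
 * bails out once BIT_TX_CLOSING has been set by tls_sw_cancel_work_tx()
 * during teardown.
 */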

/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	if (mutex_trylock(&tls_ctx->tx_lock)) {
		lock_sock(sk);
		tls_tx_records(sk, -1);
		release_sock(sk);
		mutex_unlock(&tls_ctx->tx_lock);
	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		/* Someone is holding the tx_lock, they will likely run Tx
		 * and cancel the work on their way out of the lock section.
		 * Schedule a long delay just in case.
		 */
		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
	}
}

static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (tls_is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}

void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}

void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
		tls_ctx->prot_info.version != TLS_1_3_VERSION;
}

static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
{
	struct tls_sw_context_tx *sw_ctx_tx;

	if (!ctx->priv_ctx_tx) {
		sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
		if (!sw_ctx_tx)
			return NULL;
	} else {
		sw_ctx_tx = ctx->priv_ctx_tx;
	}

	crypto_init_wait(&sw_ctx_tx->async_wait);
	atomic_set(&sw_ctx_tx->encrypt_pending, 1);
	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
	sw_ctx_tx->tx_work.sk = sk;

	return sw_ctx_tx;
}

static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
{
	struct tls_sw_context_rx *sw_ctx_rx;

	if (!ctx->priv_ctx_rx) {
		sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
		if (!sw_ctx_rx)
			return NULL;
	} else {
		sw_ctx_rx = ctx->priv_ctx_rx;
	}

	crypto_init_wait(&sw_ctx_rx->async_wait);
	atomic_set(&sw_ctx_rx->decrypt_pending, 1);
	init_waitqueue_head(&sw_ctx_rx->wq);
	skb_queue_head_init(&sw_ctx_rx->rx_list);
	skb_queue_head_init(&sw_ctx_rx->async_hold);

	return sw_ctx_rx;
}
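
/* For context, tls_set_sw_offload() is reached from the setsockopt() path
 * once the "tls" ULP is attached. A minimal userspace sketch (AES-128-GCM,
 * TLS 1.2; key material and error handling omitted):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	// .iv, .rec_seq, .key and .salt are filled from the handshake
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 *
 * For the software path, each TLS_TX/TLS_RX call ends up in the function
 * below with tx set accordingly.
 */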

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt;
	const struct tls_cipher_desc *cipher_desc;
	u16 nonce_size;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
		if (!ctx->priv_ctx_tx)
			return -ENOMEM;

		sw_ctx_tx = ctx->priv_ctx_tx;
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		ctx->priv_ctx_rx = init_ctx_rx(ctx);
		if (!ctx->priv_ctx_rx)
			return -ENOMEM;

		sw_ctx_rx = ctx->priv_ctx_rx;
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc) {
		rc = -EINVAL;
		goto free_priv;
	}

	nonce_size = cipher_desc->nonce;

	iv = crypto_info_iv(crypto_info, cipher_desc);
	key = crypto_info_key(crypto_info, cipher_desc);
	salt = crypto_info_salt(crypto_info, cipher_desc);
	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (nonce_size > MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = cipher_desc->tag;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = cipher_desc->iv;
	prot->salt_size = cipher_desc->salt;
	cctx->iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = cipher_desc->rec_seq;
	memcpy(cctx->iv, salt, cipher_desc->salt);
	memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);

	cctx->rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		tls_update_rx_zc_capable(ctx);
		sw_ctx_rx->async_capable =
			crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);

		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
		if (rc)
			goto free_aead;
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}