/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
#include <trace/events/sock.h>

#include "tls.h"

struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;
	bool async;
	u8 tail;
	);

	struct sk_buff *skb;
};

struct tls_decrypt_ctx {
	struct sock *sk;
	u8 iv[MAX_IV_SIZE];
	u8 aad[TLS_MAX_AAD_SIZE];
	u8 tail;
	struct scatterlist sg[];
};

noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code. */
	WRITE_ONCE(sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(sk);
}

static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}
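
/* TLS 1.3 hides the real record type inside the encrypted payload
 * (RFC 8446, section 5.4). After decryption the inner plaintext is laid
 * out roughly as:
 *
 *	content | content-type (1 byte) | zero padding (0..n bytes)
 *
 * tls_padding_length() scans backwards from the end of the decrypted
 * record (in the zero-copy case darg->tail already holds the last byte)
 * until the first non-zero byte: that byte is the real content type and
 * the zeros behind it are padding which must be trimmed off full_len.
 */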
static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
			      struct tls_decrypt_arg *darg)
{
	struct strp_msg *rxm = strp_msg(skb);
	struct tls_msg *tlm = tls_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		char content_type = darg->zc ? darg->tail : 0;
		int err;

		while (content_type == 0) {
			if (offset < prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb, rxm->offset + offset,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			offset--;
		}
		tlm->control = content_type;
	}
	return sub;
}

static void tls_decrypt_done(void *data, int err)
{
	struct aead_request *aead_req = data;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_decrypt_ctx *dctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	unsigned int pages;
	struct sock *sk;
	int aead_size;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	dctx = (void *)((u8 *)aead_req + aead_size);

	sk = dctx->sk;
	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);

	/* Propagate if there was an err */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(sk, err);
	}

	/* Free the destination pages if skb was not decrypted inplace */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	spin_lock_bh(&ctx->decrypt_compl_lock);
	if (!atomic_dec_return(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->decrypt_compl_lock);
}

static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (darg->async) {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, aead_req);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (darg->async)
			return 0;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}
	darg->async = false;

	return ret;
}

static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}

static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}

static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}

static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	rec->sk = sk;

	return rec;
}

static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}

static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}
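
/* Transmit TLS records whose encryption has already completed, in
 * order.  A partially sent record is pushed first, then ready records
 * from the head of tx_list; the walk stops at the first record that is
 * not yet marked tx_ready.  Called from the transmit paths and from the
 * tx_work worker scheduled by the async encryption callback.
 */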
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, -EBADMSG);

	return rc;
}

static void tls_encrypt_done(void *data, int err)
{
	struct tls_sw_context_tx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct tls_rec *rec = data;
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	bool ready = false;
	struct sock *sk;
	int pending;

	msg_en = &rec->msg_encrypted;

	sk = rec->sk;
	tls_ctx = tls_get_ctx(sk);
	prot = &tls_ctx->prot_info;
	ctx = tls_sw_ctx_tx(tls_ctx);

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = -sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	spin_lock_bh(&ctx->encrypt_compl_lock);
	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
			    tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, rec);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from the context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}

static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}
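
/* Undo a previous tls_split_open_record(): the plaintext that was
 * carved off into 'from' is appended back onto 'to' and the original
 * scatterlist end is restored.  Used when the first half could not be
 * pushed (or turned out to be empty), so the data must stay in one
 * open record.
 */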
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}

static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}

static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy, redir_ingress;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}

static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
				 struct sk_msg *msg_pl, size_t try_to_copy,
				 ssize_t *copied)
{
	struct page *page = NULL, **pages = &page;

	do {
		ssize_t part;
		size_t off;

		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
					      try_to_copy, 1, 0, &off);
		if (part <= 0)
			return part ?: -EIO;

		if (WARN_ON_ONCE(!sendpage_ok(page))) {
			iov_iter_revert(&msg->msg_iter, part);
			return -EIO;
		}

		sk_msg_page_add(msg_pl, page, part, off);
		sk_mem_charge(sk, part);
		*copied += part;
		try_to_copy -= part;
	} while (try_to_copy && !sk_msg_full(msg_pl));

	return 0;
}
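
/* Main TX loop: the user buffer is carved into TLS records of at most
 * TLS_MAX_PAYLOAD_SIZE bytes of plaintext.  Data is either mapped
 * zero-copy from the iterator (synchronous, non-kvec case) or copied
 * into the open record, and the record is pushed through the BPF
 * verdict / encryption path once it is full or the caller signalled
 * end-of-record.
 */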
static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
				 size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (!eor && (msg->msg_flags & MSG_EOR))
		return -EINVAL;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_process_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
						    try_to_copy, &copied);
			if (ret < 0)
				goto send_end;
			tls_ctx->pending_open_record_frags = true;
			if (full_record || eor || sk_msg_full(msg_pl))
				goto copied;
			continue;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Only mark the record as having open frags if the copy
		 * succeeded, otherwise we would trim the sg but not reset
		 * the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
copied:
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);
	return copied > 0 ? copied : ret;
}
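
/* sendmsg() entry point for a TLS_SW transmit socket.  Non-data record
 * types are requested from user space with a TLS_SET_RECORD_TYPE
 * control message (see Documentation/networking/tls.rst), handled by
 * tls_process_cmsg().  Illustrative, untested user space sketch for
 * sending a close_notify alert record:
 *
 *	unsigned char alert[2] = { 1, 0 };	(warning, close_notify)
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))] = { 0 };
 *	struct iovec iov = { .iov_base = alert, .iov_len = sizeof(alert) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *
 *	c->cmsg_level = SOL_TLS;
 *	c->cmsg_type = TLS_SET_RECORD_TYPE;
 *	c->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*((unsigned char *)CMSG_DATA(c)) = 21;	(21 == TLS alert)
 *	sendmsg(fd, &m, 0);
 */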
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
			       MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);
	ret = tls_sw_sendmsg_locked(sk, msg, size);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}

/*
 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
 */
void tls_sw_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	struct sk_msg *msg_pl;
	ssize_t copied = 0;
	bool retrying = false;
	int ret = 0;
	int pending;

	if (!ctx->open_rec)
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

retry:
	rec = ctx->open_rec;
	if (!rec)
		goto unlock;

	msg_pl = &rec->msg_plaintext;

	/* Check the BPF advisor and perform transmission. */
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
	switch (ret) {
	case 0:
	case -EAGAIN:
		if (retrying)
			goto unlock;
		retrying = true;
		goto retry;
	case -EINPROGRESS:
		break;
	default:
		goto unlock;
	}

	/* Wait for pending encryptions to get completed */
	spin_lock_bh(&ctx->encrypt_compl_lock);
	ctx->async_notify = true;

	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);
	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	else
		reinit_completion(&ctx->async_wait.completion);

	/* There can be no concurrent accesses, since we have no pending
	 * encrypt operations
	 */
	WRITE_ONCE(ctx->async_notify, false);

	if (ctx->async_wait.err)
		goto unlock;

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	long timeo;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		if (ret < 0)
			return ret;

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		ret = sk_wait_event(sk, &timeo,
				    tls_strp_msg_ready(ctx) ||
				    !sk_psock_queue_empty(psock),
				    &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	tls_strp_msg_load(&ctx->strp, released);

	return 1;
}

static int tls_setup_from_iter(struct iov_iter *from,
			       int length, int *pages_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = 0;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages2(from, pages,
					     length,
					     maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
	*pages_used = num_elem;

	return rc;
}

static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
		     unsigned int full_len)
{
	struct strp_msg *clr_rxm;
	struct sk_buff *clr_skb;
	int err;

	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
	clr_rxm->offset = 0;

	return clr_skb;
}

/* Decrypt handlers
 *
 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
 *       |          Input            |         Output
 * -------------------------------------------------------------------
 *    zc | Zero-copy decrypt allowed | Zero-copy performed
 * async | Async decrypt allowed     | Async crypto used / in progress
 *   skb |            *              | Output skb
 *
 * If ZC decryption was performed darg.skb will point to the input skb.
 */

/* This function decrypts the input skb into either out_iov or out_sg,
 * or into skb buffers. The input parameter 'darg->zc' indicates whether
 * zero-copy mode should be tried. With zero-copy mode, either out_iov or
 * out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
 * decryption happens into skb buffers instead, i.e. zero-copy gets
 * disabled and 'darg->zc' is updated.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 * aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err)
		goto exit_free_pages;

	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err)
			__skb_queue_tail(&ctx->async_hold, darg->skb);
		return err;
	}

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}
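
/* Software decrypt handler: decrypt the current strparser message with
 * the crypto API.  If an opportunistic TLS 1.3 zero-copy decrypt turns
 * out to be a non-data record (the trailing content-type byte is not
 * application data), the plaintext already landed in the user buffer,
 * so the record is decrypted a second time without zero-copy.
 */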
static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}

static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}

static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return 0;
}

int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}

static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}

static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}

/* This function traverses the rx_list in the TLS receive context and
 * copies the already-decrypted records into the buffer provided by the
 * caller when zero-copy is not used. Records are removed from the
 * rx_list if this is not a peek case and the record has been consumed
 * completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto out;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto out;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto out;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from the record if it is a non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}
	err = 0;

out:
	return copied ? : err;
}

static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
		       size_t len_left, size_t decrypted, ssize_t done,
		       size_t *flushed_at)
{
	size_t max_rec;

	if (len_left <= decrypted)
		return false;

	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
		return false;

	*flushed_at = done;
	return sk_flush_backlog(sk);
}

static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
				 bool nonblock)
{
	long timeo;
	int ret;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (unlikely(ctx->reader_present)) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		ctx->reader_contended = 1;

		add_wait_queue(&ctx->wq, &wait);
		ret = sk_wait_event(sk, &timeo,
				    !READ_ONCE(ctx->reader_present), &wait);
		remove_wait_queue(&ctx->wq, &wait);

		if (timeo <= 0)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		if (ret < 0)
			return ret;
	}

	WRITE_ONCE(ctx->reader_present, 1);

	return 0;
}

static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
			      bool nonblock)
{
	int err;

	lock_sock(sk);
	err = tls_rx_reader_acquire(sk, ctx, nonblock);
	if (err)
		release_sock(sk);
	return err;
}

static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	if (unlikely(ctx->reader_contended)) {
		if (wq_has_sleeper(&ctx->wq))
			wake_up(&ctx->wq);
		else
			ctx->reader_contended = 0;

		WARN_ON_ONCE(!ctx->reader_present);
	}

	WRITE_ONCE(ctx->reader_present, 0);
}

static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	tls_rx_reader_release(sk, ctx);
	release_sock(sk);
}
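
/* recvmsg() entry point.  Records that were already decrypted on a
 * previous call (peeked or completed asynchronously) are drained from
 * ctx->rx_list first; after that new records are pulled from the
 * strparser and decrypted, zero-copy into the user buffer when
 * possible.  The record type is reported to user space through a
 * TLS_GET_RECORD_TYPE control message and a single read never crosses
 * a change in record type.
 */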
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	ssize_t decrypted = 0, async_copy_bytes = 0;
	struct sk_psock *psock;
	unsigned char control = 0;
	size_t flushed_at = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	bool async = false;
	int target, err;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	bool released = true;
	bool bpf_strp_enabled;
	bool zc_capable;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	psock = sk_psock_get(sk);
	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
	if (err < 0)
		return err;
	bpf_strp_enabled = sk_psock_strp_enabled(psock);

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto end;

	/* Process pending decrypted records. These must be non-zero-copy. */
	err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
	if (err < 0)
		goto end;

	copied = err;
	if (len <= copied)
		goto end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;

	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
		     ctx->zc_capable;
	decrypted = 0;
	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
		struct tls_decrypt_arg darg;
		int to_decrypt, chunk;

		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
				      released);
		if (err <= 0) {
			if (psock) {
				chunk = sk_msg_recvmsg(sk, psock, msg, len,
						       flags);
				if (chunk > 0) {
					decrypted += chunk;
					len -= chunk;
					continue;
				}
			}
			goto recv_end;
		}

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		rxm = strp_msg(tls_strp_msg(ctx));
		tlm = tls_msg(tls_strp_msg(ctx));

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (zc_capable && to_decrypt <= len &&
		    tlm->control == TLS_RECORD_TYPE_DATA)
			darg.zc = true;

		/* Do not use async mode if record is non-data */
		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
			darg.async = ctx->async_capable;
		else
			darg.async = false;

		err = tls_rx_one_record(sk, msg, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto recv_end;
		}

		async |= darg.async;

		/* If the type of records being processed is not known yet,
		 * set it to the record type just dequeued. If it is already
		 * known, but does not match the record type just dequeued,
		 * go to end. We always get the record type here since for
		 * tls1.2, the record type is known just after the record is
		 * dequeued from the stream parser. For tls1.3, we disable
		 * async.
		 */
		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
		if (err <= 0) {
			DEBUG_NET_WARN_ON_ONCE(darg.zc);
			tls_rx_rec_done(ctx);
put_on_rx_list_err:
			__skb_queue_tail(&ctx->rx_list, darg.skb);
			goto recv_end;
		}

		/* periodically flush backlog, and feed strparser */
		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
						  decrypted + copied,
						  &flushed_at);

		/* TLS 1.3 may have updated the length by more than overhead */
		rxm = strp_msg(darg.skb);
		chunk = rxm->full_len;
		tls_rx_rec_done(ctx);

		if (!darg.zc) {
			bool partially_consumed = chunk > len;
			struct sk_buff *skb = darg.skb;

			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);

			if (async) {
				/* TLS 1.2-only, to_decrypt must be text len */
				chunk = min_t(int, to_decrypt, len);
				async_copy_bytes += chunk;
put_on_rx_list:
				decrypted += chunk;
				len -= chunk;
				__skb_queue_tail(&ctx->rx_list, skb);
				continue;
			}

			if (bpf_strp_enabled) {
				released = true;
				err = sk_psock_tls_strp_read(psock, skb);
				if (err != __SK_PASS) {
					rxm->offset = rxm->offset + rxm->full_len;
					rxm->full_len = 0;
					if (err == __SK_DROP)
						consume_skb(skb);
					continue;
				}
			}

			if (partially_consumed)
				chunk = len;

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto put_on_rx_list_err;

			if (is_peek)
				goto put_on_rx_list;

			if (partially_consumed) {
				rxm->offset += chunk;
				rxm->full_len -= chunk;
				goto put_on_rx_list;
			}

			consume_skb(skb);
		}

		decrypted += chunk;
		len -= chunk;

		/* Return full control message to userspace before trying
		 * to parse another message type
		 */
		msg->msg_flags |= MSG_EOR;
		if (control != TLS_RECORD_TYPE_DATA)
			break;
	}

recv_end:
	if (async) {
		int ret, pending;

		/* Wait for all previously submitted records to be decrypted */
		spin_lock_bh(&ctx->decrypt_compl_lock);
		reinit_completion(&ctx->async_wait.completion);
		pending = atomic_read(&ctx->decrypt_pending);
		spin_unlock_bh(&ctx->decrypt_compl_lock);
		ret = 0;
		if (pending)
			ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		__skb_queue_purge(&ctx->async_hold);

		if (ret) {
			if (err >= 0 || err == -EINPROGRESS)
				err = ret;
			decrypted = 0;
			goto end;
		}

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
			err = process_rx_list(ctx, msg, &control, copied,
					      decrypted, is_peek);
		else
			err = process_rx_list(ctx, msg, &control, 0,
					      async_copy_bytes, is_peek);
		decrypted += max(err, 0);
	}

	copied += decrypted;

end:
	tls_rx_reader_unlock(sk, ctx);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int chunk;
	int err;

	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
	if (err < 0)
		return err;

	if (!skb_queue_empty(&ctx->rx_list)) {
		skb = __skb_dequeue(&ctx->rx_list);
	} else {
		struct tls_decrypt_arg darg;

		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
				      true);
		if (err <= 0)
			goto splice_read_end;

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		err = tls_rx_one_record(sk, NULL, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto splice_read_end;
		}

		tls_rx_rec_done(ctx);
		skb = darg.skb;
	}

	rxm = strp_msg(skb);
	tlm = tls_msg(skb);

	/* splice does not support reading control messages */
	if (tlm->control != TLS_RECORD_TYPE_DATA) {
		err = -EINVAL;
		goto splice_requeue;
	}

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_requeue;

	if (chunk < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;
		goto splice_requeue;
	}

	consume_skb(skb);

splice_read_end:
	tls_rx_reader_unlock(sk, ctx);
	return copied ? : err;

splice_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto splice_read_end;
}

int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
		     sk_read_actor_t read_actor)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = NULL;
	struct sk_buff *skb = NULL;
	struct sk_psock *psock;
	size_t flushed_at = 0;
	bool released = true;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t decrypted;
	int err, used;

	psock = sk_psock_get(sk);
	if (psock) {
		sk_psock_put(sk, psock);
		return -EINVAL;
	}
	err = tls_rx_reader_acquire(sk, ctx, true);
	if (err < 0)
		return err;

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto read_sock_end;

	decrypted = 0;
	do {
		if (!skb_queue_empty(&ctx->rx_list)) {
			skb = __skb_dequeue(&ctx->rx_list);
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
		} else {
			struct tls_decrypt_arg darg;

			err = tls_rx_rec_wait(sk, NULL, true, released);
			if (err <= 0)
				goto read_sock_end;

			memset(&darg.inargs, 0, sizeof(darg.inargs));

			err = tls_rx_one_record(sk, NULL, &darg);
			if (err < 0) {
				tls_err_abort(sk, -EBADMSG);
				goto read_sock_end;
			}

			released = tls_read_flush_backlog(sk, prot, INT_MAX,
							  0, decrypted,
							  &flushed_at);
			skb = darg.skb;
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
			decrypted += rxm->full_len;

			tls_rx_rec_done(ctx);
		}

		/* read_sock does not support reading control messages */
		if (tlm->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto read_sock_requeue;
		}

		used = read_actor(desc, skb, rxm->offset, rxm->full_len);
		if (used <= 0) {
			if (!copied)
				err = used;
			goto read_sock_requeue;
		}
		copied += used;
		if (used < rxm->full_len) {
			rxm->offset += used;
			rxm->full_len -= used;
			if (!desc->count)
				goto read_sock_requeue;
		} else {
			consume_skb(skb);
			if (!desc->count)
				skb = NULL;
		}
	} while (skb);

read_sock_end:
	tls_rx_reader_release(sk, ctx);
	return copied ? : err;

read_sock_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto read_sock_end;
}

bool tls_sw_sock_is_readable(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || tls_strp_msg_ready(ctx) ||
		!skb_queue_empty(&ctx->rx_list);
}

int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (strp->stm.offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	strp->mark = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
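
/* Worked example (illustrative, not part of the original source): for a
 * TLS application-data record the 5-byte header might be
 *
 *	17 03 03 01 05
 *
 * i.e. record type 0x17 (application data), the two version bytes 0x03 0x03
 * (TLS 1.2 on the wire, also used by TLS 1.3), and a 16-bit length.
 * tls_rx_msg_size() above then computes data_len = (0x05 | (0x01 << 8)),
 * which is 261 bytes of ciphertext, and tells the strparser that the full
 * record spans data_len + TLS_HEADER_SIZE, i.e. 261 + 5 = 266 bytes.
 */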

void tls_rx_msg_ready(struct tls_strparser *strp)
{
	struct tls_sw_context_rx *ctx;

	ctx = container_of(strp, struct tls_sw_context_rx, strp);
	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;
	gfp_t alloc_save;

	trace_sk_data_ready(sk);

	alloc_save = sk->sk_allocation;
	sk->sk_allocation = GFP_ATOMIC;
	tls_strp_data_ready(&ctx->strp);
	sk->sk_allocation = alloc_save;

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}

void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}

void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	int pending;

	/* Wait for any pending async encryptions to complete */
	spin_lock_bh(&ctx->encrypt_compl_lock);
	ctx->async_notify = true;
	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}

void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		__skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		tls_strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}

void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_strp_done(&ctx->strp);
}

void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}

/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	if (mutex_trylock(&tls_ctx->tx_lock)) {
		lock_sock(sk);
		tls_tx_records(sk, -1);
		release_sock(sk);
		mutex_unlock(&tls_ctx->tx_lock);
	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		/* Someone is holding the tx_lock, they will likely run Tx
		 * and cancel the work on their way out of the lock section.
		 * Schedule a long delay just in case.
		 */
		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
	}
}

static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (tls_is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}

void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}

void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
		tls_ctx->prot_info.version != TLS_1_3_VERSION;
}

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt;
	const struct tls_cipher_desc *cipher_desc;
	u16 nonce_size;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
		init_waitqueue_head(&sw_ctx_rx->wq);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);
		skb_queue_head_init(&sw_ctx_rx->async_hold);
		aead = &sw_ctx_rx->aead_recv;
	}

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc) {
		rc = -EINVAL;
		goto free_priv;
	}

	nonce_size = cipher_desc->nonce;

	iv = crypto_info_iv(crypto_info, cipher_desc);
	key = crypto_info_key(crypto_info, cipher_desc);
	salt = crypto_info_salt(crypto_info, cipher_desc);
	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (nonce_size > MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = cipher_desc->tag;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = cipher_desc->iv;
	prot->salt_size = cipher_desc->salt;
	cctx->iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = cipher_desc->rec_seq;
	memcpy(cctx->iv, salt, cipher_desc->salt);
	memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);

	cctx->rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		tls_update_rx_zc_capable(ctx);
		sw_ctx_rx->async_capable =
			crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);

		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
		if (rc)
			goto free_aead;
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}
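
/* Illustrative only, not part of the original source: tls_set_sw_offload()
 * above is reached when userspace installs the "tls" ULP on a connected TCP
 * socket and then hands the kernel the key material negotiated during the
 * handshake.  A minimal TLS 1.2 / AES-128-GCM setup looks roughly like this
 * (userspace code, assumes <linux/tls.h> and <netinet/tcp.h>; on older
 * headers SOL_TLS may need to be defined as 282):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	(fill ci.key, ci.iv, ci.salt and ci.rec_seq from the handshake)
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 *
 * After TLS_TX/TLS_RX are set, plain send()/recv() on the socket go through
 * the software encrypt/decrypt paths implemented in this file.
 */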