/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
#include <trace/events/sock.h>

#include "tls.h"

struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;
	bool async;
	u8 tail;
	);

	struct sk_buff *skb;
};

struct tls_decrypt_ctx {
	struct sock *sk;
	u8 iv[MAX_IV_SIZE];
	u8 aad[TLS_MAX_AAD_SIZE];
	u8 tail;
	struct scatterlist sg[];
};

noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code.
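	 * Callers pass a negative errno (e.g. -EBADMSG), which is negated
	 * before being stored in sk->sk_err below.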
*/ 73 WRITE_ONCE(sk->sk_err, -err); 74 /* Paired with smp_rmb() in tcp_poll() */ 75 smp_wmb(); 76 sk_error_report(sk); 77 } 78 79 static int __skb_nsg(struct sk_buff *skb, int offset, int len, 80 unsigned int recursion_level) 81 { 82 int start = skb_headlen(skb); 83 int i, chunk = start - offset; 84 struct sk_buff *frag_iter; 85 int elt = 0; 86 87 if (unlikely(recursion_level >= 24)) 88 return -EMSGSIZE; 89 90 if (chunk > 0) { 91 if (chunk > len) 92 chunk = len; 93 elt++; 94 len -= chunk; 95 if (len == 0) 96 return elt; 97 offset += chunk; 98 } 99 100 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 101 int end; 102 103 WARN_ON(start > offset + len); 104 105 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 106 chunk = end - offset; 107 if (chunk > 0) { 108 if (chunk > len) 109 chunk = len; 110 elt++; 111 len -= chunk; 112 if (len == 0) 113 return elt; 114 offset += chunk; 115 } 116 start = end; 117 } 118 119 if (unlikely(skb_has_frag_list(skb))) { 120 skb_walk_frags(skb, frag_iter) { 121 int end, ret; 122 123 WARN_ON(start > offset + len); 124 125 end = start + frag_iter->len; 126 chunk = end - offset; 127 if (chunk > 0) { 128 if (chunk > len) 129 chunk = len; 130 ret = __skb_nsg(frag_iter, offset - start, chunk, 131 recursion_level + 1); 132 if (unlikely(ret < 0)) 133 return ret; 134 elt += ret; 135 len -= chunk; 136 if (len == 0) 137 return elt; 138 offset += chunk; 139 } 140 start = end; 141 } 142 } 143 BUG_ON(len); 144 return elt; 145 } 146 147 /* Return the number of scatterlist elements required to completely map the 148 * skb, or -EMSGSIZE if the recursion depth is exceeded. 149 */ 150 static int skb_nsg(struct sk_buff *skb, int offset, int len) 151 { 152 return __skb_nsg(skb, offset, len, 0); 153 } 154 155 static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb, 156 struct tls_decrypt_arg *darg) 157 { 158 struct strp_msg *rxm = strp_msg(skb); 159 struct tls_msg *tlm = tls_msg(skb); 160 int sub = 0; 161 162 /* Determine zero-padding length */ 163 if (prot->version == TLS_1_3_VERSION) { 164 int offset = rxm->full_len - TLS_TAG_SIZE - 1; 165 char content_type = darg->zc ? 
darg->tail : 0; 166 int err; 167 168 while (content_type == 0) { 169 if (offset < prot->prepend_size) 170 return -EBADMSG; 171 err = skb_copy_bits(skb, rxm->offset + offset, 172 &content_type, 1); 173 if (err) 174 return err; 175 if (content_type) 176 break; 177 sub++; 178 offset--; 179 } 180 tlm->control = content_type; 181 } 182 return sub; 183 } 184 185 static void tls_decrypt_done(void *data, int err) 186 { 187 struct aead_request *aead_req = data; 188 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); 189 struct scatterlist *sgout = aead_req->dst; 190 struct scatterlist *sgin = aead_req->src; 191 struct tls_sw_context_rx *ctx; 192 struct tls_decrypt_ctx *dctx; 193 struct tls_context *tls_ctx; 194 struct scatterlist *sg; 195 unsigned int pages; 196 struct sock *sk; 197 int aead_size; 198 199 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead); 200 aead_size = ALIGN(aead_size, __alignof__(*dctx)); 201 dctx = (void *)((u8 *)aead_req + aead_size); 202 203 sk = dctx->sk; 204 tls_ctx = tls_get_ctx(sk); 205 ctx = tls_sw_ctx_rx(tls_ctx); 206 207 /* Propagate if there was an err */ 208 if (err) { 209 if (err == -EBADMSG) 210 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); 211 ctx->async_wait.err = err; 212 tls_err_abort(sk, err); 213 } 214 215 /* Free the destination pages if skb was not decrypted inplace */ 216 if (sgout != sgin) { 217 /* Skip the first S/G entry as it points to AAD */ 218 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) { 219 if (!sg) 220 break; 221 put_page(sg_page(sg)); 222 } 223 } 224 225 kfree(aead_req); 226 227 if (atomic_dec_and_test(&ctx->decrypt_pending)) 228 complete(&ctx->async_wait.completion); 229 } 230 231 static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx) 232 { 233 if (!atomic_dec_and_test(&ctx->decrypt_pending)) 234 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 235 atomic_inc(&ctx->decrypt_pending); 236 237 return ctx->async_wait.err; 238 } 239 240 static int tls_do_decryption(struct sock *sk, 241 struct scatterlist *sgin, 242 struct scatterlist *sgout, 243 char *iv_recv, 244 size_t data_len, 245 struct aead_request *aead_req, 246 struct tls_decrypt_arg *darg) 247 { 248 struct tls_context *tls_ctx = tls_get_ctx(sk); 249 struct tls_prot_info *prot = &tls_ctx->prot_info; 250 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 251 int ret; 252 253 aead_request_set_tfm(aead_req, ctx->aead_recv); 254 aead_request_set_ad(aead_req, prot->aad_size); 255 aead_request_set_crypt(aead_req, sgin, sgout, 256 data_len + prot->tag_size, 257 (u8 *)iv_recv); 258 259 if (darg->async) { 260 aead_request_set_callback(aead_req, 261 CRYPTO_TFM_REQ_MAY_BACKLOG, 262 tls_decrypt_done, aead_req); 263 DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1); 264 atomic_inc(&ctx->decrypt_pending); 265 } else { 266 aead_request_set_callback(aead_req, 267 CRYPTO_TFM_REQ_MAY_BACKLOG, 268 crypto_req_done, &ctx->async_wait); 269 } 270 271 ret = crypto_aead_decrypt(aead_req); 272 if (ret == -EINPROGRESS) { 273 if (darg->async) 274 return 0; 275 276 ret = crypto_wait_req(ret, &ctx->async_wait); 277 } 278 darg->async = false; 279 280 return ret; 281 } 282 283 static void tls_trim_both_msgs(struct sock *sk, int target_size) 284 { 285 struct tls_context *tls_ctx = tls_get_ctx(sk); 286 struct tls_prot_info *prot = &tls_ctx->prot_info; 287 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 288 struct tls_rec *rec = ctx->open_rec; 289 290 sk_msg_trim(sk, &rec->msg_plaintext, target_size); 291 if (target_size > 0) 292 target_size += 
prot->overhead_size; 293 sk_msg_trim(sk, &rec->msg_encrypted, target_size); 294 } 295 296 static int tls_alloc_encrypted_msg(struct sock *sk, int len) 297 { 298 struct tls_context *tls_ctx = tls_get_ctx(sk); 299 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 300 struct tls_rec *rec = ctx->open_rec; 301 struct sk_msg *msg_en = &rec->msg_encrypted; 302 303 return sk_msg_alloc(sk, msg_en, len, 0); 304 } 305 306 static int tls_clone_plaintext_msg(struct sock *sk, int required) 307 { 308 struct tls_context *tls_ctx = tls_get_ctx(sk); 309 struct tls_prot_info *prot = &tls_ctx->prot_info; 310 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 311 struct tls_rec *rec = ctx->open_rec; 312 struct sk_msg *msg_pl = &rec->msg_plaintext; 313 struct sk_msg *msg_en = &rec->msg_encrypted; 314 int skip, len; 315 316 /* We add page references worth len bytes from encrypted sg 317 * at the end of plaintext sg. It is guaranteed that msg_en 318 * has enough required room (ensured by caller). 319 */ 320 len = required - msg_pl->sg.size; 321 322 /* Skip initial bytes in msg_en's data to be able to use 323 * same offset of both plain and encrypted data. 324 */ 325 skip = prot->prepend_size + msg_pl->sg.size; 326 327 return sk_msg_clone(sk, msg_pl, msg_en, skip, len); 328 } 329 330 static struct tls_rec *tls_get_rec(struct sock *sk) 331 { 332 struct tls_context *tls_ctx = tls_get_ctx(sk); 333 struct tls_prot_info *prot = &tls_ctx->prot_info; 334 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 335 struct sk_msg *msg_pl, *msg_en; 336 struct tls_rec *rec; 337 int mem_size; 338 339 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send); 340 341 rec = kzalloc(mem_size, sk->sk_allocation); 342 if (!rec) 343 return NULL; 344 345 msg_pl = &rec->msg_plaintext; 346 msg_en = &rec->msg_encrypted; 347 348 sk_msg_init(msg_pl); 349 sk_msg_init(msg_en); 350 351 sg_init_table(rec->sg_aead_in, 2); 352 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size); 353 sg_unmark_end(&rec->sg_aead_in[1]); 354 355 sg_init_table(rec->sg_aead_out, 2); 356 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size); 357 sg_unmark_end(&rec->sg_aead_out[1]); 358 359 rec->sk = sk; 360 361 return rec; 362 } 363 364 static void tls_free_rec(struct sock *sk, struct tls_rec *rec) 365 { 366 sk_msg_free(sk, &rec->msg_encrypted); 367 sk_msg_free(sk, &rec->msg_plaintext); 368 kfree(rec); 369 } 370 371 static void tls_free_open_rec(struct sock *sk) 372 { 373 struct tls_context *tls_ctx = tls_get_ctx(sk); 374 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 375 struct tls_rec *rec = ctx->open_rec; 376 377 if (rec) { 378 tls_free_rec(sk, rec); 379 ctx->open_rec = NULL; 380 } 381 } 382 383 int tls_tx_records(struct sock *sk, int flags) 384 { 385 struct tls_context *tls_ctx = tls_get_ctx(sk); 386 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 387 struct tls_rec *rec, *tmp; 388 struct sk_msg *msg_en; 389 int tx_flags, rc = 0; 390 391 if (tls_is_partially_sent_record(tls_ctx)) { 392 rec = list_first_entry(&ctx->tx_list, 393 struct tls_rec, list); 394 395 if (flags == -1) 396 tx_flags = rec->tx_flags; 397 else 398 tx_flags = flags; 399 400 rc = tls_push_partial_record(sk, tls_ctx, tx_flags); 401 if (rc) 402 goto tx_err; 403 404 /* Full record has been transmitted. 
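		 * (tls_push_partial_record() returned 0, so nothing of it is
		 * left pending).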
405 * Remove the head of tx_list 406 */ 407 list_del(&rec->list); 408 sk_msg_free(sk, &rec->msg_plaintext); 409 kfree(rec); 410 } 411 412 /* Tx all ready records */ 413 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { 414 if (READ_ONCE(rec->tx_ready)) { 415 if (flags == -1) 416 tx_flags = rec->tx_flags; 417 else 418 tx_flags = flags; 419 420 msg_en = &rec->msg_encrypted; 421 rc = tls_push_sg(sk, tls_ctx, 422 &msg_en->sg.data[msg_en->sg.curr], 423 0, tx_flags); 424 if (rc) 425 goto tx_err; 426 427 list_del(&rec->list); 428 sk_msg_free(sk, &rec->msg_plaintext); 429 kfree(rec); 430 } else { 431 break; 432 } 433 } 434 435 tx_err: 436 if (rc < 0 && rc != -EAGAIN) 437 tls_err_abort(sk, -EBADMSG); 438 439 return rc; 440 } 441 442 static void tls_encrypt_done(void *data, int err) 443 { 444 struct tls_sw_context_tx *ctx; 445 struct tls_context *tls_ctx; 446 struct tls_prot_info *prot; 447 struct tls_rec *rec = data; 448 struct scatterlist *sge; 449 struct sk_msg *msg_en; 450 struct sock *sk; 451 452 msg_en = &rec->msg_encrypted; 453 454 sk = rec->sk; 455 tls_ctx = tls_get_ctx(sk); 456 prot = &tls_ctx->prot_info; 457 ctx = tls_sw_ctx_tx(tls_ctx); 458 459 sge = sk_msg_elem(msg_en, msg_en->sg.curr); 460 sge->offset -= prot->prepend_size; 461 sge->length += prot->prepend_size; 462 463 /* Check if error is previously set on socket */ 464 if (err || sk->sk_err) { 465 rec = NULL; 466 467 /* If err is already set on socket, return the same code */ 468 if (sk->sk_err) { 469 ctx->async_wait.err = -sk->sk_err; 470 } else { 471 ctx->async_wait.err = err; 472 tls_err_abort(sk, err); 473 } 474 } 475 476 if (rec) { 477 struct tls_rec *first_rec; 478 479 /* Mark the record as ready for transmission */ 480 smp_store_mb(rec->tx_ready, true); 481 482 /* If received record is at head of tx_list, schedule tx */ 483 first_rec = list_first_entry(&ctx->tx_list, 484 struct tls_rec, list); 485 if (rec == first_rec) { 486 /* Schedule the transmission */ 487 if (!test_and_set_bit(BIT_TX_SCHEDULED, 488 &ctx->tx_bitmask)) 489 schedule_delayed_work(&ctx->tx_work.work, 1); 490 } 491 } 492 493 if (atomic_dec_and_test(&ctx->encrypt_pending)) 494 complete(&ctx->async_wait.completion); 495 } 496 497 static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx) 498 { 499 if (!atomic_dec_and_test(&ctx->encrypt_pending)) 500 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 501 atomic_inc(&ctx->encrypt_pending); 502 503 return ctx->async_wait.err; 504 } 505 506 static int tls_do_encryption(struct sock *sk, 507 struct tls_context *tls_ctx, 508 struct tls_sw_context_tx *ctx, 509 struct aead_request *aead_req, 510 size_t data_len, u32 start) 511 { 512 struct tls_prot_info *prot = &tls_ctx->prot_info; 513 struct tls_rec *rec = ctx->open_rec; 514 struct sk_msg *msg_en = &rec->msg_encrypted; 515 struct scatterlist *sge = sk_msg_elem(msg_en, start); 516 int rc, iv_offset = 0; 517 518 /* For CCM based ciphers, first byte of IV is a constant */ 519 switch (prot->cipher_type) { 520 case TLS_CIPHER_AES_CCM_128: 521 rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE; 522 iv_offset = 1; 523 break; 524 case TLS_CIPHER_SM4_CCM: 525 rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE; 526 iv_offset = 1; 527 break; 528 } 529 530 memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, 531 prot->iv_size + prot->salt_size); 532 533 tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset, 534 tls_ctx->tx.rec_seq); 535 536 sge->offset += prot->prepend_size; 537 sge->length -= prot->prepend_size; 538 539 msg_en->sg.curr = start; 540 541 aead_request_set_tfm(aead_req, 
ctx->aead_send); 542 aead_request_set_ad(aead_req, prot->aad_size); 543 aead_request_set_crypt(aead_req, rec->sg_aead_in, 544 rec->sg_aead_out, 545 data_len, rec->iv_data); 546 547 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, 548 tls_encrypt_done, rec); 549 550 /* Add the record in tx_list */ 551 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list); 552 DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1); 553 atomic_inc(&ctx->encrypt_pending); 554 555 rc = crypto_aead_encrypt(aead_req); 556 if (!rc || rc != -EINPROGRESS) { 557 atomic_dec(&ctx->encrypt_pending); 558 sge->offset -= prot->prepend_size; 559 sge->length += prot->prepend_size; 560 } 561 562 if (!rc) { 563 WRITE_ONCE(rec->tx_ready, true); 564 } else if (rc != -EINPROGRESS) { 565 list_del(&rec->list); 566 return rc; 567 } 568 569 /* Unhook the record from context if encryption is not failure */ 570 ctx->open_rec = NULL; 571 tls_advance_record_sn(sk, prot, &tls_ctx->tx); 572 return rc; 573 } 574 575 static int tls_split_open_record(struct sock *sk, struct tls_rec *from, 576 struct tls_rec **to, struct sk_msg *msg_opl, 577 struct sk_msg *msg_oen, u32 split_point, 578 u32 tx_overhead_size, u32 *orig_end) 579 { 580 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes; 581 struct scatterlist *sge, *osge, *nsge; 582 u32 orig_size = msg_opl->sg.size; 583 struct scatterlist tmp = { }; 584 struct sk_msg *msg_npl; 585 struct tls_rec *new; 586 int ret; 587 588 new = tls_get_rec(sk); 589 if (!new) 590 return -ENOMEM; 591 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size + 592 tx_overhead_size, 0); 593 if (ret < 0) { 594 tls_free_rec(sk, new); 595 return ret; 596 } 597 598 *orig_end = msg_opl->sg.end; 599 i = msg_opl->sg.start; 600 sge = sk_msg_elem(msg_opl, i); 601 while (apply && sge->length) { 602 if (sge->length > apply) { 603 u32 len = sge->length - apply; 604 605 get_page(sg_page(sge)); 606 sg_set_page(&tmp, sg_page(sge), len, 607 sge->offset + apply); 608 sge->length = apply; 609 bytes += apply; 610 apply = 0; 611 } else { 612 apply -= sge->length; 613 bytes += sge->length; 614 } 615 616 sk_msg_iter_var_next(i); 617 if (i == msg_opl->sg.end) 618 break; 619 sge = sk_msg_elem(msg_opl, i); 620 } 621 622 msg_opl->sg.end = i; 623 msg_opl->sg.curr = i; 624 msg_opl->sg.copybreak = 0; 625 msg_opl->apply_bytes = 0; 626 msg_opl->sg.size = bytes; 627 628 msg_npl = &new->msg_plaintext; 629 msg_npl->apply_bytes = apply; 630 msg_npl->sg.size = orig_size - bytes; 631 632 j = msg_npl->sg.start; 633 nsge = sk_msg_elem(msg_npl, j); 634 if (tmp.length) { 635 memcpy(nsge, &tmp, sizeof(*nsge)); 636 sk_msg_iter_var_next(j); 637 nsge = sk_msg_elem(msg_npl, j); 638 } 639 640 osge = sk_msg_elem(msg_opl, i); 641 while (osge->length) { 642 memcpy(nsge, osge, sizeof(*nsge)); 643 sg_unmark_end(nsge); 644 sk_msg_iter_var_next(i); 645 sk_msg_iter_var_next(j); 646 if (i == *orig_end) 647 break; 648 osge = sk_msg_elem(msg_opl, i); 649 nsge = sk_msg_elem(msg_npl, j); 650 } 651 652 msg_npl->sg.end = j; 653 msg_npl->sg.curr = j; 654 msg_npl->sg.copybreak = 0; 655 656 *to = new; 657 return 0; 658 } 659 660 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to, 661 struct tls_rec *from, u32 orig_end) 662 { 663 struct sk_msg *msg_npl = &from->msg_plaintext; 664 struct sk_msg *msg_opl = &to->msg_plaintext; 665 struct scatterlist *osge, *nsge; 666 u32 i, j; 667 668 i = msg_opl->sg.end; 669 sk_msg_iter_var_prev(i); 670 j = msg_npl->sg.start; 671 672 osge = sk_msg_elem(msg_opl, i); 673 nsge = sk_msg_elem(msg_npl, j); 
674 675 if (sg_page(osge) == sg_page(nsge) && 676 osge->offset + osge->length == nsge->offset) { 677 osge->length += nsge->length; 678 put_page(sg_page(nsge)); 679 } 680 681 msg_opl->sg.end = orig_end; 682 msg_opl->sg.curr = orig_end; 683 msg_opl->sg.copybreak = 0; 684 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size; 685 msg_opl->sg.size += msg_npl->sg.size; 686 687 sk_msg_free(sk, &to->msg_encrypted); 688 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted); 689 690 kfree(from); 691 } 692 693 static int tls_push_record(struct sock *sk, int flags, 694 unsigned char record_type) 695 { 696 struct tls_context *tls_ctx = tls_get_ctx(sk); 697 struct tls_prot_info *prot = &tls_ctx->prot_info; 698 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 699 struct tls_rec *rec = ctx->open_rec, *tmp = NULL; 700 u32 i, split_point, orig_end; 701 struct sk_msg *msg_pl, *msg_en; 702 struct aead_request *req; 703 bool split; 704 int rc; 705 706 if (!rec) 707 return 0; 708 709 msg_pl = &rec->msg_plaintext; 710 msg_en = &rec->msg_encrypted; 711 712 split_point = msg_pl->apply_bytes; 713 split = split_point && split_point < msg_pl->sg.size; 714 if (unlikely((!split && 715 msg_pl->sg.size + 716 prot->overhead_size > msg_en->sg.size) || 717 (split && 718 split_point + 719 prot->overhead_size > msg_en->sg.size))) { 720 split = true; 721 split_point = msg_en->sg.size; 722 } 723 if (split) { 724 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en, 725 split_point, prot->overhead_size, 726 &orig_end); 727 if (rc < 0) 728 return rc; 729 /* This can happen if above tls_split_open_record allocates 730 * a single large encryption buffer instead of two smaller 731 * ones. In this case adjust pointers and continue without 732 * split. 733 */ 734 if (!msg_pl->sg.size) { 735 tls_merge_open_record(sk, rec, tmp, orig_end); 736 msg_pl = &rec->msg_plaintext; 737 msg_en = &rec->msg_encrypted; 738 split = false; 739 } 740 sk_msg_trim(sk, msg_en, msg_pl->sg.size + 741 prot->overhead_size); 742 } 743 744 rec->tx_flags = flags; 745 req = &rec->aead_req; 746 747 i = msg_pl->sg.end; 748 sk_msg_iter_var_prev(i); 749 750 rec->content_type = record_type; 751 if (prot->version == TLS_1_3_VERSION) { 752 /* Add content type to end of message. 
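		 * (per RFC 8446 the real record type is carried as the last
		 * byte of the inner plaintext, while the outer header claims
		 * application_data).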
No padding added */ 753 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1); 754 sg_mark_end(&rec->sg_content_type); 755 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1, 756 &rec->sg_content_type); 757 } else { 758 sg_mark_end(sk_msg_elem(msg_pl, i)); 759 } 760 761 if (msg_pl->sg.end < msg_pl->sg.start) { 762 sg_chain(&msg_pl->sg.data[msg_pl->sg.start], 763 MAX_SKB_FRAGS - msg_pl->sg.start + 1, 764 msg_pl->sg.data); 765 } 766 767 i = msg_pl->sg.start; 768 sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]); 769 770 i = msg_en->sg.end; 771 sk_msg_iter_var_prev(i); 772 sg_mark_end(sk_msg_elem(msg_en, i)); 773 774 i = msg_en->sg.start; 775 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]); 776 777 tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size, 778 tls_ctx->tx.rec_seq, record_type, prot); 779 780 tls_fill_prepend(tls_ctx, 781 page_address(sg_page(&msg_en->sg.data[i])) + 782 msg_en->sg.data[i].offset, 783 msg_pl->sg.size + prot->tail_size, 784 record_type); 785 786 tls_ctx->pending_open_record_frags = false; 787 788 rc = tls_do_encryption(sk, tls_ctx, ctx, req, 789 msg_pl->sg.size + prot->tail_size, i); 790 if (rc < 0) { 791 if (rc != -EINPROGRESS) { 792 tls_err_abort(sk, -EBADMSG); 793 if (split) { 794 tls_ctx->pending_open_record_frags = true; 795 tls_merge_open_record(sk, rec, tmp, orig_end); 796 } 797 } 798 ctx->async_capable = 1; 799 return rc; 800 } else if (split) { 801 msg_pl = &tmp->msg_plaintext; 802 msg_en = &tmp->msg_encrypted; 803 sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size); 804 tls_ctx->pending_open_record_frags = true; 805 ctx->open_rec = tmp; 806 } 807 808 return tls_tx_records(sk, flags); 809 } 810 811 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, 812 bool full_record, u8 record_type, 813 ssize_t *copied, int flags) 814 { 815 struct tls_context *tls_ctx = tls_get_ctx(sk); 816 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 817 struct sk_msg msg_redir = { }; 818 struct sk_psock *psock; 819 struct sock *sk_redir; 820 struct tls_rec *rec; 821 bool enospc, policy, redir_ingress; 822 int err = 0, send; 823 u32 delta = 0; 824 825 policy = !(flags & MSG_SENDPAGE_NOPOLICY); 826 psock = sk_psock_get(sk); 827 if (!psock || !policy) { 828 err = tls_push_record(sk, flags, record_type); 829 if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) { 830 *copied -= sk_msg_free(sk, msg); 831 tls_free_open_rec(sk); 832 err = -sk->sk_err; 833 } 834 if (psock) 835 sk_psock_put(sk, psock); 836 return err; 837 } 838 more_data: 839 enospc = sk_msg_full(msg); 840 if (psock->eval == __SK_NONE) { 841 delta = msg->sg.size; 842 psock->eval = sk_psock_msg_verdict(sk, psock, msg); 843 delta -= msg->sg.size; 844 } 845 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size && 846 !enospc && !full_record) { 847 err = -ENOSPC; 848 goto out_err; 849 } 850 msg->cork_bytes = 0; 851 send = msg->sg.size; 852 if (msg->apply_bytes && msg->apply_bytes < send) 853 send = msg->apply_bytes; 854 855 switch (psock->eval) { 856 case __SK_PASS: 857 err = tls_push_record(sk, flags, record_type); 858 if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) { 859 *copied -= sk_msg_free(sk, msg); 860 tls_free_open_rec(sk); 861 err = -sk->sk_err; 862 goto out_err; 863 } 864 break; 865 case __SK_REDIRECT: 866 redir_ingress = psock->redir_ingress; 867 sk_redir = psock->sk_redir; 868 memcpy(&msg_redir, msg, sizeof(*msg)); 869 if (msg->apply_bytes < send) 870 msg->apply_bytes = 0; 871 else 872 msg->apply_bytes -= send; 873 sk_msg_return_zero(sk, msg, send); 874 
msg->sg.size -= send; 875 release_sock(sk); 876 err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress, 877 &msg_redir, send, flags); 878 lock_sock(sk); 879 if (err < 0) { 880 *copied -= sk_msg_free_nocharge(sk, &msg_redir); 881 msg->sg.size = 0; 882 } 883 if (msg->sg.size == 0) 884 tls_free_open_rec(sk); 885 break; 886 case __SK_DROP: 887 default: 888 sk_msg_free_partial(sk, msg, send); 889 if (msg->apply_bytes < send) 890 msg->apply_bytes = 0; 891 else 892 msg->apply_bytes -= send; 893 if (msg->sg.size == 0) 894 tls_free_open_rec(sk); 895 *copied -= (send + delta); 896 err = -EACCES; 897 } 898 899 if (likely(!err)) { 900 bool reset_eval = !ctx->open_rec; 901 902 rec = ctx->open_rec; 903 if (rec) { 904 msg = &rec->msg_plaintext; 905 if (!msg->apply_bytes) 906 reset_eval = true; 907 } 908 if (reset_eval) { 909 psock->eval = __SK_NONE; 910 if (psock->sk_redir) { 911 sock_put(psock->sk_redir); 912 psock->sk_redir = NULL; 913 } 914 } 915 if (rec) 916 goto more_data; 917 } 918 out_err: 919 sk_psock_put(sk, psock); 920 return err; 921 } 922 923 static int tls_sw_push_pending_record(struct sock *sk, int flags) 924 { 925 struct tls_context *tls_ctx = tls_get_ctx(sk); 926 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 927 struct tls_rec *rec = ctx->open_rec; 928 struct sk_msg *msg_pl; 929 size_t copied; 930 931 if (!rec) 932 return 0; 933 934 msg_pl = &rec->msg_plaintext; 935 copied = msg_pl->sg.size; 936 if (!copied) 937 return 0; 938 939 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA, 940 &copied, flags); 941 } 942 943 static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg, 944 struct sk_msg *msg_pl, size_t try_to_copy, 945 ssize_t *copied) 946 { 947 struct page *page = NULL, **pages = &page; 948 949 do { 950 ssize_t part; 951 size_t off; 952 953 part = iov_iter_extract_pages(&msg->msg_iter, &pages, 954 try_to_copy, 1, 0, &off); 955 if (part <= 0) 956 return part ?: -EIO; 957 958 if (WARN_ON_ONCE(!sendpage_ok(page))) { 959 iov_iter_revert(&msg->msg_iter, part); 960 return -EIO; 961 } 962 963 sk_msg_page_add(msg_pl, page, part, off); 964 msg_pl->sg.copybreak = 0; 965 msg_pl->sg.curr = msg_pl->sg.end; 966 sk_mem_charge(sk, part); 967 *copied += part; 968 try_to_copy -= part; 969 } while (try_to_copy && !sk_msg_full(msg_pl)); 970 971 return 0; 972 } 973 974 static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg, 975 size_t size) 976 { 977 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 978 struct tls_context *tls_ctx = tls_get_ctx(sk); 979 struct tls_prot_info *prot = &tls_ctx->prot_info; 980 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 981 bool async_capable = ctx->async_capable; 982 unsigned char record_type = TLS_RECORD_TYPE_DATA; 983 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 984 bool eor = !(msg->msg_flags & MSG_MORE); 985 size_t try_to_copy; 986 ssize_t copied = 0; 987 struct sk_msg *msg_pl, *msg_en; 988 struct tls_rec *rec; 989 int required_size; 990 int num_async = 0; 991 bool full_record; 992 int record_room; 993 int num_zc = 0; 994 int orig_size; 995 int ret = 0; 996 997 if (!eor && (msg->msg_flags & MSG_EOR)) 998 return -EINVAL; 999 1000 if (unlikely(msg->msg_controllen)) { 1001 ret = tls_process_cmsg(sk, msg, &record_type); 1002 if (ret) { 1003 if (ret == -EINPROGRESS) 1004 num_async++; 1005 else if (ret != -EAGAIN) 1006 goto send_end; 1007 } 1008 } 1009 1010 while (msg_data_left(msg)) { 1011 if (sk->sk_err) { 1012 ret = -sk->sk_err; 1013 goto send_end; 1014 } 1015 1016 if (ctx->open_rec) 1017 rec = 
ctx->open_rec; 1018 else 1019 rec = ctx->open_rec = tls_get_rec(sk); 1020 if (!rec) { 1021 ret = -ENOMEM; 1022 goto send_end; 1023 } 1024 1025 msg_pl = &rec->msg_plaintext; 1026 msg_en = &rec->msg_encrypted; 1027 1028 orig_size = msg_pl->sg.size; 1029 full_record = false; 1030 try_to_copy = msg_data_left(msg); 1031 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size; 1032 if (try_to_copy >= record_room) { 1033 try_to_copy = record_room; 1034 full_record = true; 1035 } 1036 1037 required_size = msg_pl->sg.size + try_to_copy + 1038 prot->overhead_size; 1039 1040 if (!sk_stream_memory_free(sk)) 1041 goto wait_for_sndbuf; 1042 1043 alloc_encrypted: 1044 ret = tls_alloc_encrypted_msg(sk, required_size); 1045 if (ret) { 1046 if (ret != -ENOSPC) 1047 goto wait_for_memory; 1048 1049 /* Adjust try_to_copy according to the amount that was 1050 * actually allocated. The difference is due 1051 * to max sg elements limit 1052 */ 1053 try_to_copy -= required_size - msg_en->sg.size; 1054 full_record = true; 1055 } 1056 1057 if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) { 1058 ret = tls_sw_sendmsg_splice(sk, msg, msg_pl, 1059 try_to_copy, &copied); 1060 if (ret < 0) 1061 goto send_end; 1062 tls_ctx->pending_open_record_frags = true; 1063 1064 if (sk_msg_full(msg_pl)) 1065 full_record = true; 1066 1067 if (full_record || eor) 1068 goto copied; 1069 continue; 1070 } 1071 1072 if (!is_kvec && (full_record || eor) && !async_capable) { 1073 u32 first = msg_pl->sg.end; 1074 1075 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter, 1076 msg_pl, try_to_copy); 1077 if (ret) 1078 goto fallback_to_reg_send; 1079 1080 num_zc++; 1081 copied += try_to_copy; 1082 1083 sk_msg_sg_copy_set(msg_pl, first); 1084 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, 1085 record_type, &copied, 1086 msg->msg_flags); 1087 if (ret) { 1088 if (ret == -EINPROGRESS) 1089 num_async++; 1090 else if (ret == -ENOMEM) 1091 goto wait_for_memory; 1092 else if (ctx->open_rec && ret == -ENOSPC) 1093 goto rollback_iter; 1094 else if (ret != -EAGAIN) 1095 goto send_end; 1096 } 1097 continue; 1098 rollback_iter: 1099 copied -= try_to_copy; 1100 sk_msg_sg_copy_clear(msg_pl, first); 1101 iov_iter_revert(&msg->msg_iter, 1102 msg_pl->sg.size - orig_size); 1103 fallback_to_reg_send: 1104 sk_msg_trim(sk, msg_pl, orig_size); 1105 } 1106 1107 required_size = msg_pl->sg.size + try_to_copy; 1108 1109 ret = tls_clone_plaintext_msg(sk, required_size); 1110 if (ret) { 1111 if (ret != -ENOSPC) 1112 goto send_end; 1113 1114 /* Adjust try_to_copy according to the amount that was 1115 * actually allocated. The difference is due 1116 * to max sg elements limit 1117 */ 1118 try_to_copy -= required_size - msg_pl->sg.size; 1119 full_record = true; 1120 sk_msg_trim(sk, msg_en, 1121 msg_pl->sg.size + prot->overhead_size); 1122 } 1123 1124 if (try_to_copy) { 1125 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, 1126 msg_pl, try_to_copy); 1127 if (ret < 0) 1128 goto trim_sgl; 1129 } 1130 1131 /* Open records defined only if successfully copied, otherwise 1132 * we would trim the sg but not reset the open record frags. 
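		 * The flag below tells the rest of the TX path that a
		 * partially filled record is still open.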
1133 */ 1134 tls_ctx->pending_open_record_frags = true; 1135 copied += try_to_copy; 1136 copied: 1137 if (full_record || eor) { 1138 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, 1139 record_type, &copied, 1140 msg->msg_flags); 1141 if (ret) { 1142 if (ret == -EINPROGRESS) 1143 num_async++; 1144 else if (ret == -ENOMEM) 1145 goto wait_for_memory; 1146 else if (ret != -EAGAIN) { 1147 if (ret == -ENOSPC) 1148 ret = 0; 1149 goto send_end; 1150 } 1151 } 1152 } 1153 1154 continue; 1155 1156 wait_for_sndbuf: 1157 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1158 wait_for_memory: 1159 ret = sk_stream_wait_memory(sk, &timeo); 1160 if (ret) { 1161 trim_sgl: 1162 if (ctx->open_rec) 1163 tls_trim_both_msgs(sk, orig_size); 1164 goto send_end; 1165 } 1166 1167 if (ctx->open_rec && msg_en->sg.size < required_size) 1168 goto alloc_encrypted; 1169 } 1170 1171 if (!num_async) { 1172 goto send_end; 1173 } else if (num_zc) { 1174 int err; 1175 1176 /* Wait for pending encryptions to get completed */ 1177 err = tls_encrypt_async_wait(ctx); 1178 if (err) { 1179 ret = err; 1180 copied = 0; 1181 } 1182 } 1183 1184 /* Transmit if any encryptions have completed */ 1185 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 1186 cancel_delayed_work(&ctx->tx_work.work); 1187 tls_tx_records(sk, msg->msg_flags); 1188 } 1189 1190 send_end: 1191 ret = sk_stream_error(sk, msg->msg_flags, ret); 1192 return copied > 0 ? copied : ret; 1193 } 1194 1195 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1196 { 1197 struct tls_context *tls_ctx = tls_get_ctx(sk); 1198 int ret; 1199 1200 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 1201 MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR | 1202 MSG_SENDPAGE_NOPOLICY)) 1203 return -EOPNOTSUPP; 1204 1205 ret = mutex_lock_interruptible(&tls_ctx->tx_lock); 1206 if (ret) 1207 return ret; 1208 lock_sock(sk); 1209 ret = tls_sw_sendmsg_locked(sk, msg, size); 1210 release_sock(sk); 1211 mutex_unlock(&tls_ctx->tx_lock); 1212 return ret; 1213 } 1214 1215 /* 1216 * Handle unexpected EOF during splice without SPLICE_F_MORE set. 1217 */ 1218 void tls_sw_splice_eof(struct socket *sock) 1219 { 1220 struct sock *sk = sock->sk; 1221 struct tls_context *tls_ctx = tls_get_ctx(sk); 1222 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 1223 struct tls_rec *rec; 1224 struct sk_msg *msg_pl; 1225 ssize_t copied = 0; 1226 bool retrying = false; 1227 int ret = 0; 1228 1229 if (!ctx->open_rec) 1230 return; 1231 1232 mutex_lock(&tls_ctx->tx_lock); 1233 lock_sock(sk); 1234 1235 retry: 1236 /* same checks as in tls_sw_push_pending_record() */ 1237 rec = ctx->open_rec; 1238 if (!rec) 1239 goto unlock; 1240 1241 msg_pl = &rec->msg_plaintext; 1242 if (msg_pl->sg.size == 0) 1243 goto unlock; 1244 1245 /* Check the BPF advisor and perform transmission. 
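	 * When no BPF TX policy is attached this reduces to tls_push_record().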
*/ 1246 ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA, 1247 &copied, 0); 1248 switch (ret) { 1249 case 0: 1250 case -EAGAIN: 1251 if (retrying) 1252 goto unlock; 1253 retrying = true; 1254 goto retry; 1255 case -EINPROGRESS: 1256 break; 1257 default: 1258 goto unlock; 1259 } 1260 1261 /* Wait for pending encryptions to get completed */ 1262 if (tls_encrypt_async_wait(ctx)) 1263 goto unlock; 1264 1265 /* Transmit if any encryptions have completed */ 1266 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 1267 cancel_delayed_work(&ctx->tx_work.work); 1268 tls_tx_records(sk, 0); 1269 } 1270 1271 unlock: 1272 release_sock(sk); 1273 mutex_unlock(&tls_ctx->tx_lock); 1274 } 1275 1276 static int 1277 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, 1278 bool released) 1279 { 1280 struct tls_context *tls_ctx = tls_get_ctx(sk); 1281 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1282 DEFINE_WAIT_FUNC(wait, woken_wake_function); 1283 int ret = 0; 1284 long timeo; 1285 1286 timeo = sock_rcvtimeo(sk, nonblock); 1287 1288 while (!tls_strp_msg_ready(ctx)) { 1289 if (!sk_psock_queue_empty(psock)) 1290 return 0; 1291 1292 if (sk->sk_err) 1293 return sock_error(sk); 1294 1295 if (ret < 0) 1296 return ret; 1297 1298 if (!skb_queue_empty(&sk->sk_receive_queue)) { 1299 tls_strp_check_rcv(&ctx->strp); 1300 if (tls_strp_msg_ready(ctx)) 1301 break; 1302 } 1303 1304 if (sk->sk_shutdown & RCV_SHUTDOWN) 1305 return 0; 1306 1307 if (sock_flag(sk, SOCK_DONE)) 1308 return 0; 1309 1310 if (!timeo) 1311 return -EAGAIN; 1312 1313 released = true; 1314 add_wait_queue(sk_sleep(sk), &wait); 1315 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1316 ret = sk_wait_event(sk, &timeo, 1317 tls_strp_msg_ready(ctx) || 1318 !sk_psock_queue_empty(psock), 1319 &wait); 1320 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1321 remove_wait_queue(sk_sleep(sk), &wait); 1322 1323 /* Handle signals */ 1324 if (signal_pending(current)) 1325 return sock_intr_errno(timeo); 1326 } 1327 1328 tls_strp_msg_load(&ctx->strp, released); 1329 1330 return 1; 1331 } 1332 1333 static int tls_setup_from_iter(struct iov_iter *from, 1334 int length, int *pages_used, 1335 struct scatterlist *to, 1336 int to_max_pages) 1337 { 1338 int rc = 0, i = 0, num_elem = *pages_used, maxpages; 1339 struct page *pages[MAX_SKB_FRAGS]; 1340 unsigned int size = 0; 1341 ssize_t copied, use; 1342 size_t offset; 1343 1344 while (length > 0) { 1345 i = 0; 1346 maxpages = to_max_pages - num_elem; 1347 if (maxpages == 0) { 1348 rc = -EFAULT; 1349 goto out; 1350 } 1351 copied = iov_iter_get_pages2(from, pages, 1352 length, 1353 maxpages, &offset); 1354 if (copied <= 0) { 1355 rc = -EFAULT; 1356 goto out; 1357 } 1358 1359 length -= copied; 1360 size += copied; 1361 while (copied) { 1362 use = min_t(int, copied, PAGE_SIZE - offset); 1363 1364 sg_set_page(&to[num_elem], 1365 pages[i], use, offset); 1366 sg_unmark_end(&to[num_elem]); 1367 /* We do not uncharge memory from this API */ 1368 1369 offset = 0; 1370 copied -= use; 1371 1372 i++; 1373 num_elem++; 1374 } 1375 } 1376 /* Mark the end in the last sg entry if newly added */ 1377 if (num_elem > *pages_used) 1378 sg_mark_end(&to[num_elem - 1]); 1379 out: 1380 if (rc) 1381 iov_iter_revert(from, size); 1382 *pages_used = num_elem; 1383 1384 return rc; 1385 } 1386 1387 static struct sk_buff * 1388 tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb, 1389 unsigned int full_len) 1390 { 1391 struct strp_msg *clr_rxm; 1392 struct sk_buff *clr_skb; 1393 int err; 1394 1395 clr_skb = 
alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER, 1396 &err, sk->sk_allocation); 1397 if (!clr_skb) 1398 return NULL; 1399 1400 skb_copy_header(clr_skb, skb); 1401 clr_skb->len = full_len; 1402 clr_skb->data_len = full_len; 1403 1404 clr_rxm = strp_msg(clr_skb); 1405 clr_rxm->offset = 0; 1406 1407 return clr_skb; 1408 } 1409 1410 /* Decrypt handlers 1411 * 1412 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers. 1413 * They must transform the darg in/out argument are as follows: 1414 * | Input | Output 1415 * ------------------------------------------------------------------- 1416 * zc | Zero-copy decrypt allowed | Zero-copy performed 1417 * async | Async decrypt allowed | Async crypto used / in progress 1418 * skb | * | Output skb 1419 * 1420 * If ZC decryption was performed darg.skb will point to the input skb. 1421 */ 1422 1423 /* This function decrypts the input skb into either out_iov or in out_sg 1424 * or in skb buffers itself. The input parameter 'darg->zc' indicates if 1425 * zero-copy mode needs to be tried or not. With zero-copy mode, either 1426 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are 1427 * NULL, then the decryption happens inside skb buffers itself, i.e. 1428 * zero-copy gets disabled and 'darg->zc' is updated. 1429 */ 1430 static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov, 1431 struct scatterlist *out_sg, 1432 struct tls_decrypt_arg *darg) 1433 { 1434 struct tls_context *tls_ctx = tls_get_ctx(sk); 1435 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1436 struct tls_prot_info *prot = &tls_ctx->prot_info; 1437 int n_sgin, n_sgout, aead_size, err, pages = 0; 1438 struct sk_buff *skb = tls_strp_msg(ctx); 1439 const struct strp_msg *rxm = strp_msg(skb); 1440 const struct tls_msg *tlm = tls_msg(skb); 1441 struct aead_request *aead_req; 1442 struct scatterlist *sgin = NULL; 1443 struct scatterlist *sgout = NULL; 1444 const int data_len = rxm->full_len - prot->overhead_size; 1445 int tail_pages = !!prot->tail_size; 1446 struct tls_decrypt_ctx *dctx; 1447 struct sk_buff *clear_skb; 1448 int iv_offset = 0; 1449 u8 *mem; 1450 1451 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size, 1452 rxm->full_len - prot->prepend_size); 1453 if (n_sgin < 1) 1454 return n_sgin ?: -EBADMSG; 1455 1456 if (darg->zc && (out_iov || out_sg)) { 1457 clear_skb = NULL; 1458 1459 if (out_iov) 1460 n_sgout = 1 + tail_pages + 1461 iov_iter_npages_cap(out_iov, INT_MAX, data_len); 1462 else 1463 n_sgout = sg_nents(out_sg); 1464 } else { 1465 darg->zc = false; 1466 1467 clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len); 1468 if (!clear_skb) 1469 return -ENOMEM; 1470 1471 n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags; 1472 } 1473 1474 /* Increment to accommodate AAD */ 1475 n_sgin = n_sgin + 1; 1476 1477 /* Allocate a single block of memory which contains 1478 * aead_req || tls_decrypt_ctx. 1479 * Both structs are variable length. 
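	 * Rough layout: [aead_request + crypto_aead_reqsize() bytes]
	 * [padding to __alignof__(*dctx)][tls_decrypt_ctx with
	 * sg[n_sgin + n_sgout] flex array].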
1480 */ 1481 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv); 1482 aead_size = ALIGN(aead_size, __alignof__(*dctx)); 1483 mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)), 1484 sk->sk_allocation); 1485 if (!mem) { 1486 err = -ENOMEM; 1487 goto exit_free_skb; 1488 } 1489 1490 /* Segment the allocated memory */ 1491 aead_req = (struct aead_request *)mem; 1492 dctx = (struct tls_decrypt_ctx *)(mem + aead_size); 1493 dctx->sk = sk; 1494 sgin = &dctx->sg[0]; 1495 sgout = &dctx->sg[n_sgin]; 1496 1497 /* For CCM based ciphers, first byte of nonce+iv is a constant */ 1498 switch (prot->cipher_type) { 1499 case TLS_CIPHER_AES_CCM_128: 1500 dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE; 1501 iv_offset = 1; 1502 break; 1503 case TLS_CIPHER_SM4_CCM: 1504 dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE; 1505 iv_offset = 1; 1506 break; 1507 } 1508 1509 /* Prepare IV */ 1510 if (prot->version == TLS_1_3_VERSION || 1511 prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) { 1512 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, 1513 prot->iv_size + prot->salt_size); 1514 } else { 1515 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, 1516 &dctx->iv[iv_offset] + prot->salt_size, 1517 prot->iv_size); 1518 if (err < 0) 1519 goto exit_free; 1520 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size); 1521 } 1522 tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq); 1523 1524 /* Prepare AAD */ 1525 tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size + 1526 prot->tail_size, 1527 tls_ctx->rx.rec_seq, tlm->control, prot); 1528 1529 /* Prepare sgin */ 1530 sg_init_table(sgin, n_sgin); 1531 sg_set_buf(&sgin[0], dctx->aad, prot->aad_size); 1532 err = skb_to_sgvec(skb, &sgin[1], 1533 rxm->offset + prot->prepend_size, 1534 rxm->full_len - prot->prepend_size); 1535 if (err < 0) 1536 goto exit_free; 1537 1538 if (clear_skb) { 1539 sg_init_table(sgout, n_sgout); 1540 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size); 1541 1542 err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size, 1543 data_len + prot->tail_size); 1544 if (err < 0) 1545 goto exit_free; 1546 } else if (out_iov) { 1547 sg_init_table(sgout, n_sgout); 1548 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size); 1549 1550 err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1], 1551 (n_sgout - 1 - tail_pages)); 1552 if (err < 0) 1553 goto exit_free_pages; 1554 1555 if (prot->tail_size) { 1556 sg_unmark_end(&sgout[pages]); 1557 sg_set_buf(&sgout[pages + 1], &dctx->tail, 1558 prot->tail_size); 1559 sg_mark_end(&sgout[pages + 1]); 1560 } 1561 } else if (out_sg) { 1562 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout)); 1563 } 1564 1565 /* Prepare and submit AEAD request */ 1566 err = tls_do_decryption(sk, sgin, sgout, dctx->iv, 1567 data_len + prot->tail_size, aead_req, darg); 1568 if (err) 1569 goto exit_free_pages; 1570 1571 darg->skb = clear_skb ?: tls_strp_msg(ctx); 1572 clear_skb = NULL; 1573 1574 if (unlikely(darg->async)) { 1575 err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold); 1576 if (err) 1577 __skb_queue_tail(&ctx->async_hold, darg->skb); 1578 return err; 1579 } 1580 1581 if (prot->tail_size) 1582 darg->tail = dctx->tail; 1583 1584 exit_free_pages: 1585 /* Release the pages in case iov was mapped to pages */ 1586 for (; pages > 0; pages--) 1587 put_page(sg_page(&sgout[pages])); 1588 exit_free: 1589 kfree(mem); 1590 exit_free_skb: 1591 consume_skb(clear_skb); 1592 return err; 1593 } 1594 1595 static int 1596 tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx, 1597 struct msghdr 
*msg, struct tls_decrypt_arg *darg) 1598 { 1599 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1600 struct tls_prot_info *prot = &tls_ctx->prot_info; 1601 struct strp_msg *rxm; 1602 int pad, err; 1603 1604 err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg); 1605 if (err < 0) { 1606 if (err == -EBADMSG) 1607 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); 1608 return err; 1609 } 1610 /* keep going even for ->async, the code below is TLS 1.3 */ 1611 1612 /* If opportunistic TLS 1.3 ZC failed retry without ZC */ 1613 if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION && 1614 darg->tail != TLS_RECORD_TYPE_DATA)) { 1615 darg->zc = false; 1616 if (!darg->tail) 1617 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL); 1618 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY); 1619 return tls_decrypt_sw(sk, tls_ctx, msg, darg); 1620 } 1621 1622 pad = tls_padding_length(prot, darg->skb, darg); 1623 if (pad < 0) { 1624 if (darg->skb != tls_strp_msg(ctx)) 1625 consume_skb(darg->skb); 1626 return pad; 1627 } 1628 1629 rxm = strp_msg(darg->skb); 1630 rxm->full_len -= pad; 1631 1632 return 0; 1633 } 1634 1635 static int 1636 tls_decrypt_device(struct sock *sk, struct msghdr *msg, 1637 struct tls_context *tls_ctx, struct tls_decrypt_arg *darg) 1638 { 1639 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1640 struct tls_prot_info *prot = &tls_ctx->prot_info; 1641 struct strp_msg *rxm; 1642 int pad, err; 1643 1644 if (tls_ctx->rx_conf != TLS_HW) 1645 return 0; 1646 1647 err = tls_device_decrypted(sk, tls_ctx); 1648 if (err <= 0) 1649 return err; 1650 1651 pad = tls_padding_length(prot, tls_strp_msg(ctx), darg); 1652 if (pad < 0) 1653 return pad; 1654 1655 darg->async = false; 1656 darg->skb = tls_strp_msg(ctx); 1657 /* ->zc downgrade check, in case TLS 1.3 gets here */ 1658 darg->zc &= !(prot->version == TLS_1_3_VERSION && 1659 tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA); 1660 1661 rxm = strp_msg(darg->skb); 1662 rxm->full_len -= pad; 1663 1664 if (!darg->zc) { 1665 /* Non-ZC case needs a real skb */ 1666 darg->skb = tls_strp_msg_detach(ctx); 1667 if (!darg->skb) 1668 return -ENOMEM; 1669 } else { 1670 unsigned int off, len; 1671 1672 /* In ZC case nobody cares about the output skb. 1673 * Just copy the data here. Note the skb is not fully trimmed. 
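		 * The device already decrypted the record in place, so the
		 * plaintext is copied out of the input skb, skipping the TLS
		 * header and excluding the trailing overhead.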
1674 */ 1675 off = rxm->offset + prot->prepend_size; 1676 len = rxm->full_len - prot->overhead_size; 1677 1678 err = skb_copy_datagram_msg(darg->skb, off, msg, len); 1679 if (err) 1680 return err; 1681 } 1682 return 1; 1683 } 1684 1685 static int tls_rx_one_record(struct sock *sk, struct msghdr *msg, 1686 struct tls_decrypt_arg *darg) 1687 { 1688 struct tls_context *tls_ctx = tls_get_ctx(sk); 1689 struct tls_prot_info *prot = &tls_ctx->prot_info; 1690 struct strp_msg *rxm; 1691 int err; 1692 1693 err = tls_decrypt_device(sk, msg, tls_ctx, darg); 1694 if (!err) 1695 err = tls_decrypt_sw(sk, tls_ctx, msg, darg); 1696 if (err < 0) 1697 return err; 1698 1699 rxm = strp_msg(darg->skb); 1700 rxm->offset += prot->prepend_size; 1701 rxm->full_len -= prot->overhead_size; 1702 tls_advance_record_sn(sk, prot, &tls_ctx->rx); 1703 1704 return 0; 1705 } 1706 1707 int decrypt_skb(struct sock *sk, struct scatterlist *sgout) 1708 { 1709 struct tls_decrypt_arg darg = { .zc = true, }; 1710 1711 return tls_decrypt_sg(sk, NULL, sgout, &darg); 1712 } 1713 1714 static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm, 1715 u8 *control) 1716 { 1717 int err; 1718 1719 if (!*control) { 1720 *control = tlm->control; 1721 if (!*control) 1722 return -EBADMSG; 1723 1724 err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE, 1725 sizeof(*control), control); 1726 if (*control != TLS_RECORD_TYPE_DATA) { 1727 if (err || msg->msg_flags & MSG_CTRUNC) 1728 return -EIO; 1729 } 1730 } else if (*control != tlm->control) { 1731 return 0; 1732 } 1733 1734 return 1; 1735 } 1736 1737 static void tls_rx_rec_done(struct tls_sw_context_rx *ctx) 1738 { 1739 tls_strp_msg_done(&ctx->strp); 1740 } 1741 1742 /* This function traverses the rx_list in tls receive context to copies the 1743 * decrypted records into the buffer provided by caller zero copy is not 1744 * true. Further, the records are removed from the rx_list if it is not a peek 1745 * case and the record has been consumed completely. 1746 */ 1747 static int process_rx_list(struct tls_sw_context_rx *ctx, 1748 struct msghdr *msg, 1749 u8 *control, 1750 size_t skip, 1751 size_t len, 1752 bool is_peek) 1753 { 1754 struct sk_buff *skb = skb_peek(&ctx->rx_list); 1755 struct tls_msg *tlm; 1756 ssize_t copied = 0; 1757 int err; 1758 1759 while (skip && skb) { 1760 struct strp_msg *rxm = strp_msg(skb); 1761 tlm = tls_msg(skb); 1762 1763 err = tls_record_content_type(msg, tlm, control); 1764 if (err <= 0) 1765 goto out; 1766 1767 if (skip < rxm->full_len) 1768 break; 1769 1770 skip = skip - rxm->full_len; 1771 skb = skb_peek_next(skb, &ctx->rx_list); 1772 } 1773 1774 while (len && skb) { 1775 struct sk_buff *next_skb; 1776 struct strp_msg *rxm = strp_msg(skb); 1777 int chunk = min_t(unsigned int, rxm->full_len - skip, len); 1778 1779 tlm = tls_msg(skb); 1780 1781 err = tls_record_content_type(msg, tlm, control); 1782 if (err <= 0) 1783 goto out; 1784 1785 err = skb_copy_datagram_msg(skb, rxm->offset + skip, 1786 msg, chunk); 1787 if (err < 0) 1788 goto out; 1789 1790 len = len - chunk; 1791 copied = copied + chunk; 1792 1793 /* Consume the data from record if it is non-peek case*/ 1794 if (!is_peek) { 1795 rxm->offset = rxm->offset + chunk; 1796 rxm->full_len = rxm->full_len - chunk; 1797 1798 /* Return if there is unconsumed data in the record */ 1799 if (rxm->full_len - skip) 1800 break; 1801 } 1802 1803 /* The remaining skip-bytes must lie in 1st record in rx_list. 1804 * So from the 2nd record, 'skip' should be 0. 
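		 * (whole records covered by 'skip' were already stepped over
		 * in the first loop above).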
1805 */ 1806 skip = 0; 1807 1808 if (msg) 1809 msg->msg_flags |= MSG_EOR; 1810 1811 next_skb = skb_peek_next(skb, &ctx->rx_list); 1812 1813 if (!is_peek) { 1814 __skb_unlink(skb, &ctx->rx_list); 1815 consume_skb(skb); 1816 } 1817 1818 skb = next_skb; 1819 } 1820 err = 0; 1821 1822 out: 1823 return copied ? : err; 1824 } 1825 1826 static bool 1827 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot, 1828 size_t len_left, size_t decrypted, ssize_t done, 1829 size_t *flushed_at) 1830 { 1831 size_t max_rec; 1832 1833 if (len_left <= decrypted) 1834 return false; 1835 1836 max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE; 1837 if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec) 1838 return false; 1839 1840 *flushed_at = done; 1841 return sk_flush_backlog(sk); 1842 } 1843 1844 static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx, 1845 bool nonblock) 1846 { 1847 long timeo; 1848 int ret; 1849 1850 timeo = sock_rcvtimeo(sk, nonblock); 1851 1852 while (unlikely(ctx->reader_present)) { 1853 DEFINE_WAIT_FUNC(wait, woken_wake_function); 1854 1855 ctx->reader_contended = 1; 1856 1857 add_wait_queue(&ctx->wq, &wait); 1858 ret = sk_wait_event(sk, &timeo, 1859 !READ_ONCE(ctx->reader_present), &wait); 1860 remove_wait_queue(&ctx->wq, &wait); 1861 1862 if (timeo <= 0) 1863 return -EAGAIN; 1864 if (signal_pending(current)) 1865 return sock_intr_errno(timeo); 1866 if (ret < 0) 1867 return ret; 1868 } 1869 1870 WRITE_ONCE(ctx->reader_present, 1); 1871 1872 return 0; 1873 } 1874 1875 static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx, 1876 bool nonblock) 1877 { 1878 int err; 1879 1880 lock_sock(sk); 1881 err = tls_rx_reader_acquire(sk, ctx, nonblock); 1882 if (err) 1883 release_sock(sk); 1884 return err; 1885 } 1886 1887 static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx) 1888 { 1889 if (unlikely(ctx->reader_contended)) { 1890 if (wq_has_sleeper(&ctx->wq)) 1891 wake_up(&ctx->wq); 1892 else 1893 ctx->reader_contended = 0; 1894 1895 WARN_ON_ONCE(!ctx->reader_present); 1896 } 1897 1898 WRITE_ONCE(ctx->reader_present, 0); 1899 } 1900 1901 static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx) 1902 { 1903 tls_rx_reader_release(sk, ctx); 1904 release_sock(sk); 1905 } 1906 1907 int tls_sw_recvmsg(struct sock *sk, 1908 struct msghdr *msg, 1909 size_t len, 1910 int flags, 1911 int *addr_len) 1912 { 1913 struct tls_context *tls_ctx = tls_get_ctx(sk); 1914 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1915 struct tls_prot_info *prot = &tls_ctx->prot_info; 1916 ssize_t decrypted = 0, async_copy_bytes = 0; 1917 struct sk_psock *psock; 1918 unsigned char control = 0; 1919 size_t flushed_at = 0; 1920 struct strp_msg *rxm; 1921 struct tls_msg *tlm; 1922 ssize_t copied = 0; 1923 bool async = false; 1924 int target, err; 1925 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 1926 bool is_peek = flags & MSG_PEEK; 1927 bool released = true; 1928 bool bpf_strp_enabled; 1929 bool zc_capable; 1930 1931 if (unlikely(flags & MSG_ERRQUEUE)) 1932 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR); 1933 1934 psock = sk_psock_get(sk); 1935 err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT); 1936 if (err < 0) 1937 return err; 1938 bpf_strp_enabled = sk_psock_strp_enabled(psock); 1939 1940 /* If crypto failed the connection is broken */ 1941 err = ctx->async_wait.err; 1942 if (err) 1943 goto end; 1944 1945 /* Process pending decrypted records. 
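	 * i.e. records already sitting on ctx->rx_list from an earlier
	 * partial read, peek or async decrypt.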
It must be non-zero-copy */ 1946 err = process_rx_list(ctx, msg, &control, 0, len, is_peek); 1947 if (err < 0) 1948 goto end; 1949 1950 copied = err; 1951 if (len <= copied) 1952 goto end; 1953 1954 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1955 len = len - copied; 1956 1957 zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek && 1958 ctx->zc_capable; 1959 decrypted = 0; 1960 while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) { 1961 struct tls_decrypt_arg darg; 1962 int to_decrypt, chunk; 1963 1964 err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT, 1965 released); 1966 if (err <= 0) { 1967 if (psock) { 1968 chunk = sk_msg_recvmsg(sk, psock, msg, len, 1969 flags); 1970 if (chunk > 0) { 1971 decrypted += chunk; 1972 len -= chunk; 1973 continue; 1974 } 1975 } 1976 goto recv_end; 1977 } 1978 1979 memset(&darg.inargs, 0, sizeof(darg.inargs)); 1980 1981 rxm = strp_msg(tls_strp_msg(ctx)); 1982 tlm = tls_msg(tls_strp_msg(ctx)); 1983 1984 to_decrypt = rxm->full_len - prot->overhead_size; 1985 1986 if (zc_capable && to_decrypt <= len && 1987 tlm->control == TLS_RECORD_TYPE_DATA) 1988 darg.zc = true; 1989 1990 /* Do not use async mode if record is non-data */ 1991 if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled) 1992 darg.async = ctx->async_capable; 1993 else 1994 darg.async = false; 1995 1996 err = tls_rx_one_record(sk, msg, &darg); 1997 if (err < 0) { 1998 tls_err_abort(sk, -EBADMSG); 1999 goto recv_end; 2000 } 2001 2002 async |= darg.async; 2003 2004 /* If the type of records being processed is not known yet, 2005 * set it to record type just dequeued. If it is already known, 2006 * but does not match the record type just dequeued, go to end. 2007 * We always get record type here since for tls1.2, record type 2008 * is known just after record is dequeued from stream parser. 2009 * For tls1.3, we disable async. 
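		 * (so the decrypt was synchronous and the inner content type
		 * has already been recovered at this point).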
2010 */ 2011 err = tls_record_content_type(msg, tls_msg(darg.skb), &control); 2012 if (err <= 0) { 2013 DEBUG_NET_WARN_ON_ONCE(darg.zc); 2014 tls_rx_rec_done(ctx); 2015 put_on_rx_list_err: 2016 __skb_queue_tail(&ctx->rx_list, darg.skb); 2017 goto recv_end; 2018 } 2019 2020 /* periodically flush backlog, and feed strparser */ 2021 released = tls_read_flush_backlog(sk, prot, len, to_decrypt, 2022 decrypted + copied, 2023 &flushed_at); 2024 2025 /* TLS 1.3 may have updated the length by more than overhead */ 2026 rxm = strp_msg(darg.skb); 2027 chunk = rxm->full_len; 2028 tls_rx_rec_done(ctx); 2029 2030 if (!darg.zc) { 2031 bool partially_consumed = chunk > len; 2032 struct sk_buff *skb = darg.skb; 2033 2034 DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor); 2035 2036 if (async) { 2037 /* TLS 1.2-only, to_decrypt must be text len */ 2038 chunk = min_t(int, to_decrypt, len); 2039 async_copy_bytes += chunk; 2040 put_on_rx_list: 2041 decrypted += chunk; 2042 len -= chunk; 2043 __skb_queue_tail(&ctx->rx_list, skb); 2044 continue; 2045 } 2046 2047 if (bpf_strp_enabled) { 2048 released = true; 2049 err = sk_psock_tls_strp_read(psock, skb); 2050 if (err != __SK_PASS) { 2051 rxm->offset = rxm->offset + rxm->full_len; 2052 rxm->full_len = 0; 2053 if (err == __SK_DROP) 2054 consume_skb(skb); 2055 continue; 2056 } 2057 } 2058 2059 if (partially_consumed) 2060 chunk = len; 2061 2062 err = skb_copy_datagram_msg(skb, rxm->offset, 2063 msg, chunk); 2064 if (err < 0) 2065 goto put_on_rx_list_err; 2066 2067 if (is_peek) 2068 goto put_on_rx_list; 2069 2070 if (partially_consumed) { 2071 rxm->offset += chunk; 2072 rxm->full_len -= chunk; 2073 goto put_on_rx_list; 2074 } 2075 2076 consume_skb(skb); 2077 } 2078 2079 decrypted += chunk; 2080 len -= chunk; 2081 2082 /* Return full control message to userspace before trying 2083 * to parse another message type 2084 */ 2085 msg->msg_flags |= MSG_EOR; 2086 if (control != TLS_RECORD_TYPE_DATA) 2087 break; 2088 } 2089 2090 recv_end: 2091 if (async) { 2092 int ret; 2093 2094 /* Wait for all previously submitted records to be decrypted */ 2095 ret = tls_decrypt_async_wait(ctx); 2096 __skb_queue_purge(&ctx->async_hold); 2097 2098 if (ret) { 2099 if (err >= 0 || err == -EINPROGRESS) 2100 err = ret; 2101 decrypted = 0; 2102 goto end; 2103 } 2104 2105 /* Drain records from the rx_list & copy if required */ 2106 if (is_peek || is_kvec) 2107 err = process_rx_list(ctx, msg, &control, copied, 2108 decrypted, is_peek); 2109 else 2110 err = process_rx_list(ctx, msg, &control, 0, 2111 async_copy_bytes, is_peek); 2112 decrypted += max(err, 0); 2113 } 2114 2115 copied += decrypted; 2116 2117 end: 2118 tls_rx_reader_unlock(sk, ctx); 2119 if (psock) 2120 sk_psock_put(sk, psock); 2121 return copied ? 
: err; 2122 } 2123 2124 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, 2125 struct pipe_inode_info *pipe, 2126 size_t len, unsigned int flags) 2127 { 2128 struct tls_context *tls_ctx = tls_get_ctx(sock->sk); 2129 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2130 struct strp_msg *rxm = NULL; 2131 struct sock *sk = sock->sk; 2132 struct tls_msg *tlm; 2133 struct sk_buff *skb; 2134 ssize_t copied = 0; 2135 int chunk; 2136 int err; 2137 2138 err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK); 2139 if (err < 0) 2140 return err; 2141 2142 if (!skb_queue_empty(&ctx->rx_list)) { 2143 skb = __skb_dequeue(&ctx->rx_list); 2144 } else { 2145 struct tls_decrypt_arg darg; 2146 2147 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK, 2148 true); 2149 if (err <= 0) 2150 goto splice_read_end; 2151 2152 memset(&darg.inargs, 0, sizeof(darg.inargs)); 2153 2154 err = tls_rx_one_record(sk, NULL, &darg); 2155 if (err < 0) { 2156 tls_err_abort(sk, -EBADMSG); 2157 goto splice_read_end; 2158 } 2159 2160 tls_rx_rec_done(ctx); 2161 skb = darg.skb; 2162 } 2163 2164 rxm = strp_msg(skb); 2165 tlm = tls_msg(skb); 2166 2167 /* splice does not support reading control messages */ 2168 if (tlm->control != TLS_RECORD_TYPE_DATA) { 2169 err = -EINVAL; 2170 goto splice_requeue; 2171 } 2172 2173 chunk = min_t(unsigned int, rxm->full_len, len); 2174 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags); 2175 if (copied < 0) 2176 goto splice_requeue; 2177 2178 if (chunk < rxm->full_len) { 2179 rxm->offset += len; 2180 rxm->full_len -= len; 2181 goto splice_requeue; 2182 } 2183 2184 consume_skb(skb); 2185 2186 splice_read_end: 2187 tls_rx_reader_unlock(sk, ctx); 2188 return copied ? : err; 2189 2190 splice_requeue: 2191 __skb_queue_head(&ctx->rx_list, skb); 2192 goto splice_read_end; 2193 } 2194 2195 int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc, 2196 sk_read_actor_t read_actor) 2197 { 2198 struct tls_context *tls_ctx = tls_get_ctx(sk); 2199 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2200 struct tls_prot_info *prot = &tls_ctx->prot_info; 2201 struct strp_msg *rxm = NULL; 2202 struct sk_buff *skb = NULL; 2203 struct sk_psock *psock; 2204 size_t flushed_at = 0; 2205 bool released = true; 2206 struct tls_msg *tlm; 2207 ssize_t copied = 0; 2208 ssize_t decrypted; 2209 int err, used; 2210 2211 psock = sk_psock_get(sk); 2212 if (psock) { 2213 sk_psock_put(sk, psock); 2214 return -EINVAL; 2215 } 2216 err = tls_rx_reader_acquire(sk, ctx, true); 2217 if (err < 0) 2218 return err; 2219 2220 /* If crypto failed the connection is broken */ 2221 err = ctx->async_wait.err; 2222 if (err) 2223 goto read_sock_end; 2224 2225 decrypted = 0; 2226 do { 2227 if (!skb_queue_empty(&ctx->rx_list)) { 2228 skb = __skb_dequeue(&ctx->rx_list); 2229 rxm = strp_msg(skb); 2230 tlm = tls_msg(skb); 2231 } else { 2232 struct tls_decrypt_arg darg; 2233 2234 err = tls_rx_rec_wait(sk, NULL, true, released); 2235 if (err <= 0) 2236 goto read_sock_end; 2237 2238 memset(&darg.inargs, 0, sizeof(darg.inargs)); 2239 2240 err = tls_rx_one_record(sk, NULL, &darg); 2241 if (err < 0) { 2242 tls_err_abort(sk, -EBADMSG); 2243 goto read_sock_end; 2244 } 2245 2246 released = tls_read_flush_backlog(sk, prot, INT_MAX, 2247 0, decrypted, 2248 &flushed_at); 2249 skb = darg.skb; 2250 rxm = strp_msg(skb); 2251 tlm = tls_msg(skb); 2252 decrypted += rxm->full_len; 2253 2254 tls_rx_rec_done(ctx); 2255 } 2256 2257 /* read_sock does not support reading control messages */ 2258 if (tlm->control != 
TLS_RECORD_TYPE_DATA) { 2259 err = -EINVAL; 2260 goto read_sock_requeue; 2261 } 2262 2263 used = read_actor(desc, skb, rxm->offset, rxm->full_len); 2264 if (used <= 0) { 2265 if (!copied) 2266 err = used; 2267 goto read_sock_requeue; 2268 } 2269 copied += used; 2270 if (used < rxm->full_len) { 2271 rxm->offset += used; 2272 rxm->full_len -= used; 2273 if (!desc->count) 2274 goto read_sock_requeue; 2275 } else { 2276 consume_skb(skb); 2277 if (!desc->count) 2278 skb = NULL; 2279 } 2280 } while (skb); 2281 2282 read_sock_end: 2283 tls_rx_reader_release(sk, ctx); 2284 return copied ? : err; 2285 2286 read_sock_requeue: 2287 __skb_queue_head(&ctx->rx_list, skb); 2288 goto read_sock_end; 2289 } 2290 2291 bool tls_sw_sock_is_readable(struct sock *sk) 2292 { 2293 struct tls_context *tls_ctx = tls_get_ctx(sk); 2294 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2295 bool ingress_empty = true; 2296 struct sk_psock *psock; 2297 2298 rcu_read_lock(); 2299 psock = sk_psock(sk); 2300 if (psock) 2301 ingress_empty = list_empty(&psock->ingress_msg); 2302 rcu_read_unlock(); 2303 2304 return !ingress_empty || tls_strp_msg_ready(ctx) || 2305 !skb_queue_empty(&ctx->rx_list); 2306 } 2307 2308 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb) 2309 { 2310 struct tls_context *tls_ctx = tls_get_ctx(strp->sk); 2311 struct tls_prot_info *prot = &tls_ctx->prot_info; 2312 char header[TLS_HEADER_SIZE + MAX_IV_SIZE]; 2313 size_t cipher_overhead; 2314 size_t data_len = 0; 2315 int ret; 2316 2317 /* Verify that we have a full TLS header, or wait for more data */ 2318 if (strp->stm.offset + prot->prepend_size > skb->len) 2319 return 0; 2320 2321 /* Sanity-check size of on-stack buffer. */ 2322 if (WARN_ON(prot->prepend_size > sizeof(header))) { 2323 ret = -EINVAL; 2324 goto read_failure; 2325 } 2326 2327 /* Linearize header to local buffer */ 2328 ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size); 2329 if (ret < 0) 2330 goto read_failure; 2331 2332 strp->mark = header[0]; 2333 2334 data_len = ((header[4] & 0xFF) | (header[3] << 8)); 2335 2336 cipher_overhead = prot->tag_size; 2337 if (prot->version != TLS_1_3_VERSION && 2338 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) 2339 cipher_overhead += prot->iv_size; 2340 2341 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead + 2342 prot->tail_size) { 2343 ret = -EMSGSIZE; 2344 goto read_failure; 2345 } 2346 if (data_len < cipher_overhead) { 2347 ret = -EBADMSG; 2348 goto read_failure; 2349 } 2350 2351 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */ 2352 if (header[1] != TLS_1_2_VERSION_MINOR || 2353 header[2] != TLS_1_2_VERSION_MAJOR) { 2354 ret = -EINVAL; 2355 goto read_failure; 2356 } 2357 2358 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE, 2359 TCP_SKB_CB(skb)->seq + strp->stm.offset); 2360 return data_len + TLS_HEADER_SIZE; 2361 2362 read_failure: 2363 tls_err_abort(strp->sk, ret); 2364 2365 return ret; 2366 } 2367 2368 void tls_rx_msg_ready(struct tls_strparser *strp) 2369 { 2370 struct tls_sw_context_rx *ctx; 2371 2372 ctx = container_of(strp, struct tls_sw_context_rx, strp); 2373 ctx->saved_data_ready(strp->sk); 2374 } 2375 2376 static void tls_data_ready(struct sock *sk) 2377 { 2378 struct tls_context *tls_ctx = tls_get_ctx(sk); 2379 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2380 struct sk_psock *psock; 2381 gfp_t alloc_save; 2382 2383 trace_sk_data_ready(sk); 2384 2385 alloc_save = sk->sk_allocation; 2386 sk->sk_allocation = GFP_ATOMIC; 2387 
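/* The data_ready callback can run in softirq (BH) context, so make sure any allocations done while the strparser queues up the new record do not sleep; sk_allocation is restored right after the call. */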
tls_strp_data_ready(&ctx->strp); 2388 sk->sk_allocation = alloc_save; 2389 2390 psock = sk_psock_get(sk); 2391 if (psock) { 2392 if (!list_empty(&psock->ingress_msg)) 2393 ctx->saved_data_ready(sk); 2394 sk_psock_put(sk, psock); 2395 } 2396 } 2397 2398 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx) 2399 { 2400 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2401 2402 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask); 2403 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask); 2404 cancel_delayed_work_sync(&ctx->tx_work.work); 2405 } 2406 2407 void tls_sw_release_resources_tx(struct sock *sk) 2408 { 2409 struct tls_context *tls_ctx = tls_get_ctx(sk); 2410 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2411 struct tls_rec *rec, *tmp; 2412 2413 /* Wait for any pending async encryptions to complete */ 2414 tls_encrypt_async_wait(ctx); 2415 2416 tls_tx_records(sk, -1); 2417 2418 /* Free up un-sent records in tx_list. First, free 2419 * the partially sent record if any at head of tx_list. 2420 */ 2421 if (tls_ctx->partially_sent_record) { 2422 tls_free_partial_record(sk, tls_ctx); 2423 rec = list_first_entry(&ctx->tx_list, 2424 struct tls_rec, list); 2425 list_del(&rec->list); 2426 sk_msg_free(sk, &rec->msg_plaintext); 2427 kfree(rec); 2428 } 2429 2430 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { 2431 list_del(&rec->list); 2432 sk_msg_free(sk, &rec->msg_encrypted); 2433 sk_msg_free(sk, &rec->msg_plaintext); 2434 kfree(rec); 2435 } 2436 2437 crypto_free_aead(ctx->aead_send); 2438 tls_free_open_rec(sk); 2439 } 2440 2441 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx) 2442 { 2443 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 2444 2445 kfree(ctx); 2446 } 2447 2448 void tls_sw_release_resources_rx(struct sock *sk) 2449 { 2450 struct tls_context *tls_ctx = tls_get_ctx(sk); 2451 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2452 2453 kfree(tls_ctx->rx.rec_seq); 2454 kfree(tls_ctx->rx.iv); 2455 2456 if (ctx->aead_recv) { 2457 __skb_queue_purge(&ctx->rx_list); 2458 crypto_free_aead(ctx->aead_recv); 2459 tls_strp_stop(&ctx->strp); 2460 /* If tls_sw_strparser_arm() was not called (cleanup paths) 2461 * we still want to tls_strp_stop(), but sk->sk_data_ready was 2462 * never swapped. 
2463 */ 2464 if (ctx->saved_data_ready) { 2465 write_lock_bh(&sk->sk_callback_lock); 2466 sk->sk_data_ready = ctx->saved_data_ready; 2467 write_unlock_bh(&sk->sk_callback_lock); 2468 } 2469 } 2470 } 2471 2472 void tls_sw_strparser_done(struct tls_context *tls_ctx) 2473 { 2474 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2475 2476 tls_strp_done(&ctx->strp); 2477 } 2478 2479 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx) 2480 { 2481 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 2482 2483 kfree(ctx); 2484 } 2485 2486 void tls_sw_free_resources_rx(struct sock *sk) 2487 { 2488 struct tls_context *tls_ctx = tls_get_ctx(sk); 2489 2490 tls_sw_release_resources_rx(sk); 2491 tls_sw_free_ctx_rx(tls_ctx); 2492 } 2493 2494 /* The work handler to transmit the encrypted records in tx_list */ 2495 static void tx_work_handler(struct work_struct *work) 2496 { 2497 struct delayed_work *delayed_work = to_delayed_work(work); 2498 struct tx_work *tx_work = container_of(delayed_work, 2499 struct tx_work, work); 2500 struct sock *sk = tx_work->sk; 2501 struct tls_context *tls_ctx = tls_get_ctx(sk); 2502 struct tls_sw_context_tx *ctx; 2503 2504 if (unlikely(!tls_ctx)) 2505 return; 2506 2507 ctx = tls_sw_ctx_tx(tls_ctx); 2508 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask)) 2509 return; 2510 2511 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 2512 return; 2513 2514 if (mutex_trylock(&tls_ctx->tx_lock)) { 2515 lock_sock(sk); 2516 tls_tx_records(sk, -1); 2517 release_sock(sk); 2518 mutex_unlock(&tls_ctx->tx_lock); 2519 } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 2520 /* Someone is holding the tx_lock, they will likely run Tx 2521 * and cancel the work on their way out of the lock section. 2522 * Schedule a long delay just in case. 
2523 */ 2524 schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10)); 2525 } 2526 } 2527 2528 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx) 2529 { 2530 struct tls_rec *rec; 2531 2532 rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list); 2533 if (!rec) 2534 return false; 2535 2536 return READ_ONCE(rec->tx_ready); 2537 } 2538 2539 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx) 2540 { 2541 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx); 2542 2543 /* Schedule the transmission if tx list is ready */ 2544 if (tls_is_tx_ready(tx_ctx) && 2545 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask)) 2546 schedule_delayed_work(&tx_ctx->tx_work.work, 0); 2547 } 2548 2549 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx) 2550 { 2551 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx); 2552 2553 write_lock_bh(&sk->sk_callback_lock); 2554 rx_ctx->saved_data_ready = sk->sk_data_ready; 2555 sk->sk_data_ready = tls_data_ready; 2556 write_unlock_bh(&sk->sk_callback_lock); 2557 } 2558 2559 void tls_update_rx_zc_capable(struct tls_context *tls_ctx) 2560 { 2561 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx); 2562 2563 rx_ctx->zc_capable = tls_ctx->rx_no_pad || 2564 tls_ctx->prot_info.version != TLS_1_3_VERSION; 2565 } 2566 2567 static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk) 2568 { 2569 struct tls_sw_context_tx *sw_ctx_tx; 2570 2571 if (!ctx->priv_ctx_tx) { 2572 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); 2573 if (!sw_ctx_tx) 2574 return NULL; 2575 } else { 2576 sw_ctx_tx = ctx->priv_ctx_tx; 2577 } 2578 2579 crypto_init_wait(&sw_ctx_tx->async_wait); 2580 atomic_set(&sw_ctx_tx->encrypt_pending, 1); 2581 INIT_LIST_HEAD(&sw_ctx_tx->tx_list); 2582 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); 2583 sw_ctx_tx->tx_work.sk = sk; 2584 2585 return sw_ctx_tx; 2586 } 2587 2588 static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx) 2589 { 2590 struct tls_sw_context_rx *sw_ctx_rx; 2591 2592 if (!ctx->priv_ctx_rx) { 2593 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); 2594 if (!sw_ctx_rx) 2595 return NULL; 2596 } else { 2597 sw_ctx_rx = ctx->priv_ctx_rx; 2598 } 2599 2600 crypto_init_wait(&sw_ctx_rx->async_wait); 2601 atomic_set(&sw_ctx_rx->decrypt_pending, 1); 2602 init_waitqueue_head(&sw_ctx_rx->wq); 2603 skb_queue_head_init(&sw_ctx_rx->rx_list); 2604 skb_queue_head_init(&sw_ctx_rx->async_hold); 2605 2606 return sw_ctx_rx; 2607 } 2608 2609 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) 2610 { 2611 struct tls_context *tls_ctx = tls_get_ctx(sk); 2612 struct tls_prot_info *prot = &tls_ctx->prot_info; 2613 struct tls_crypto_info *crypto_info; 2614 struct tls_sw_context_tx *sw_ctx_tx = NULL; 2615 struct tls_sw_context_rx *sw_ctx_rx = NULL; 2616 struct cipher_context *cctx; 2617 struct crypto_aead **aead; 2618 struct crypto_tfm *tfm; 2619 char *iv, *rec_seq, *key, *salt; 2620 const struct tls_cipher_desc *cipher_desc; 2621 u16 nonce_size; 2622 int rc = 0; 2623 2624 if (!ctx) { 2625 rc = -EINVAL; 2626 goto out; 2627 } 2628 2629 if (tx) { 2630 ctx->priv_ctx_tx = init_ctx_tx(ctx, sk); 2631 if (!ctx->priv_ctx_tx) 2632 return -ENOMEM; 2633 2634 sw_ctx_tx = ctx->priv_ctx_tx; 2635 crypto_info = &ctx->crypto_send.info; 2636 cctx = &ctx->tx; 2637 aead = &sw_ctx_tx->aead_send; 2638 } else { 2639 ctx->priv_ctx_rx = init_ctx_rx(ctx); 2640 if (!ctx->priv_ctx_rx) 2641 return -ENOMEM; 2642 2643 sw_ctx_rx = ctx->priv_ctx_rx; 2644 
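/* RX direction: pick up the receive-side crypto parameters and cipher context; the AEAD handle set up below is the one used by the decrypt path in this file. */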
crypto_info = &ctx->crypto_recv.info; 2645 cctx = &ctx->rx; 2646 aead = &sw_ctx_rx->aead_recv; 2647 } 2648 2649 cipher_desc = get_cipher_desc(crypto_info->cipher_type); 2650 if (!cipher_desc) { 2651 rc = -EINVAL; 2652 goto free_priv; 2653 } 2654 2655 nonce_size = cipher_desc->nonce; 2656 2657 iv = crypto_info_iv(crypto_info, cipher_desc); 2658 key = crypto_info_key(crypto_info, cipher_desc); 2659 salt = crypto_info_salt(crypto_info, cipher_desc); 2660 rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc); 2661 2662 if (crypto_info->version == TLS_1_3_VERSION) { 2663 nonce_size = 0; 2664 prot->aad_size = TLS_HEADER_SIZE; 2665 prot->tail_size = 1; 2666 } else { 2667 prot->aad_size = TLS_AAD_SPACE_SIZE; 2668 prot->tail_size = 0; 2669 } 2670 2671 /* Sanity-check the sizes for stack allocations. */ 2672 if (nonce_size > MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE) { 2673 rc = -EINVAL; 2674 goto free_priv; 2675 } 2676 2677 prot->version = crypto_info->version; 2678 prot->cipher_type = crypto_info->cipher_type; 2679 prot->prepend_size = TLS_HEADER_SIZE + nonce_size; 2680 prot->tag_size = cipher_desc->tag; 2681 prot->overhead_size = prot->prepend_size + 2682 prot->tag_size + prot->tail_size; 2683 prot->iv_size = cipher_desc->iv; 2684 prot->salt_size = cipher_desc->salt; 2685 cctx->iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL); 2686 if (!cctx->iv) { 2687 rc = -ENOMEM; 2688 goto free_priv; 2689 } 2690 /* Note: 128 & 256 bit salt are the same size */ 2691 prot->rec_seq_size = cipher_desc->rec_seq; 2692 memcpy(cctx->iv, salt, cipher_desc->salt); 2693 memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv); 2694 2695 cctx->rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL); 2696 if (!cctx->rec_seq) { 2697 rc = -ENOMEM; 2698 goto free_iv; 2699 } 2700 2701 if (!*aead) { 2702 *aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0); 2703 if (IS_ERR(*aead)) { 2704 rc = PTR_ERR(*aead); 2705 *aead = NULL; 2706 goto free_rec_seq; 2707 } 2708 } 2709 2710 ctx->push_pending_record = tls_sw_push_pending_record; 2711 2712 rc = crypto_aead_setkey(*aead, key, cipher_desc->key); 2713 if (rc) 2714 goto free_aead; 2715 2716 rc = crypto_aead_setauthsize(*aead, prot->tag_size); 2717 if (rc) 2718 goto free_aead; 2719 2720 if (sw_ctx_rx) { 2721 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv); 2722 2723 tls_update_rx_zc_capable(ctx); 2724 sw_ctx_rx->async_capable = 2725 crypto_info->version != TLS_1_3_VERSION && 2726 !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC); 2727 2728 rc = tls_strp_init(&sw_ctx_rx->strp, sk); 2729 if (rc) 2730 goto free_aead; 2731 } 2732 2733 goto out; 2734 2735 free_aead: 2736 crypto_free_aead(*aead); 2737 *aead = NULL; 2738 free_rec_seq: 2739 kfree(cctx->rec_seq); 2740 cctx->rec_seq = NULL; 2741 free_iv: 2742 kfree(cctx->iv); 2743 cctx->iv = NULL; 2744 free_priv: 2745 if (tx) { 2746 kfree(ctx->priv_ctx_tx); 2747 ctx->priv_ctx_tx = NULL; 2748 } else { 2749 kfree(ctx->priv_ctx_rx); 2750 ctx->priv_ctx_rx = NULL; 2751 } 2752 out: 2753 return rc; 2754 } 2755
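/*
 * Illustrative userspace sketch (not part of this file, and only a sketch):
 * one common way the software offload configured by tls_set_sw_offload()
 * above gets enabled. The TLS handshake itself happens in userspace; once
 * the traffic secrets are known they are handed to the kernel per direction
 * via setsockopt(). Constants and the struct layout come from <linux/tls.h>;
 * the helper name and the key/iv/salt/rec_seq parameters are placeholders.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/tcp.h>
 *	#include <linux/tls.h>
 *
 *	static int enable_ktls_tx(int fd, const unsigned char *key,
 *				  const unsigned char *iv,
 *				  const unsigned char *salt,
 *				  const unsigned char *rec_seq)
 *	{
 *		struct tls12_crypto_info_aes_gcm_128 ci;
 *
 *		memset(&ci, 0, sizeof(ci));
 *		ci.info.version = TLS_1_2_VERSION;
 *		ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *		memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *		memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *		memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *		memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *
 *		if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
 *			return -1;
 *		return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	}
 *
 * Setting TLS_RX the same way arms the receive path (tls_sw_recvmsg(),
 * tls_sw_splice_read()) implemented above.
 */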