1 /* 2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved. 4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved. 5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved. 6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved. 7 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io 8 * 9 * This software is available to you under a choice of one of two 10 * licenses. You may choose to be licensed under the terms of the GNU 11 * General Public License (GPL) Version 2, available from the file 12 * COPYING in the main directory of this source tree, or the 13 * OpenIB.org BSD license below: 14 * 15 * Redistribution and use in source and binary forms, with or 16 * without modification, are permitted provided that the following 17 * conditions are met: 18 * 19 * - Redistributions of source code must retain the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer. 22 * 23 * - Redistributions in binary form must reproduce the above 24 * copyright notice, this list of conditions and the following 25 * disclaimer in the documentation and/or other materials 26 * provided with the distribution. 27 * 28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 35 * SOFTWARE. 36 */ 37 38 #include <linux/sched/signal.h> 39 #include <linux/module.h> 40 #include <crypto/aead.h> 41 42 #include <net/strparser.h> 43 #include <net/tls.h> 44 45 #define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE 46 47 static int __skb_nsg(struct sk_buff *skb, int offset, int len, 48 unsigned int recursion_level) 49 { 50 int start = skb_headlen(skb); 51 int i, chunk = start - offset; 52 struct sk_buff *frag_iter; 53 int elt = 0; 54 55 if (unlikely(recursion_level >= 24)) 56 return -EMSGSIZE; 57 58 if (chunk > 0) { 59 if (chunk > len) 60 chunk = len; 61 elt++; 62 len -= chunk; 63 if (len == 0) 64 return elt; 65 offset += chunk; 66 } 67 68 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 69 int end; 70 71 WARN_ON(start > offset + len); 72 73 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 74 chunk = end - offset; 75 if (chunk > 0) { 76 if (chunk > len) 77 chunk = len; 78 elt++; 79 len -= chunk; 80 if (len == 0) 81 return elt; 82 offset += chunk; 83 } 84 start = end; 85 } 86 87 if (unlikely(skb_has_frag_list(skb))) { 88 skb_walk_frags(skb, frag_iter) { 89 int end, ret; 90 91 WARN_ON(start > offset + len); 92 93 end = start + frag_iter->len; 94 chunk = end - offset; 95 if (chunk > 0) { 96 if (chunk > len) 97 chunk = len; 98 ret = __skb_nsg(frag_iter, offset - start, chunk, 99 recursion_level + 1); 100 if (unlikely(ret < 0)) 101 return ret; 102 elt += ret; 103 len -= chunk; 104 if (len == 0) 105 return elt; 106 offset += chunk; 107 } 108 start = end; 109 } 110 } 111 BUG_ON(len); 112 return elt; 113 } 114 115 /* Return the number of scatterlist elements required to completely map the 116 * skb, or -EMSGSIZE if the recursion depth is exceeded. 
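 * Counting mirrors the traversal done by __skb_to_sgvec(): the linear head,
 * each page frag and any frag_list skbs contribute entries. Note that
 * decrypt_internal() adds one extra entry on top of this count for the AAD.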
117 */ 118 static int skb_nsg(struct sk_buff *skb, int offset, int len) 119 { 120 return __skb_nsg(skb, offset, len, 0); 121 } 122 123 static void tls_decrypt_done(struct crypto_async_request *req, int err) 124 { 125 struct aead_request *aead_req = (struct aead_request *)req; 126 struct scatterlist *sgout = aead_req->dst; 127 struct tls_sw_context_rx *ctx; 128 struct tls_context *tls_ctx; 129 struct scatterlist *sg; 130 struct sk_buff *skb; 131 unsigned int pages; 132 int pending; 133 134 skb = (struct sk_buff *)req->data; 135 tls_ctx = tls_get_ctx(skb->sk); 136 ctx = tls_sw_ctx_rx(tls_ctx); 137 pending = atomic_dec_return(&ctx->decrypt_pending); 138 139 /* Propagate if there was an err */ 140 if (err) { 141 ctx->async_wait.err = err; 142 tls_err_abort(skb->sk, err); 143 } 144 145 /* After using skb->sk to propagate sk through crypto async callback 146 * we need to NULL it again. 147 */ 148 skb->sk = NULL; 149 150 /* Release the skb, pages and memory allocated for crypto req */ 151 kfree_skb(skb); 152 153 /* Skip the first S/G entry as it points to AAD */ 154 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) { 155 if (!sg) 156 break; 157 put_page(sg_page(sg)); 158 } 159 160 kfree(aead_req); 161 162 if (!pending && READ_ONCE(ctx->async_notify)) 163 complete(&ctx->async_wait.completion); 164 } 165 166 static int tls_do_decryption(struct sock *sk, 167 struct sk_buff *skb, 168 struct scatterlist *sgin, 169 struct scatterlist *sgout, 170 char *iv_recv, 171 size_t data_len, 172 struct aead_request *aead_req, 173 bool async) 174 { 175 struct tls_context *tls_ctx = tls_get_ctx(sk); 176 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 177 int ret; 178 179 aead_request_set_tfm(aead_req, ctx->aead_recv); 180 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); 181 aead_request_set_crypt(aead_req, sgin, sgout, 182 data_len + tls_ctx->rx.tag_size, 183 (u8 *)iv_recv); 184 185 if (async) { 186 /* Using skb->sk to push sk through to crypto async callback 187 * handler. This allows propagating errors up to the socket 188 * if needed. It _must_ be cleared in the async handler 189 * before kfree_skb is called. We _know_ skb->sk is NULL 190 * because it is a clone from strparser. 
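 * tls_decrypt_done() clears skb->sk again, drops decrypt_pending and, once
 * the last pending request has finished, completes async_wait for the
 * waiter in tls_sw_recvmsg().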
191 */ 192 skb->sk = sk; 193 aead_request_set_callback(aead_req, 194 CRYPTO_TFM_REQ_MAY_BACKLOG, 195 tls_decrypt_done, skb); 196 atomic_inc(&ctx->decrypt_pending); 197 } else { 198 aead_request_set_callback(aead_req, 199 CRYPTO_TFM_REQ_MAY_BACKLOG, 200 crypto_req_done, &ctx->async_wait); 201 } 202 203 ret = crypto_aead_decrypt(aead_req); 204 if (ret == -EINPROGRESS) { 205 if (async) 206 return ret; 207 208 ret = crypto_wait_req(ret, &ctx->async_wait); 209 } 210 211 if (async) 212 atomic_dec(&ctx->decrypt_pending); 213 214 return ret; 215 } 216 217 static void tls_trim_both_msgs(struct sock *sk, int target_size) 218 { 219 struct tls_context *tls_ctx = tls_get_ctx(sk); 220 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 221 struct tls_rec *rec = ctx->open_rec; 222 223 sk_msg_trim(sk, &rec->msg_plaintext, target_size); 224 if (target_size > 0) 225 target_size += tls_ctx->tx.overhead_size; 226 sk_msg_trim(sk, &rec->msg_encrypted, target_size); 227 } 228 229 static int tls_alloc_encrypted_msg(struct sock *sk, int len) 230 { 231 struct tls_context *tls_ctx = tls_get_ctx(sk); 232 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 233 struct tls_rec *rec = ctx->open_rec; 234 struct sk_msg *msg_en = &rec->msg_encrypted; 235 236 return sk_msg_alloc(sk, msg_en, len, 0); 237 } 238 239 static int tls_clone_plaintext_msg(struct sock *sk, int required) 240 { 241 struct tls_context *tls_ctx = tls_get_ctx(sk); 242 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 243 struct tls_rec *rec = ctx->open_rec; 244 struct sk_msg *msg_pl = &rec->msg_plaintext; 245 struct sk_msg *msg_en = &rec->msg_encrypted; 246 int skip, len; 247 248 /* We add page references worth len bytes from encrypted sg 249 * at the end of plaintext sg. It is guaranteed that msg_en 250 * has enough required room (ensured by caller). 251 */ 252 len = required - msg_pl->sg.size; 253 254 /* Skip initial bytes in msg_en's data to be able to use 255 * same offset of both plain and encrypted data. 
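 * With AES-GCM-128, for instance, prepend_size is TLS_HEADER_SIZE (5) plus
 * an 8 byte explicit nonce, so the clone starts 13 bytes past the plaintext
 * already queued in msg_pl.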
256 */ 257 skip = tls_ctx->tx.prepend_size + msg_pl->sg.size; 258 259 return sk_msg_clone(sk, msg_pl, msg_en, skip, len); 260 } 261 262 static struct tls_rec *tls_get_rec(struct sock *sk) 263 { 264 struct tls_context *tls_ctx = tls_get_ctx(sk); 265 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 266 struct sk_msg *msg_pl, *msg_en; 267 struct tls_rec *rec; 268 int mem_size; 269 270 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send); 271 272 rec = kzalloc(mem_size, sk->sk_allocation); 273 if (!rec) 274 return NULL; 275 276 msg_pl = &rec->msg_plaintext; 277 msg_en = &rec->msg_encrypted; 278 279 sk_msg_init(msg_pl); 280 sk_msg_init(msg_en); 281 282 sg_init_table(rec->sg_aead_in, 2); 283 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, 284 sizeof(rec->aad_space)); 285 sg_unmark_end(&rec->sg_aead_in[1]); 286 287 sg_init_table(rec->sg_aead_out, 2); 288 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, 289 sizeof(rec->aad_space)); 290 sg_unmark_end(&rec->sg_aead_out[1]); 291 292 return rec; 293 } 294 295 static void tls_free_rec(struct sock *sk, struct tls_rec *rec) 296 { 297 sk_msg_free(sk, &rec->msg_encrypted); 298 sk_msg_free(sk, &rec->msg_plaintext); 299 kfree(rec); 300 } 301 302 static void tls_free_open_rec(struct sock *sk) 303 { 304 struct tls_context *tls_ctx = tls_get_ctx(sk); 305 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 306 struct tls_rec *rec = ctx->open_rec; 307 308 if (rec) { 309 tls_free_rec(sk, rec); 310 ctx->open_rec = NULL; 311 } 312 } 313 314 int tls_tx_records(struct sock *sk, int flags) 315 { 316 struct tls_context *tls_ctx = tls_get_ctx(sk); 317 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 318 struct tls_rec *rec, *tmp; 319 struct sk_msg *msg_en; 320 int tx_flags, rc = 0; 321 322 if (tls_is_partially_sent_record(tls_ctx)) { 323 rec = list_first_entry(&ctx->tx_list, 324 struct tls_rec, list); 325 326 if (flags == -1) 327 tx_flags = rec->tx_flags; 328 else 329 tx_flags = flags; 330 331 rc = tls_push_partial_record(sk, tls_ctx, tx_flags); 332 if (rc) 333 goto tx_err; 334 335 /* Full record has been transmitted. 
336 * Remove the head of tx_list 337 */ 338 list_del(&rec->list); 339 sk_msg_free(sk, &rec->msg_plaintext); 340 kfree(rec); 341 } 342 343 /* Tx all ready records */ 344 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { 345 if (READ_ONCE(rec->tx_ready)) { 346 if (flags == -1) 347 tx_flags = rec->tx_flags; 348 else 349 tx_flags = flags; 350 351 msg_en = &rec->msg_encrypted; 352 rc = tls_push_sg(sk, tls_ctx, 353 &msg_en->sg.data[msg_en->sg.curr], 354 0, tx_flags); 355 if (rc) 356 goto tx_err; 357 358 list_del(&rec->list); 359 sk_msg_free(sk, &rec->msg_plaintext); 360 kfree(rec); 361 } else { 362 break; 363 } 364 } 365 366 tx_err: 367 if (rc < 0 && rc != -EAGAIN) 368 tls_err_abort(sk, EBADMSG); 369 370 return rc; 371 } 372 373 static void tls_encrypt_done(struct crypto_async_request *req, int err) 374 { 375 struct aead_request *aead_req = (struct aead_request *)req; 376 struct sock *sk = req->data; 377 struct tls_context *tls_ctx = tls_get_ctx(sk); 378 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 379 struct scatterlist *sge; 380 struct sk_msg *msg_en; 381 struct tls_rec *rec; 382 bool ready = false; 383 int pending; 384 385 rec = container_of(aead_req, struct tls_rec, aead_req); 386 msg_en = &rec->msg_encrypted; 387 388 sge = sk_msg_elem(msg_en, msg_en->sg.curr); 389 sge->offset -= tls_ctx->tx.prepend_size; 390 sge->length += tls_ctx->tx.prepend_size; 391 392 /* Check if error is previously set on socket */ 393 if (err || sk->sk_err) { 394 rec = NULL; 395 396 /* If err is already set on socket, return the same code */ 397 if (sk->sk_err) { 398 ctx->async_wait.err = sk->sk_err; 399 } else { 400 ctx->async_wait.err = err; 401 tls_err_abort(sk, err); 402 } 403 } 404 405 if (rec) { 406 struct tls_rec *first_rec; 407 408 /* Mark the record as ready for transmission */ 409 smp_store_mb(rec->tx_ready, true); 410 411 /* If received record is at head of tx_list, schedule tx */ 412 first_rec = list_first_entry(&ctx->tx_list, 413 struct tls_rec, list); 414 if (rec == first_rec) 415 ready = true; 416 } 417 418 pending = atomic_dec_return(&ctx->encrypt_pending); 419 420 if (!pending && READ_ONCE(ctx->async_notify)) 421 complete(&ctx->async_wait.completion); 422 423 if (!ready) 424 return; 425 426 /* Schedule the transmission */ 427 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 428 schedule_delayed_work(&ctx->tx_work.work, 1); 429 } 430 431 static int tls_do_encryption(struct sock *sk, 432 struct tls_context *tls_ctx, 433 struct tls_sw_context_tx *ctx, 434 struct aead_request *aead_req, 435 size_t data_len, u32 start) 436 { 437 struct tls_rec *rec = ctx->open_rec; 438 struct sk_msg *msg_en = &rec->msg_encrypted; 439 struct scatterlist *sge = sk_msg_elem(msg_en, start); 440 int rc; 441 442 memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data)); 443 444 sge->offset += tls_ctx->tx.prepend_size; 445 sge->length -= tls_ctx->tx.prepend_size; 446 447 msg_en->sg.curr = start; 448 449 aead_request_set_tfm(aead_req, ctx->aead_send); 450 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); 451 aead_request_set_crypt(aead_req, rec->sg_aead_in, 452 rec->sg_aead_out, 453 data_len, rec->iv_data); 454 455 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, 456 tls_encrypt_done, sk); 457 458 /* Add the record in tx_list */ 459 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list); 460 atomic_inc(&ctx->encrypt_pending); 461 462 rc = crypto_aead_encrypt(aead_req); 463 if (!rc || rc != -EINPROGRESS) { 464 atomic_dec(&ctx->encrypt_pending); 465 sge->offset -= 
tls_ctx->tx.prepend_size; 466 sge->length += tls_ctx->tx.prepend_size; 467 } 468 469 if (!rc) { 470 WRITE_ONCE(rec->tx_ready, true); 471 } else if (rc != -EINPROGRESS) { 472 list_del(&rec->list); 473 return rc; 474 } 475 476 /* Unhook the record from context if encryption is not failure */ 477 ctx->open_rec = NULL; 478 tls_advance_record_sn(sk, &tls_ctx->tx); 479 return rc; 480 } 481 482 static int tls_split_open_record(struct sock *sk, struct tls_rec *from, 483 struct tls_rec **to, struct sk_msg *msg_opl, 484 struct sk_msg *msg_oen, u32 split_point, 485 u32 tx_overhead_size, u32 *orig_end) 486 { 487 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes; 488 struct scatterlist *sge, *osge, *nsge; 489 u32 orig_size = msg_opl->sg.size; 490 struct scatterlist tmp = { }; 491 struct sk_msg *msg_npl; 492 struct tls_rec *new; 493 int ret; 494 495 new = tls_get_rec(sk); 496 if (!new) 497 return -ENOMEM; 498 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size + 499 tx_overhead_size, 0); 500 if (ret < 0) { 501 tls_free_rec(sk, new); 502 return ret; 503 } 504 505 *orig_end = msg_opl->sg.end; 506 i = msg_opl->sg.start; 507 sge = sk_msg_elem(msg_opl, i); 508 while (apply && sge->length) { 509 if (sge->length > apply) { 510 u32 len = sge->length - apply; 511 512 get_page(sg_page(sge)); 513 sg_set_page(&tmp, sg_page(sge), len, 514 sge->offset + apply); 515 sge->length = apply; 516 bytes += apply; 517 apply = 0; 518 } else { 519 apply -= sge->length; 520 bytes += sge->length; 521 } 522 523 sk_msg_iter_var_next(i); 524 if (i == msg_opl->sg.end) 525 break; 526 sge = sk_msg_elem(msg_opl, i); 527 } 528 529 msg_opl->sg.end = i; 530 msg_opl->sg.curr = i; 531 msg_opl->sg.copybreak = 0; 532 msg_opl->apply_bytes = 0; 533 msg_opl->sg.size = bytes; 534 535 msg_npl = &new->msg_plaintext; 536 msg_npl->apply_bytes = apply; 537 msg_npl->sg.size = orig_size - bytes; 538 539 j = msg_npl->sg.start; 540 nsge = sk_msg_elem(msg_npl, j); 541 if (tmp.length) { 542 memcpy(nsge, &tmp, sizeof(*nsge)); 543 sk_msg_iter_var_next(j); 544 nsge = sk_msg_elem(msg_npl, j); 545 } 546 547 osge = sk_msg_elem(msg_opl, i); 548 while (osge->length) { 549 memcpy(nsge, osge, sizeof(*nsge)); 550 sg_unmark_end(nsge); 551 sk_msg_iter_var_next(i); 552 sk_msg_iter_var_next(j); 553 if (i == *orig_end) 554 break; 555 osge = sk_msg_elem(msg_opl, i); 556 nsge = sk_msg_elem(msg_npl, j); 557 } 558 559 msg_npl->sg.end = j; 560 msg_npl->sg.curr = j; 561 msg_npl->sg.copybreak = 0; 562 563 *to = new; 564 return 0; 565 } 566 567 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to, 568 struct tls_rec *from, u32 orig_end) 569 { 570 struct sk_msg *msg_npl = &from->msg_plaintext; 571 struct sk_msg *msg_opl = &to->msg_plaintext; 572 struct scatterlist *osge, *nsge; 573 u32 i, j; 574 575 i = msg_opl->sg.end; 576 sk_msg_iter_var_prev(i); 577 j = msg_npl->sg.start; 578 579 osge = sk_msg_elem(msg_opl, i); 580 nsge = sk_msg_elem(msg_npl, j); 581 582 if (sg_page(osge) == sg_page(nsge) && 583 osge->offset + osge->length == nsge->offset) { 584 osge->length += nsge->length; 585 put_page(sg_page(nsge)); 586 } 587 588 msg_opl->sg.end = orig_end; 589 msg_opl->sg.curr = orig_end; 590 msg_opl->sg.copybreak = 0; 591 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size; 592 msg_opl->sg.size += msg_npl->sg.size; 593 594 sk_msg_free(sk, &to->msg_encrypted); 595 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted); 596 597 kfree(from); 598 } 599 600 static int tls_push_record(struct sock *sk, int flags, 601 unsigned char record_type) 602 { 603 struct 
tls_context *tls_ctx = tls_get_ctx(sk); 604 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 605 struct tls_rec *rec = ctx->open_rec, *tmp = NULL; 606 u32 i, split_point, uninitialized_var(orig_end); 607 struct sk_msg *msg_pl, *msg_en; 608 struct aead_request *req; 609 bool split; 610 int rc; 611 612 if (!rec) 613 return 0; 614 615 msg_pl = &rec->msg_plaintext; 616 msg_en = &rec->msg_encrypted; 617 618 split_point = msg_pl->apply_bytes; 619 split = split_point && split_point < msg_pl->sg.size; 620 if (split) { 621 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en, 622 split_point, tls_ctx->tx.overhead_size, 623 &orig_end); 624 if (rc < 0) 625 return rc; 626 sk_msg_trim(sk, msg_en, msg_pl->sg.size + 627 tls_ctx->tx.overhead_size); 628 } 629 630 rec->tx_flags = flags; 631 req = &rec->aead_req; 632 633 i = msg_pl->sg.end; 634 sk_msg_iter_var_prev(i); 635 sg_mark_end(sk_msg_elem(msg_pl, i)); 636 637 i = msg_pl->sg.start; 638 sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ? 639 &msg_en->sg.data[i] : &msg_pl->sg.data[i]); 640 641 i = msg_en->sg.end; 642 sk_msg_iter_var_prev(i); 643 sg_mark_end(sk_msg_elem(msg_en, i)); 644 645 i = msg_en->sg.start; 646 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]); 647 648 tls_make_aad(rec->aad_space, msg_pl->sg.size, 649 tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size, 650 record_type); 651 652 tls_fill_prepend(tls_ctx, 653 page_address(sg_page(&msg_en->sg.data[i])) + 654 msg_en->sg.data[i].offset, msg_pl->sg.size, 655 record_type); 656 657 tls_ctx->pending_open_record_frags = false; 658 659 rc = tls_do_encryption(sk, tls_ctx, ctx, req, msg_pl->sg.size, i); 660 if (rc < 0) { 661 if (rc != -EINPROGRESS) { 662 tls_err_abort(sk, EBADMSG); 663 if (split) { 664 tls_ctx->pending_open_record_frags = true; 665 tls_merge_open_record(sk, rec, tmp, orig_end); 666 } 667 } 668 return rc; 669 } else if (split) { 670 msg_pl = &tmp->msg_plaintext; 671 msg_en = &tmp->msg_encrypted; 672 sk_msg_trim(sk, msg_en, msg_pl->sg.size + 673 tls_ctx->tx.overhead_size); 674 tls_ctx->pending_open_record_frags = true; 675 ctx->open_rec = tmp; 676 } 677 678 return tls_tx_records(sk, flags); 679 } 680 681 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, 682 bool full_record, u8 record_type, 683 size_t *copied, int flags) 684 { 685 struct tls_context *tls_ctx = tls_get_ctx(sk); 686 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 687 struct sk_msg msg_redir = { }; 688 struct sk_psock *psock; 689 struct sock *sk_redir; 690 struct tls_rec *rec; 691 bool enospc, policy; 692 int err = 0, send; 693 u32 delta = 0; 694 695 policy = !(flags & MSG_SENDPAGE_NOPOLICY); 696 psock = sk_psock_get(sk); 697 if (!psock || !policy) 698 return tls_push_record(sk, flags, record_type); 699 more_data: 700 enospc = sk_msg_full(msg); 701 if (psock->eval == __SK_NONE) { 702 delta = msg->sg.size; 703 psock->eval = sk_psock_msg_verdict(sk, psock, msg); 704 if (delta < msg->sg.size) 705 delta -= msg->sg.size; 706 else 707 delta = 0; 708 } 709 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size && 710 !enospc && !full_record) { 711 err = -ENOSPC; 712 goto out_err; 713 } 714 msg->cork_bytes = 0; 715 send = msg->sg.size; 716 if (msg->apply_bytes && msg->apply_bytes < send) 717 send = msg->apply_bytes; 718 719 switch (psock->eval) { 720 case __SK_PASS: 721 err = tls_push_record(sk, flags, record_type); 722 if (err < 0) { 723 *copied -= sk_msg_free(sk, msg); 724 tls_free_open_rec(sk); 725 goto out_err; 726 } 727 break; 728 case __SK_REDIRECT: 729 sk_redir = psock->sk_redir; 730 
memcpy(&msg_redir, msg, sizeof(*msg)); 731 if (msg->apply_bytes < send) 732 msg->apply_bytes = 0; 733 else 734 msg->apply_bytes -= send; 735 sk_msg_return_zero(sk, msg, send); 736 msg->sg.size -= send; 737 release_sock(sk); 738 err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags); 739 lock_sock(sk); 740 if (err < 0) { 741 *copied -= sk_msg_free_nocharge(sk, &msg_redir); 742 msg->sg.size = 0; 743 } 744 if (msg->sg.size == 0) 745 tls_free_open_rec(sk); 746 break; 747 case __SK_DROP: 748 default: 749 sk_msg_free_partial(sk, msg, send); 750 if (msg->apply_bytes < send) 751 msg->apply_bytes = 0; 752 else 753 msg->apply_bytes -= send; 754 if (msg->sg.size == 0) 755 tls_free_open_rec(sk); 756 *copied -= (send + delta); 757 err = -EACCES; 758 } 759 760 if (likely(!err)) { 761 bool reset_eval = !ctx->open_rec; 762 763 rec = ctx->open_rec; 764 if (rec) { 765 msg = &rec->msg_plaintext; 766 if (!msg->apply_bytes) 767 reset_eval = true; 768 } 769 if (reset_eval) { 770 psock->eval = __SK_NONE; 771 if (psock->sk_redir) { 772 sock_put(psock->sk_redir); 773 psock->sk_redir = NULL; 774 } 775 } 776 if (rec) 777 goto more_data; 778 } 779 out_err: 780 sk_psock_put(sk, psock); 781 return err; 782 } 783 784 static int tls_sw_push_pending_record(struct sock *sk, int flags) 785 { 786 struct tls_context *tls_ctx = tls_get_ctx(sk); 787 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 788 struct tls_rec *rec = ctx->open_rec; 789 struct sk_msg *msg_pl; 790 size_t copied; 791 792 if (!rec) 793 return 0; 794 795 msg_pl = &rec->msg_plaintext; 796 copied = msg_pl->sg.size; 797 if (!copied) 798 return 0; 799 800 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA, 801 &copied, flags); 802 } 803 804 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 805 { 806 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 807 struct tls_context *tls_ctx = tls_get_ctx(sk); 808 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 809 struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send); 810 bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC; 811 unsigned char record_type = TLS_RECORD_TYPE_DATA; 812 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 813 bool eor = !(msg->msg_flags & MSG_MORE); 814 size_t try_to_copy, copied = 0; 815 struct sk_msg *msg_pl, *msg_en; 816 struct tls_rec *rec; 817 int required_size; 818 int num_async = 0; 819 bool full_record; 820 int record_room; 821 int num_zc = 0; 822 int orig_size; 823 int ret = 0; 824 825 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) 826 return -ENOTSUPP; 827 828 lock_sock(sk); 829 830 /* Wait till there is any pending write on socket */ 831 if (unlikely(sk->sk_write_pending)) { 832 ret = wait_on_pending_writer(sk, &timeo); 833 if (unlikely(ret)) 834 goto send_end; 835 } 836 837 if (unlikely(msg->msg_controllen)) { 838 ret = tls_proccess_cmsg(sk, msg, &record_type); 839 if (ret) { 840 if (ret == -EINPROGRESS) 841 num_async++; 842 else if (ret != -EAGAIN) 843 goto send_end; 844 } 845 } 846 847 while (msg_data_left(msg)) { 848 if (sk->sk_err) { 849 ret = -sk->sk_err; 850 goto send_end; 851 } 852 853 if (ctx->open_rec) 854 rec = ctx->open_rec; 855 else 856 rec = ctx->open_rec = tls_get_rec(sk); 857 if (!rec) { 858 ret = -ENOMEM; 859 goto send_end; 860 } 861 862 msg_pl = &rec->msg_plaintext; 863 msg_en = &rec->msg_encrypted; 864 865 orig_size = msg_pl->sg.size; 866 full_record = false; 867 try_to_copy = msg_data_left(msg); 868 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size; 869 if 
(try_to_copy >= record_room) { 870 try_to_copy = record_room; 871 full_record = true; 872 } 873 874 required_size = msg_pl->sg.size + try_to_copy + 875 tls_ctx->tx.overhead_size; 876 877 if (!sk_stream_memory_free(sk)) 878 goto wait_for_sndbuf; 879 880 alloc_encrypted: 881 ret = tls_alloc_encrypted_msg(sk, required_size); 882 if (ret) { 883 if (ret != -ENOSPC) 884 goto wait_for_memory; 885 886 /* Adjust try_to_copy according to the amount that was 887 * actually allocated. The difference is due 888 * to max sg elements limit 889 */ 890 try_to_copy -= required_size - msg_en->sg.size; 891 full_record = true; 892 } 893 894 if (!is_kvec && (full_record || eor) && !async_capable) { 895 u32 first = msg_pl->sg.end; 896 897 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter, 898 msg_pl, try_to_copy); 899 if (ret) 900 goto fallback_to_reg_send; 901 902 rec->inplace_crypto = 0; 903 904 num_zc++; 905 copied += try_to_copy; 906 907 sk_msg_sg_copy_set(msg_pl, first); 908 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, 909 record_type, &copied, 910 msg->msg_flags); 911 if (ret) { 912 if (ret == -EINPROGRESS) 913 num_async++; 914 else if (ret == -ENOMEM) 915 goto wait_for_memory; 916 else if (ret == -ENOSPC) 917 goto rollback_iter; 918 else if (ret != -EAGAIN) 919 goto send_end; 920 } 921 continue; 922 rollback_iter: 923 copied -= try_to_copy; 924 sk_msg_sg_copy_clear(msg_pl, first); 925 iov_iter_revert(&msg->msg_iter, 926 msg_pl->sg.size - orig_size); 927 fallback_to_reg_send: 928 sk_msg_trim(sk, msg_pl, orig_size); 929 } 930 931 required_size = msg_pl->sg.size + try_to_copy; 932 933 ret = tls_clone_plaintext_msg(sk, required_size); 934 if (ret) { 935 if (ret != -ENOSPC) 936 goto send_end; 937 938 /* Adjust try_to_copy according to the amount that was 939 * actually allocated. The difference is due 940 * to max sg elements limit 941 */ 942 try_to_copy -= required_size - msg_pl->sg.size; 943 full_record = true; 944 sk_msg_trim(sk, msg_en, msg_pl->sg.size + 945 tls_ctx->tx.overhead_size); 946 } 947 948 if (try_to_copy) { 949 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, 950 msg_pl, try_to_copy); 951 if (ret < 0) 952 goto trim_sgl; 953 } 954 955 /* Open records defined only if successfully copied, otherwise 956 * we would trim the sg but not reset the open record frags. 
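 * A failed copy jumps to trim_sgl instead, which trims both messages back
 * to orig_size.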
957 */ 958 tls_ctx->pending_open_record_frags = true; 959 copied += try_to_copy; 960 if (full_record || eor) { 961 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, 962 record_type, &copied, 963 msg->msg_flags); 964 if (ret) { 965 if (ret == -EINPROGRESS) 966 num_async++; 967 else if (ret == -ENOMEM) 968 goto wait_for_memory; 969 else if (ret != -EAGAIN) { 970 if (ret == -ENOSPC) 971 ret = 0; 972 goto send_end; 973 } 974 } 975 } 976 977 continue; 978 979 wait_for_sndbuf: 980 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 981 wait_for_memory: 982 ret = sk_stream_wait_memory(sk, &timeo); 983 if (ret) { 984 trim_sgl: 985 tls_trim_both_msgs(sk, orig_size); 986 goto send_end; 987 } 988 989 if (msg_en->sg.size < required_size) 990 goto alloc_encrypted; 991 } 992 993 if (!num_async) { 994 goto send_end; 995 } else if (num_zc) { 996 /* Wait for pending encryptions to get completed */ 997 smp_store_mb(ctx->async_notify, true); 998 999 if (atomic_read(&ctx->encrypt_pending)) 1000 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1001 else 1002 reinit_completion(&ctx->async_wait.completion); 1003 1004 WRITE_ONCE(ctx->async_notify, false); 1005 1006 if (ctx->async_wait.err) { 1007 ret = ctx->async_wait.err; 1008 copied = 0; 1009 } 1010 } 1011 1012 /* Transmit if any encryptions have completed */ 1013 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 1014 cancel_delayed_work(&ctx->tx_work.work); 1015 tls_tx_records(sk, msg->msg_flags); 1016 } 1017 1018 send_end: 1019 ret = sk_stream_error(sk, msg->msg_flags, ret); 1020 1021 release_sock(sk); 1022 return copied ? copied : ret; 1023 } 1024 1025 int tls_sw_do_sendpage(struct sock *sk, struct page *page, 1026 int offset, size_t size, int flags) 1027 { 1028 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1029 struct tls_context *tls_ctx = tls_get_ctx(sk); 1030 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 1031 unsigned char record_type = TLS_RECORD_TYPE_DATA; 1032 struct sk_msg *msg_pl; 1033 struct tls_rec *rec; 1034 int num_async = 0; 1035 size_t copied = 0; 1036 bool full_record; 1037 int record_room; 1038 int ret = 0; 1039 bool eor; 1040 1041 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST)); 1042 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1043 1044 /* Wait till there is any pending write on socket */ 1045 if (unlikely(sk->sk_write_pending)) { 1046 ret = wait_on_pending_writer(sk, &timeo); 1047 if (unlikely(ret)) 1048 goto sendpage_end; 1049 } 1050 1051 /* Call the sk_stream functions to manage the sndbuf mem. */ 1052 while (size > 0) { 1053 size_t copy, required_size; 1054 1055 if (sk->sk_err) { 1056 ret = -sk->sk_err; 1057 goto sendpage_end; 1058 } 1059 1060 if (ctx->open_rec) 1061 rec = ctx->open_rec; 1062 else 1063 rec = ctx->open_rec = tls_get_rec(sk); 1064 if (!rec) { 1065 ret = -ENOMEM; 1066 goto sendpage_end; 1067 } 1068 1069 msg_pl = &rec->msg_plaintext; 1070 1071 full_record = false; 1072 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size; 1073 copied = 0; 1074 copy = size; 1075 if (copy >= record_room) { 1076 copy = record_room; 1077 full_record = true; 1078 } 1079 1080 required_size = msg_pl->sg.size + copy + 1081 tls_ctx->tx.overhead_size; 1082 1083 if (!sk_stream_memory_free(sk)) 1084 goto wait_for_sndbuf; 1085 alloc_payload: 1086 ret = tls_alloc_encrypted_msg(sk, required_size); 1087 if (ret) { 1088 if (ret != -ENOSPC) 1089 goto wait_for_memory; 1090 1091 /* Adjust copy according to the amount that was 1092 * actually allocated. 
The difference is due 1093 * to max sg elements limit 1094 */ 1095 copy -= required_size - msg_pl->sg.size; 1096 full_record = true; 1097 } 1098 1099 sk_msg_page_add(msg_pl, page, copy, offset); 1100 sk_mem_charge(sk, copy); 1101 1102 offset += copy; 1103 size -= copy; 1104 copied += copy; 1105 1106 tls_ctx->pending_open_record_frags = true; 1107 if (full_record || eor || sk_msg_full(msg_pl)) { 1108 rec->inplace_crypto = 0; 1109 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, 1110 record_type, &copied, flags); 1111 if (ret) { 1112 if (ret == -EINPROGRESS) 1113 num_async++; 1114 else if (ret == -ENOMEM) 1115 goto wait_for_memory; 1116 else if (ret != -EAGAIN) { 1117 if (ret == -ENOSPC) 1118 ret = 0; 1119 goto sendpage_end; 1120 } 1121 } 1122 } 1123 continue; 1124 wait_for_sndbuf: 1125 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1126 wait_for_memory: 1127 ret = sk_stream_wait_memory(sk, &timeo); 1128 if (ret) { 1129 tls_trim_both_msgs(sk, msg_pl->sg.size); 1130 goto sendpage_end; 1131 } 1132 1133 goto alloc_payload; 1134 } 1135 1136 if (num_async) { 1137 /* Transmit if any encryptions have completed */ 1138 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 1139 cancel_delayed_work(&ctx->tx_work.work); 1140 tls_tx_records(sk, flags); 1141 } 1142 } 1143 sendpage_end: 1144 ret = sk_stream_error(sk, flags, ret); 1145 return copied ? copied : ret; 1146 } 1147 1148 int tls_sw_sendpage_locked(struct sock *sk, struct page *page, 1149 int offset, size_t size, int flags) 1150 { 1151 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 1152 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) 1153 return -ENOTSUPP; 1154 1155 return tls_sw_do_sendpage(sk, page, offset, size, flags); 1156 } 1157 1158 int tls_sw_sendpage(struct sock *sk, struct page *page, 1159 int offset, size_t size, int flags) 1160 { 1161 int ret; 1162 1163 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 1164 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) 1165 return -ENOTSUPP; 1166 1167 lock_sock(sk); 1168 ret = tls_sw_do_sendpage(sk, page, offset, size, flags); 1169 release_sock(sk); 1170 return ret; 1171 } 1172 1173 static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock, 1174 int flags, long timeo, int *err) 1175 { 1176 struct tls_context *tls_ctx = tls_get_ctx(sk); 1177 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1178 struct sk_buff *skb; 1179 DEFINE_WAIT_FUNC(wait, woken_wake_function); 1180 1181 while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) { 1182 if (sk->sk_err) { 1183 *err = sock_error(sk); 1184 return NULL; 1185 } 1186 1187 if (sk->sk_shutdown & RCV_SHUTDOWN) 1188 return NULL; 1189 1190 if (sock_flag(sk, SOCK_DONE)) 1191 return NULL; 1192 1193 if ((flags & MSG_DONTWAIT) || !timeo) { 1194 *err = -EAGAIN; 1195 return NULL; 1196 } 1197 1198 add_wait_queue(sk_sleep(sk), &wait); 1199 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1200 sk_wait_event(sk, &timeo, 1201 ctx->recv_pkt != skb || 1202 !sk_psock_queue_empty(psock), 1203 &wait); 1204 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1205 remove_wait_queue(sk_sleep(sk), &wait); 1206 1207 /* Handle signals */ 1208 if (signal_pending(current)) { 1209 *err = sock_intr_errno(timeo); 1210 return NULL; 1211 } 1212 } 1213 1214 return skb; 1215 } 1216 1217 static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from, 1218 int length, int *pages_used, 1219 unsigned int *size_used, 1220 struct scatterlist *to, 1221 int to_max_pages) 1222 { 1223 int rc = 0, i = 0, num_elem = *pages_used, maxpages; 1224 struct page 
*pages[MAX_SKB_FRAGS]; 1225 unsigned int size = *size_used; 1226 ssize_t copied, use; 1227 size_t offset; 1228 1229 while (length > 0) { 1230 i = 0; 1231 maxpages = to_max_pages - num_elem; 1232 if (maxpages == 0) { 1233 rc = -EFAULT; 1234 goto out; 1235 } 1236 copied = iov_iter_get_pages(from, pages, 1237 length, 1238 maxpages, &offset); 1239 if (copied <= 0) { 1240 rc = -EFAULT; 1241 goto out; 1242 } 1243 1244 iov_iter_advance(from, copied); 1245 1246 length -= copied; 1247 size += copied; 1248 while (copied) { 1249 use = min_t(int, copied, PAGE_SIZE - offset); 1250 1251 sg_set_page(&to[num_elem], 1252 pages[i], use, offset); 1253 sg_unmark_end(&to[num_elem]); 1254 /* We do not uncharge memory from this API */ 1255 1256 offset = 0; 1257 copied -= use; 1258 1259 i++; 1260 num_elem++; 1261 } 1262 } 1263 /* Mark the end in the last sg entry if newly added */ 1264 if (num_elem > *pages_used) 1265 sg_mark_end(&to[num_elem - 1]); 1266 out: 1267 if (rc) 1268 iov_iter_revert(from, size - *size_used); 1269 *size_used = size; 1270 *pages_used = num_elem; 1271 1272 return rc; 1273 } 1274 1275 /* This function decrypts the input skb into either out_iov or in out_sg 1276 * or in skb buffers itself. The input parameter 'zc' indicates if 1277 * zero-copy mode needs to be tried or not. With zero-copy mode, either 1278 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are 1279 * NULL, then the decryption happens inside skb buffers itself, i.e. 1280 * zero-copy gets disabled and 'zc' is updated. 1281 */ 1282 1283 static int decrypt_internal(struct sock *sk, struct sk_buff *skb, 1284 struct iov_iter *out_iov, 1285 struct scatterlist *out_sg, 1286 int *chunk, bool *zc) 1287 { 1288 struct tls_context *tls_ctx = tls_get_ctx(sk); 1289 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1290 struct strp_msg *rxm = strp_msg(skb); 1291 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0; 1292 struct aead_request *aead_req; 1293 struct sk_buff *unused; 1294 u8 *aad, *iv, *mem = NULL; 1295 struct scatterlist *sgin = NULL; 1296 struct scatterlist *sgout = NULL; 1297 const int data_len = rxm->full_len - tls_ctx->rx.overhead_size; 1298 1299 if (*zc && (out_iov || out_sg)) { 1300 if (out_iov) 1301 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1; 1302 else 1303 n_sgout = sg_nents(out_sg); 1304 n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size, 1305 rxm->full_len - tls_ctx->rx.prepend_size); 1306 } else { 1307 n_sgout = 0; 1308 *zc = false; 1309 n_sgin = skb_cow_data(skb, 0, &unused); 1310 } 1311 1312 if (n_sgin < 1) 1313 return -EBADMSG; 1314 1315 /* Increment to accommodate AAD */ 1316 n_sgin = n_sgin + 1; 1317 1318 nsg = n_sgin + n_sgout; 1319 1320 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv); 1321 mem_size = aead_size + (nsg * sizeof(struct scatterlist)); 1322 mem_size = mem_size + TLS_AAD_SPACE_SIZE; 1323 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv); 1324 1325 /* Allocate a single block of memory which contains 1326 * aead_req || sgin[] || sgout[] || aad || iv. 1327 * This order achieves correct alignment for aead_req, sgin, sgout. 
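 * The pieces are aead_size bytes, n_sgin and n_sgout scatterlist entries,
 * TLS_AAD_SPACE_SIZE bytes of AAD and crypto_aead_ivsize() bytes of IV,
 * carved out of the single block in that order below.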
1328 */ 1329 mem = kmalloc(mem_size, sk->sk_allocation); 1330 if (!mem) 1331 return -ENOMEM; 1332 1333 /* Segment the allocated memory */ 1334 aead_req = (struct aead_request *)mem; 1335 sgin = (struct scatterlist *)(mem + aead_size); 1336 sgout = sgin + n_sgin; 1337 aad = (u8 *)(sgout + n_sgout); 1338 iv = aad + TLS_AAD_SPACE_SIZE; 1339 1340 /* Prepare IV */ 1341 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, 1342 iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, 1343 tls_ctx->rx.iv_size); 1344 if (err < 0) { 1345 kfree(mem); 1346 return err; 1347 } 1348 memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE); 1349 1350 /* Prepare AAD */ 1351 tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size, 1352 tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size, 1353 ctx->control); 1354 1355 /* Prepare sgin */ 1356 sg_init_table(sgin, n_sgin); 1357 sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE); 1358 err = skb_to_sgvec(skb, &sgin[1], 1359 rxm->offset + tls_ctx->rx.prepend_size, 1360 rxm->full_len - tls_ctx->rx.prepend_size); 1361 if (err < 0) { 1362 kfree(mem); 1363 return err; 1364 } 1365 1366 if (n_sgout) { 1367 if (out_iov) { 1368 sg_init_table(sgout, n_sgout); 1369 sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE); 1370 1371 *chunk = 0; 1372 err = tls_setup_from_iter(sk, out_iov, data_len, 1373 &pages, chunk, &sgout[1], 1374 (n_sgout - 1)); 1375 if (err < 0) 1376 goto fallback_to_reg_recv; 1377 } else if (out_sg) { 1378 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout)); 1379 } else { 1380 goto fallback_to_reg_recv; 1381 } 1382 } else { 1383 fallback_to_reg_recv: 1384 sgout = sgin; 1385 pages = 0; 1386 *chunk = 0; 1387 *zc = false; 1388 } 1389 1390 /* Prepare and submit AEAD request */ 1391 err = tls_do_decryption(sk, skb, sgin, sgout, iv, 1392 data_len, aead_req, *zc); 1393 if (err == -EINPROGRESS) 1394 return err; 1395 1396 /* Release the pages in case iov was mapped to pages */ 1397 for (; pages > 0; pages--) 1398 put_page(sg_page(&sgout[pages])); 1399 1400 kfree(mem); 1401 return err; 1402 } 1403 1404 static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, 1405 struct iov_iter *dest, int *chunk, bool *zc) 1406 { 1407 struct tls_context *tls_ctx = tls_get_ctx(sk); 1408 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1409 struct strp_msg *rxm = strp_msg(skb); 1410 int err = 0; 1411 1412 #ifdef CONFIG_TLS_DEVICE 1413 err = tls_device_decrypted(sk, skb); 1414 if (err < 0) 1415 return err; 1416 #endif 1417 if (!ctx->decrypted) { 1418 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc); 1419 if (err < 0) { 1420 if (err == -EINPROGRESS) 1421 tls_advance_record_sn(sk, &tls_ctx->rx); 1422 1423 return err; 1424 } 1425 } else { 1426 *zc = false; 1427 } 1428 1429 rxm->offset += tls_ctx->rx.prepend_size; 1430 rxm->full_len -= tls_ctx->rx.overhead_size; 1431 tls_advance_record_sn(sk, &tls_ctx->rx); 1432 ctx->decrypted = true; 1433 ctx->saved_data_ready(sk); 1434 1435 return err; 1436 } 1437 1438 int decrypt_skb(struct sock *sk, struct sk_buff *skb, 1439 struct scatterlist *sgout) 1440 { 1441 bool zc = true; 1442 int chunk; 1443 1444 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc); 1445 } 1446 1447 static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb, 1448 unsigned int len) 1449 { 1450 struct tls_context *tls_ctx = tls_get_ctx(sk); 1451 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1452 1453 if (skb) { 1454 struct strp_msg *rxm = strp_msg(skb); 1455 1456 if (len < rxm->full_len) { 1457 rxm->offset += len; 1458 rxm->full_len -= len; 1459 return false; 
1460 } 1461 kfree_skb(skb); 1462 } 1463 1464 /* Finished with message */ 1465 ctx->recv_pkt = NULL; 1466 __strp_unpause(&ctx->strp); 1467 1468 return true; 1469 } 1470 1471 int tls_sw_recvmsg(struct sock *sk, 1472 struct msghdr *msg, 1473 size_t len, 1474 int nonblock, 1475 int flags, 1476 int *addr_len) 1477 { 1478 struct tls_context *tls_ctx = tls_get_ctx(sk); 1479 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1480 struct sk_psock *psock; 1481 unsigned char control; 1482 struct strp_msg *rxm; 1483 struct sk_buff *skb; 1484 ssize_t copied = 0; 1485 bool cmsg = false; 1486 int target, err = 0; 1487 long timeo; 1488 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 1489 int num_async = 0; 1490 1491 flags |= nonblock; 1492 1493 if (unlikely(flags & MSG_ERRQUEUE)) 1494 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR); 1495 1496 psock = sk_psock_get(sk); 1497 lock_sock(sk); 1498 1499 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1500 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1501 do { 1502 bool zc = false; 1503 bool async = false; 1504 int chunk = 0; 1505 1506 skb = tls_wait_data(sk, psock, flags, timeo, &err); 1507 if (!skb) { 1508 if (psock) { 1509 int ret = __tcp_bpf_recvmsg(sk, psock, 1510 msg, len, flags); 1511 1512 if (ret > 0) { 1513 copied += ret; 1514 len -= ret; 1515 continue; 1516 } 1517 } 1518 goto recv_end; 1519 } 1520 1521 rxm = strp_msg(skb); 1522 1523 if (!cmsg) { 1524 int cerr; 1525 1526 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE, 1527 sizeof(ctx->control), &ctx->control); 1528 cmsg = true; 1529 control = ctx->control; 1530 if (ctx->control != TLS_RECORD_TYPE_DATA) { 1531 if (cerr || msg->msg_flags & MSG_CTRUNC) { 1532 err = -EIO; 1533 goto recv_end; 1534 } 1535 } 1536 } else if (control != ctx->control) { 1537 goto recv_end; 1538 } 1539 1540 if (!ctx->decrypted) { 1541 int to_copy = rxm->full_len - tls_ctx->rx.overhead_size; 1542 1543 if (!is_kvec && to_copy <= len && 1544 likely(!(flags & MSG_PEEK))) 1545 zc = true; 1546 1547 err = decrypt_skb_update(sk, skb, &msg->msg_iter, 1548 &chunk, &zc); 1549 if (err < 0 && err != -EINPROGRESS) { 1550 tls_err_abort(sk, EBADMSG); 1551 goto recv_end; 1552 } 1553 1554 if (err == -EINPROGRESS) { 1555 async = true; 1556 num_async++; 1557 goto pick_next_record; 1558 } 1559 1560 ctx->decrypted = true; 1561 } 1562 1563 if (!zc) { 1564 chunk = min_t(unsigned int, rxm->full_len, len); 1565 1566 err = skb_copy_datagram_msg(skb, rxm->offset, msg, 1567 chunk); 1568 if (err < 0) 1569 goto recv_end; 1570 } 1571 1572 pick_next_record: 1573 copied += chunk; 1574 len -= chunk; 1575 if (likely(!(flags & MSG_PEEK))) { 1576 u8 control = ctx->control; 1577 1578 /* For async, drop current skb reference */ 1579 if (async) 1580 skb = NULL; 1581 1582 if (tls_sw_advance_skb(sk, skb, chunk)) { 1583 /* Return full control message to 1584 * userspace before trying to parse 1585 * another message type 1586 */ 1587 msg->msg_flags |= MSG_EOR; 1588 if (control != TLS_RECORD_TYPE_DATA) 1589 goto recv_end; 1590 } else { 1591 break; 1592 } 1593 } else { 1594 /* MSG_PEEK right now cannot look beyond current skb 1595 * from strparser, meaning we cannot advance skb here 1596 * and thus unpause strparser since we'd loose original 1597 * one. 1598 */ 1599 break; 1600 } 1601 1602 /* If we have a new message from strparser, continue now. 
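 * recv_pkt is refilled by tls_queue() once strparser has parsed the next
 * record, so we only stop when the target is met and nothing is pending.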
*/ 1603 if (copied >= target && !ctx->recv_pkt) 1604 break; 1605 } while (len); 1606 1607 recv_end: 1608 if (num_async) { 1609 /* Wait for all previously submitted records to be decrypted */ 1610 smp_store_mb(ctx->async_notify, true); 1611 if (atomic_read(&ctx->decrypt_pending)) { 1612 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1613 if (err) { 1614 /* one of async decrypt failed */ 1615 tls_err_abort(sk, err); 1616 copied = 0; 1617 } 1618 } else { 1619 reinit_completion(&ctx->async_wait.completion); 1620 } 1621 WRITE_ONCE(ctx->async_notify, false); 1622 } 1623 1624 release_sock(sk); 1625 if (psock) 1626 sk_psock_put(sk, psock); 1627 return copied ? : err; 1628 } 1629 1630 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, 1631 struct pipe_inode_info *pipe, 1632 size_t len, unsigned int flags) 1633 { 1634 struct tls_context *tls_ctx = tls_get_ctx(sock->sk); 1635 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1636 struct strp_msg *rxm = NULL; 1637 struct sock *sk = sock->sk; 1638 struct sk_buff *skb; 1639 ssize_t copied = 0; 1640 int err = 0; 1641 long timeo; 1642 int chunk; 1643 bool zc = false; 1644 1645 lock_sock(sk); 1646 1647 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1648 1649 skb = tls_wait_data(sk, NULL, flags, timeo, &err); 1650 if (!skb) 1651 goto splice_read_end; 1652 1653 /* splice does not support reading control messages */ 1654 if (ctx->control != TLS_RECORD_TYPE_DATA) { 1655 err = -ENOTSUPP; 1656 goto splice_read_end; 1657 } 1658 1659 if (!ctx->decrypted) { 1660 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc); 1661 1662 if (err < 0) { 1663 tls_err_abort(sk, EBADMSG); 1664 goto splice_read_end; 1665 } 1666 ctx->decrypted = true; 1667 } 1668 rxm = strp_msg(skb); 1669 1670 chunk = min_t(unsigned int, rxm->full_len, len); 1671 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags); 1672 if (copied < 0) 1673 goto splice_read_end; 1674 1675 if (likely(!(flags & MSG_PEEK))) 1676 tls_sw_advance_skb(sk, skb, copied); 1677 1678 splice_read_end: 1679 release_sock(sk); 1680 return copied ? : err; 1681 } 1682 1683 bool tls_sw_stream_read(const struct sock *sk) 1684 { 1685 struct tls_context *tls_ctx = tls_get_ctx(sk); 1686 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1687 bool ingress_empty = true; 1688 struct sk_psock *psock; 1689 1690 rcu_read_lock(); 1691 psock = sk_psock(sk); 1692 if (psock) 1693 ingress_empty = list_empty(&psock->ingress_msg); 1694 rcu_read_unlock(); 1695 1696 return !ingress_empty || ctx->recv_pkt; 1697 } 1698 1699 static int tls_read_size(struct strparser *strp, struct sk_buff *skb) 1700 { 1701 struct tls_context *tls_ctx = tls_get_ctx(strp->sk); 1702 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1703 char header[TLS_HEADER_SIZE + MAX_IV_SIZE]; 1704 struct strp_msg *rxm = strp_msg(skb); 1705 size_t cipher_overhead; 1706 size_t data_len = 0; 1707 int ret; 1708 1709 /* Verify that we have a full TLS header, or wait for more data */ 1710 if (rxm->offset + tls_ctx->rx.prepend_size > skb->len) 1711 return 0; 1712 1713 /* Sanity-check size of on-stack buffer. 
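 * header[] holds TLS_HEADER_SIZE + MAX_IV_SIZE bytes and nonce_size is
 * capped at MAX_IV_SIZE in tls_set_sw_offload(), so the WARN_ON below
 * should never trigger.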
*/ 1714 if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) { 1715 ret = -EINVAL; 1716 goto read_failure; 1717 } 1718 1719 /* Linearize header to local buffer */ 1720 ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size); 1721 1722 if (ret < 0) 1723 goto read_failure; 1724 1725 ctx->control = header[0]; 1726 1727 data_len = ((header[4] & 0xFF) | (header[3] << 8)); 1728 1729 cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size; 1730 1731 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) { 1732 ret = -EMSGSIZE; 1733 goto read_failure; 1734 } 1735 if (data_len < cipher_overhead) { 1736 ret = -EBADMSG; 1737 goto read_failure; 1738 } 1739 1740 if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) || 1741 header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) { 1742 ret = -EINVAL; 1743 goto read_failure; 1744 } 1745 1746 #ifdef CONFIG_TLS_DEVICE 1747 handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset, 1748 *(u64*)tls_ctx->rx.rec_seq); 1749 #endif 1750 return data_len + TLS_HEADER_SIZE; 1751 1752 read_failure: 1753 tls_err_abort(strp->sk, ret); 1754 1755 return ret; 1756 } 1757 1758 static void tls_queue(struct strparser *strp, struct sk_buff *skb) 1759 { 1760 struct tls_context *tls_ctx = tls_get_ctx(strp->sk); 1761 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1762 1763 ctx->decrypted = false; 1764 1765 ctx->recv_pkt = skb; 1766 strp_pause(strp); 1767 1768 ctx->saved_data_ready(strp->sk); 1769 } 1770 1771 static void tls_data_ready(struct sock *sk) 1772 { 1773 struct tls_context *tls_ctx = tls_get_ctx(sk); 1774 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1775 struct sk_psock *psock; 1776 1777 strp_data_ready(&ctx->strp); 1778 1779 psock = sk_psock_get(sk); 1780 if (psock && !list_empty(&psock->ingress_msg)) { 1781 ctx->saved_data_ready(sk); 1782 sk_psock_put(sk, psock); 1783 } 1784 } 1785 1786 void tls_sw_free_resources_tx(struct sock *sk) 1787 { 1788 struct tls_context *tls_ctx = tls_get_ctx(sk); 1789 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 1790 struct tls_rec *rec, *tmp; 1791 1792 /* Wait for any pending async encryptions to complete */ 1793 smp_store_mb(ctx->async_notify, true); 1794 if (atomic_read(&ctx->encrypt_pending)) 1795 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1796 1797 release_sock(sk); 1798 cancel_delayed_work_sync(&ctx->tx_work.work); 1799 lock_sock(sk); 1800 1801 /* Tx whatever records we can transmit and abandon the rest */ 1802 tls_tx_records(sk, -1); 1803 1804 /* Free up un-sent records in tx_list. First, free 1805 * the partially sent record if any at head of tx_list. 
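 * Its encrypted pages are released through the partially_sent_record
 * scatterlist walk, so only the plaintext copy of that record is freed
 * via sk_msg_free().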
1806 */ 1807 if (tls_ctx->partially_sent_record) { 1808 struct scatterlist *sg = tls_ctx->partially_sent_record; 1809 1810 while (1) { 1811 put_page(sg_page(sg)); 1812 sk_mem_uncharge(sk, sg->length); 1813 1814 if (sg_is_last(sg)) 1815 break; 1816 sg++; 1817 } 1818 1819 tls_ctx->partially_sent_record = NULL; 1820 1821 rec = list_first_entry(&ctx->tx_list, 1822 struct tls_rec, list); 1823 list_del(&rec->list); 1824 sk_msg_free(sk, &rec->msg_plaintext); 1825 kfree(rec); 1826 } 1827 1828 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { 1829 list_del(&rec->list); 1830 sk_msg_free(sk, &rec->msg_encrypted); 1831 sk_msg_free(sk, &rec->msg_plaintext); 1832 kfree(rec); 1833 } 1834 1835 crypto_free_aead(ctx->aead_send); 1836 tls_free_open_rec(sk); 1837 1838 kfree(ctx); 1839 } 1840 1841 void tls_sw_release_resources_rx(struct sock *sk) 1842 { 1843 struct tls_context *tls_ctx = tls_get_ctx(sk); 1844 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1845 1846 if (ctx->aead_recv) { 1847 kfree_skb(ctx->recv_pkt); 1848 ctx->recv_pkt = NULL; 1849 crypto_free_aead(ctx->aead_recv); 1850 strp_stop(&ctx->strp); 1851 write_lock_bh(&sk->sk_callback_lock); 1852 sk->sk_data_ready = ctx->saved_data_ready; 1853 write_unlock_bh(&sk->sk_callback_lock); 1854 release_sock(sk); 1855 strp_done(&ctx->strp); 1856 lock_sock(sk); 1857 } 1858 } 1859 1860 void tls_sw_free_resources_rx(struct sock *sk) 1861 { 1862 struct tls_context *tls_ctx = tls_get_ctx(sk); 1863 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1864 1865 tls_sw_release_resources_rx(sk); 1866 1867 kfree(ctx); 1868 } 1869 1870 /* The work handler to transmitt the encrypted records in tx_list */ 1871 static void tx_work_handler(struct work_struct *work) 1872 { 1873 struct delayed_work *delayed_work = to_delayed_work(work); 1874 struct tx_work *tx_work = container_of(delayed_work, 1875 struct tx_work, work); 1876 struct sock *sk = tx_work->sk; 1877 struct tls_context *tls_ctx = tls_get_ctx(sk); 1878 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 1879 1880 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 1881 return; 1882 1883 lock_sock(sk); 1884 tls_tx_records(sk, -1); 1885 release_sock(sk); 1886 } 1887 1888 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) 1889 { 1890 struct tls_crypto_info *crypto_info; 1891 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; 1892 struct tls_sw_context_tx *sw_ctx_tx = NULL; 1893 struct tls_sw_context_rx *sw_ctx_rx = NULL; 1894 struct cipher_context *cctx; 1895 struct crypto_aead **aead; 1896 struct strp_callbacks cb; 1897 u16 nonce_size, tag_size, iv_size, rec_seq_size; 1898 char *iv, *rec_seq; 1899 int rc = 0; 1900 1901 if (!ctx) { 1902 rc = -EINVAL; 1903 goto out; 1904 } 1905 1906 if (tx) { 1907 if (!ctx->priv_ctx_tx) { 1908 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); 1909 if (!sw_ctx_tx) { 1910 rc = -ENOMEM; 1911 goto out; 1912 } 1913 ctx->priv_ctx_tx = sw_ctx_tx; 1914 } else { 1915 sw_ctx_tx = 1916 (struct tls_sw_context_tx *)ctx->priv_ctx_tx; 1917 } 1918 } else { 1919 if (!ctx->priv_ctx_rx) { 1920 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); 1921 if (!sw_ctx_rx) { 1922 rc = -ENOMEM; 1923 goto out; 1924 } 1925 ctx->priv_ctx_rx = sw_ctx_rx; 1926 } else { 1927 sw_ctx_rx = 1928 (struct tls_sw_context_rx *)ctx->priv_ctx_rx; 1929 } 1930 } 1931 1932 if (tx) { 1933 crypto_init_wait(&sw_ctx_tx->async_wait); 1934 crypto_info = &ctx->crypto_send.info; 1935 cctx = &ctx->tx; 1936 aead = &sw_ctx_tx->aead_send; 1937 INIT_LIST_HEAD(&sw_ctx_tx->tx_list); 1938 
INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); 1939 sw_ctx_tx->tx_work.sk = sk; 1940 } else { 1941 crypto_init_wait(&sw_ctx_rx->async_wait); 1942 crypto_info = &ctx->crypto_recv.info; 1943 cctx = &ctx->rx; 1944 aead = &sw_ctx_rx->aead_recv; 1945 } 1946 1947 switch (crypto_info->cipher_type) { 1948 case TLS_CIPHER_AES_GCM_128: { 1949 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; 1950 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE; 1951 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; 1952 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv; 1953 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE; 1954 rec_seq = 1955 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq; 1956 gcm_128_info = 1957 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; 1958 break; 1959 } 1960 default: 1961 rc = -EINVAL; 1962 goto free_priv; 1963 } 1964 1965 /* Sanity-check the IV size for stack allocations. */ 1966 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) { 1967 rc = -EINVAL; 1968 goto free_priv; 1969 } 1970 1971 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size; 1972 cctx->tag_size = tag_size; 1973 cctx->overhead_size = cctx->prepend_size + cctx->tag_size; 1974 cctx->iv_size = iv_size; 1975 cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, 1976 GFP_KERNEL); 1977 if (!cctx->iv) { 1978 rc = -ENOMEM; 1979 goto free_priv; 1980 } 1981 memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); 1982 memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size); 1983 cctx->rec_seq_size = rec_seq_size; 1984 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL); 1985 if (!cctx->rec_seq) { 1986 rc = -ENOMEM; 1987 goto free_iv; 1988 } 1989 1990 if (!*aead) { 1991 *aead = crypto_alloc_aead("gcm(aes)", 0, 0); 1992 if (IS_ERR(*aead)) { 1993 rc = PTR_ERR(*aead); 1994 *aead = NULL; 1995 goto free_rec_seq; 1996 } 1997 } 1998 1999 ctx->push_pending_record = tls_sw_push_pending_record; 2000 2001 rc = crypto_aead_setkey(*aead, gcm_128_info->key, 2002 TLS_CIPHER_AES_GCM_128_KEY_SIZE); 2003 if (rc) 2004 goto free_aead; 2005 2006 rc = crypto_aead_setauthsize(*aead, cctx->tag_size); 2007 if (rc) 2008 goto free_aead; 2009 2010 if (sw_ctx_rx) { 2011 /* Set up strparser */ 2012 memset(&cb, 0, sizeof(cb)); 2013 cb.rcv_msg = tls_queue; 2014 cb.parse_msg = tls_read_size; 2015 2016 strp_init(&sw_ctx_rx->strp, sk, &cb); 2017 2018 write_lock_bh(&sk->sk_callback_lock); 2019 sw_ctx_rx->saved_data_ready = sk->sk_data_ready; 2020 sk->sk_data_ready = tls_data_ready; 2021 write_unlock_bh(&sk->sk_callback_lock); 2022 2023 strp_check_rcv(&sw_ctx_rx->strp); 2024 } 2025 2026 goto out; 2027 2028 free_aead: 2029 crypto_free_aead(*aead); 2030 *aead = NULL; 2031 free_rec_seq: 2032 kfree(cctx->rec_seq); 2033 cctx->rec_seq = NULL; 2034 free_iv: 2035 kfree(cctx->iv); 2036 cctx->iv = NULL; 2037 free_priv: 2038 if (tx) { 2039 kfree(ctx->priv_ctx_tx); 2040 ctx->priv_ctx_tx = NULL; 2041 } else { 2042 kfree(ctx->priv_ctx_rx); 2043 ctx->priv_ctx_rx = NULL; 2044 } 2045 out: 2046 return rc; 2047 } 2048