13c4d7559SDave Watson /* 23c4d7559SDave Watson * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved. 33c4d7559SDave Watson * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved. 43c4d7559SDave Watson * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved. 53c4d7559SDave Watson * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved. 63c4d7559SDave Watson * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved. 7d3b18ad3SJohn Fastabend * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io 83c4d7559SDave Watson * 93c4d7559SDave Watson * This software is available to you under a choice of one of two 103c4d7559SDave Watson * licenses. You may choose to be licensed under the terms of the GNU 113c4d7559SDave Watson * General Public License (GPL) Version 2, available from the file 123c4d7559SDave Watson * COPYING in the main directory of this source tree, or the 133c4d7559SDave Watson * OpenIB.org BSD license below: 143c4d7559SDave Watson * 153c4d7559SDave Watson * Redistribution and use in source and binary forms, with or 163c4d7559SDave Watson * without modification, are permitted provided that the following 173c4d7559SDave Watson * conditions are met: 183c4d7559SDave Watson * 193c4d7559SDave Watson * - Redistributions of source code must retain the above 203c4d7559SDave Watson * copyright notice, this list of conditions and the following 213c4d7559SDave Watson * disclaimer. 223c4d7559SDave Watson * 233c4d7559SDave Watson * - Redistributions in binary form must reproduce the above 243c4d7559SDave Watson * copyright notice, this list of conditions and the following 253c4d7559SDave Watson * disclaimer in the documentation and/or other materials 263c4d7559SDave Watson * provided with the distribution. 
273c4d7559SDave Watson * 283c4d7559SDave Watson * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 293c4d7559SDave Watson * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 303c4d7559SDave Watson * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 313c4d7559SDave Watson * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 323c4d7559SDave Watson * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 333c4d7559SDave Watson * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 343c4d7559SDave Watson * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 353c4d7559SDave Watson * SOFTWARE. 363c4d7559SDave Watson */ 373c4d7559SDave Watson 38c46234ebSDave Watson #include <linux/sched/signal.h> 393c4d7559SDave Watson #include <linux/module.h> 403c4d7559SDave Watson #include <crypto/aead.h> 413c4d7559SDave Watson 42c46234ebSDave Watson #include <net/strparser.h> 433c4d7559SDave Watson #include <net/tls.h> 443c4d7559SDave Watson 450927f71dSDoron Roberts-Kedes static int __skb_nsg(struct sk_buff *skb, int offset, int len, 460927f71dSDoron Roberts-Kedes unsigned int recursion_level) 470927f71dSDoron Roberts-Kedes { 480927f71dSDoron Roberts-Kedes int start = skb_headlen(skb); 490927f71dSDoron Roberts-Kedes int i, chunk = start - offset; 500927f71dSDoron Roberts-Kedes struct sk_buff *frag_iter; 510927f71dSDoron Roberts-Kedes int elt = 0; 520927f71dSDoron Roberts-Kedes 530927f71dSDoron Roberts-Kedes if (unlikely(recursion_level >= 24)) 540927f71dSDoron Roberts-Kedes return -EMSGSIZE; 550927f71dSDoron Roberts-Kedes 560927f71dSDoron Roberts-Kedes if (chunk > 0) { 570927f71dSDoron Roberts-Kedes if (chunk > len) 580927f71dSDoron Roberts-Kedes chunk = len; 590927f71dSDoron Roberts-Kedes elt++; 600927f71dSDoron Roberts-Kedes len -= chunk; 610927f71dSDoron Roberts-Kedes if (len == 0) 620927f71dSDoron Roberts-Kedes return elt; 630927f71dSDoron Roberts-Kedes offset += chunk; 640927f71dSDoron 
Roberts-Kedes } 650927f71dSDoron Roberts-Kedes 660927f71dSDoron Roberts-Kedes for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 670927f71dSDoron Roberts-Kedes int end; 680927f71dSDoron Roberts-Kedes 690927f71dSDoron Roberts-Kedes WARN_ON(start > offset + len); 700927f71dSDoron Roberts-Kedes 710927f71dSDoron Roberts-Kedes end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 720927f71dSDoron Roberts-Kedes chunk = end - offset; 730927f71dSDoron Roberts-Kedes if (chunk > 0) { 740927f71dSDoron Roberts-Kedes if (chunk > len) 750927f71dSDoron Roberts-Kedes chunk = len; 760927f71dSDoron Roberts-Kedes elt++; 770927f71dSDoron Roberts-Kedes len -= chunk; 780927f71dSDoron Roberts-Kedes if (len == 0) 790927f71dSDoron Roberts-Kedes return elt; 800927f71dSDoron Roberts-Kedes offset += chunk; 810927f71dSDoron Roberts-Kedes } 820927f71dSDoron Roberts-Kedes start = end; 830927f71dSDoron Roberts-Kedes } 840927f71dSDoron Roberts-Kedes 850927f71dSDoron Roberts-Kedes if (unlikely(skb_has_frag_list(skb))) { 860927f71dSDoron Roberts-Kedes skb_walk_frags(skb, frag_iter) { 870927f71dSDoron Roberts-Kedes int end, ret; 880927f71dSDoron Roberts-Kedes 890927f71dSDoron Roberts-Kedes WARN_ON(start > offset + len); 900927f71dSDoron Roberts-Kedes 910927f71dSDoron Roberts-Kedes end = start + frag_iter->len; 920927f71dSDoron Roberts-Kedes chunk = end - offset; 930927f71dSDoron Roberts-Kedes if (chunk > 0) { 940927f71dSDoron Roberts-Kedes if (chunk > len) 950927f71dSDoron Roberts-Kedes chunk = len; 960927f71dSDoron Roberts-Kedes ret = __skb_nsg(frag_iter, offset - start, chunk, 970927f71dSDoron Roberts-Kedes recursion_level + 1); 980927f71dSDoron Roberts-Kedes if (unlikely(ret < 0)) 990927f71dSDoron Roberts-Kedes return ret; 1000927f71dSDoron Roberts-Kedes elt += ret; 1010927f71dSDoron Roberts-Kedes len -= chunk; 1020927f71dSDoron Roberts-Kedes if (len == 0) 1030927f71dSDoron Roberts-Kedes return elt; 1040927f71dSDoron Roberts-Kedes offset += chunk; 1050927f71dSDoron Roberts-Kedes } 
1060927f71dSDoron Roberts-Kedes start = end; 1070927f71dSDoron Roberts-Kedes } 1080927f71dSDoron Roberts-Kedes } 1090927f71dSDoron Roberts-Kedes BUG_ON(len); 1100927f71dSDoron Roberts-Kedes return elt; 1110927f71dSDoron Roberts-Kedes } 1120927f71dSDoron Roberts-Kedes 1130927f71dSDoron Roberts-Kedes /* Return the number of scatterlist elements required to completely map the 1140927f71dSDoron Roberts-Kedes * skb, or -EMSGSIZE if the recursion depth is exceeded. 1150927f71dSDoron Roberts-Kedes */ 1160927f71dSDoron Roberts-Kedes static int skb_nsg(struct sk_buff *skb, int offset, int len) 1170927f71dSDoron Roberts-Kedes { 1180927f71dSDoron Roberts-Kedes return __skb_nsg(skb, offset, len, 0); 1190927f71dSDoron Roberts-Kedes } 1200927f71dSDoron Roberts-Kedes 121130b392cSDave Watson static int padding_length(struct tls_sw_context_rx *ctx, 122b53f4976SJakub Kicinski struct tls_prot_info *prot, struct sk_buff *skb) 123130b392cSDave Watson { 124130b392cSDave Watson struct strp_msg *rxm = strp_msg(skb); 125130b392cSDave Watson int sub = 0; 126130b392cSDave Watson 127130b392cSDave Watson /* Determine zero-padding length */ 128b53f4976SJakub Kicinski if (prot->version == TLS_1_3_VERSION) { 129130b392cSDave Watson char content_type = 0; 130130b392cSDave Watson int err; 131130b392cSDave Watson int back = 17; 132130b392cSDave Watson 133130b392cSDave Watson while (content_type == 0) { 134b53f4976SJakub Kicinski if (back > rxm->full_len - prot->prepend_size) 135130b392cSDave Watson return -EBADMSG; 136130b392cSDave Watson err = skb_copy_bits(skb, 137130b392cSDave Watson rxm->offset + rxm->full_len - back, 138130b392cSDave Watson &content_type, 1); 139b53f4976SJakub Kicinski if (err) 140b53f4976SJakub Kicinski return err; 141130b392cSDave Watson if (content_type) 142130b392cSDave Watson break; 143130b392cSDave Watson sub++; 144130b392cSDave Watson back++; 145130b392cSDave Watson } 146130b392cSDave Watson ctx->control = content_type; 147130b392cSDave Watson } 148130b392cSDave Watson 
return sub; 149130b392cSDave Watson } 150130b392cSDave Watson 15194524d8fSVakul Garg static void tls_decrypt_done(struct crypto_async_request *req, int err) 15294524d8fSVakul Garg { 15394524d8fSVakul Garg struct aead_request *aead_req = (struct aead_request *)req; 15494524d8fSVakul Garg struct scatterlist *sgout = aead_req->dst; 155692d7b5dSVakul Garg struct scatterlist *sgin = aead_req->src; 1567a3dd8c8SJohn Fastabend struct tls_sw_context_rx *ctx; 1577a3dd8c8SJohn Fastabend struct tls_context *tls_ctx; 1584509de14SVakul Garg struct tls_prot_info *prot; 15994524d8fSVakul Garg struct scatterlist *sg; 1607a3dd8c8SJohn Fastabend struct sk_buff *skb; 16194524d8fSVakul Garg unsigned int pages; 1627a3dd8c8SJohn Fastabend int pending; 1637a3dd8c8SJohn Fastabend 1647a3dd8c8SJohn Fastabend skb = (struct sk_buff *)req->data; 1657a3dd8c8SJohn Fastabend tls_ctx = tls_get_ctx(skb->sk); 1667a3dd8c8SJohn Fastabend ctx = tls_sw_ctx_rx(tls_ctx); 1674509de14SVakul Garg prot = &tls_ctx->prot_info; 16894524d8fSVakul Garg 16994524d8fSVakul Garg /* Propagate if there was an err */ 17094524d8fSVakul Garg if (err) { 1715c5ec668SJakub Kicinski if (err == -EBADMSG) 1725c5ec668SJakub Kicinski TLS_INC_STATS(sock_net(skb->sk), 1735c5ec668SJakub Kicinski LINUX_MIB_TLSDECRYPTERROR); 17494524d8fSVakul Garg ctx->async_wait.err = err; 1757a3dd8c8SJohn Fastabend tls_err_abort(skb->sk, err); 176692d7b5dSVakul Garg } else { 177692d7b5dSVakul Garg struct strp_msg *rxm = strp_msg(skb); 178b53f4976SJakub Kicinski int pad; 179b53f4976SJakub Kicinski 180b53f4976SJakub Kicinski pad = padding_length(ctx, prot, skb); 181b53f4976SJakub Kicinski if (pad < 0) { 182b53f4976SJakub Kicinski ctx->async_wait.err = pad; 183b53f4976SJakub Kicinski tls_err_abort(skb->sk, pad); 184b53f4976SJakub Kicinski } else { 185b53f4976SJakub Kicinski rxm->full_len -= pad; 1864509de14SVakul Garg rxm->offset += prot->prepend_size; 1874509de14SVakul Garg rxm->full_len -= prot->overhead_size; 18894524d8fSVakul Garg } 189b53f4976SJakub 
Kicinski } 19094524d8fSVakul Garg 1917a3dd8c8SJohn Fastabend /* After using skb->sk to propagate sk through crypto async callback 1927a3dd8c8SJohn Fastabend * we need to NULL it again. 1937a3dd8c8SJohn Fastabend */ 1947a3dd8c8SJohn Fastabend skb->sk = NULL; 1957a3dd8c8SJohn Fastabend 19694524d8fSVakul Garg 197692d7b5dSVakul Garg /* Free the destination pages if skb was not decrypted inplace */ 198692d7b5dSVakul Garg if (sgout != sgin) { 19994524d8fSVakul Garg /* Skip the first S/G entry as it points to AAD */ 20094524d8fSVakul Garg for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) { 20194524d8fSVakul Garg if (!sg) 20294524d8fSVakul Garg break; 20394524d8fSVakul Garg put_page(sg_page(sg)); 20494524d8fSVakul Garg } 205692d7b5dSVakul Garg } 20694524d8fSVakul Garg 20794524d8fSVakul Garg kfree(aead_req); 20894524d8fSVakul Garg 2090cada332SVinay Kumar Yadav spin_lock_bh(&ctx->decrypt_compl_lock); 210692d7b5dSVakul Garg pending = atomic_dec_return(&ctx->decrypt_pending); 211692d7b5dSVakul Garg 2120cada332SVinay Kumar Yadav if (!pending && ctx->async_notify) 21394524d8fSVakul Garg complete(&ctx->async_wait.completion); 2140cada332SVinay Kumar Yadav spin_unlock_bh(&ctx->decrypt_compl_lock); 21594524d8fSVakul Garg } 21694524d8fSVakul Garg 217c46234ebSDave Watson static int tls_do_decryption(struct sock *sk, 21894524d8fSVakul Garg struct sk_buff *skb, 219c46234ebSDave Watson struct scatterlist *sgin, 220c46234ebSDave Watson struct scatterlist *sgout, 221c46234ebSDave Watson char *iv_recv, 222c46234ebSDave Watson size_t data_len, 22394524d8fSVakul Garg struct aead_request *aead_req, 22494524d8fSVakul Garg bool async) 225c46234ebSDave Watson { 226c46234ebSDave Watson struct tls_context *tls_ctx = tls_get_ctx(sk); 2274509de14SVakul Garg struct tls_prot_info *prot = &tls_ctx->prot_info; 228f66de3eeSBoris Pismenny struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 229c46234ebSDave Watson int ret; 230c46234ebSDave Watson 2310b243d00SVakul Garg aead_request_set_tfm(aead_req, 
ctx->aead_recv); 2324509de14SVakul Garg aead_request_set_ad(aead_req, prot->aad_size); 233c46234ebSDave Watson aead_request_set_crypt(aead_req, sgin, sgout, 2344509de14SVakul Garg data_len + prot->tag_size, 235c46234ebSDave Watson (u8 *)iv_recv); 236c46234ebSDave Watson 23794524d8fSVakul Garg if (async) { 2387a3dd8c8SJohn Fastabend /* Using skb->sk to push sk through to crypto async callback 2397a3dd8c8SJohn Fastabend * handler. This allows propagating errors up to the socket 2407a3dd8c8SJohn Fastabend * if needed. It _must_ be cleared in the async handler 241a88c26f6SVakul Garg * before consume_skb is called. We _know_ skb->sk is NULL 2427a3dd8c8SJohn Fastabend * because it is a clone from strparser. 2437a3dd8c8SJohn Fastabend */ 2447a3dd8c8SJohn Fastabend skb->sk = sk; 24594524d8fSVakul Garg aead_request_set_callback(aead_req, 24694524d8fSVakul Garg CRYPTO_TFM_REQ_MAY_BACKLOG, 24794524d8fSVakul Garg tls_decrypt_done, skb); 24894524d8fSVakul Garg atomic_inc(&ctx->decrypt_pending); 24994524d8fSVakul Garg } else { 25094524d8fSVakul Garg aead_request_set_callback(aead_req, 25194524d8fSVakul Garg CRYPTO_TFM_REQ_MAY_BACKLOG, 25294524d8fSVakul Garg crypto_req_done, &ctx->async_wait); 25394524d8fSVakul Garg } 25494524d8fSVakul Garg 25594524d8fSVakul Garg ret = crypto_aead_decrypt(aead_req); 25694524d8fSVakul Garg if (ret == -EINPROGRESS) { 25794524d8fSVakul Garg if (async) 25894524d8fSVakul Garg return ret; 25994524d8fSVakul Garg 26094524d8fSVakul Garg ret = crypto_wait_req(ret, &ctx->async_wait); 26194524d8fSVakul Garg } 26294524d8fSVakul Garg 26394524d8fSVakul Garg if (async) 26494524d8fSVakul Garg atomic_dec(&ctx->decrypt_pending); 26594524d8fSVakul Garg 266c46234ebSDave Watson return ret; 267c46234ebSDave Watson } 268c46234ebSDave Watson 269d829e9c4SDaniel Borkmann static void tls_trim_both_msgs(struct sock *sk, int target_size) 2703c4d7559SDave Watson { 2713c4d7559SDave Watson struct tls_context *tls_ctx = tls_get_ctx(sk); 2724509de14SVakul Garg struct tls_prot_info 
*prot = &tls_ctx->prot_info; 273f66de3eeSBoris Pismenny struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 274a42055e8SVakul Garg struct tls_rec *rec = ctx->open_rec; 2753c4d7559SDave Watson 276d829e9c4SDaniel Borkmann sk_msg_trim(sk, &rec->msg_plaintext, target_size); 2773c4d7559SDave Watson if (target_size > 0) 2784509de14SVakul Garg target_size += prot->overhead_size; 279d829e9c4SDaniel Borkmann sk_msg_trim(sk, &rec->msg_encrypted, target_size); 2803c4d7559SDave Watson } 2813c4d7559SDave Watson 282d829e9c4SDaniel Borkmann static int tls_alloc_encrypted_msg(struct sock *sk, int len) 2833c4d7559SDave Watson { 2843c4d7559SDave Watson struct tls_context *tls_ctx = tls_get_ctx(sk); 285f66de3eeSBoris Pismenny struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 286a42055e8SVakul Garg struct tls_rec *rec = ctx->open_rec; 287d829e9c4SDaniel Borkmann struct sk_msg *msg_en = &rec->msg_encrypted; 2883c4d7559SDave Watson 289d829e9c4SDaniel Borkmann return sk_msg_alloc(sk, msg_en, len, 0); 2903c4d7559SDave Watson } 2913c4d7559SDave Watson 292d829e9c4SDaniel Borkmann static int tls_clone_plaintext_msg(struct sock *sk, int required) 2933c4d7559SDave Watson { 2943c4d7559SDave Watson struct tls_context *tls_ctx = tls_get_ctx(sk); 2954509de14SVakul Garg struct tls_prot_info *prot = &tls_ctx->prot_info; 296f66de3eeSBoris Pismenny struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 297a42055e8SVakul Garg struct tls_rec *rec = ctx->open_rec; 298d829e9c4SDaniel Borkmann struct sk_msg *msg_pl = &rec->msg_plaintext; 299d829e9c4SDaniel Borkmann struct sk_msg *msg_en = &rec->msg_encrypted; 3004e6d4720SVakul Garg int skip, len; 3013c4d7559SDave Watson 302d829e9c4SDaniel Borkmann /* We add page references worth len bytes from encrypted sg 303d829e9c4SDaniel Borkmann * at the end of plaintext sg. It is guaranteed that msg_en 3044e6d4720SVakul Garg * has enough required room (ensured by caller). 
3054e6d4720SVakul Garg */ 306d829e9c4SDaniel Borkmann len = required - msg_pl->sg.size; 30752ea992cSVakul Garg 308d829e9c4SDaniel Borkmann /* Skip initial bytes in msg_en's data to be able to use 309d829e9c4SDaniel Borkmann * same offset of both plain and encrypted data. 3104e6d4720SVakul Garg */ 3114509de14SVakul Garg skip = prot->prepend_size + msg_pl->sg.size; 3124e6d4720SVakul Garg 313d829e9c4SDaniel Borkmann return sk_msg_clone(sk, msg_pl, msg_en, skip, len); 3143c4d7559SDave Watson } 3153c4d7559SDave Watson 316d3b18ad3SJohn Fastabend static struct tls_rec *tls_get_rec(struct sock *sk) 317d3b18ad3SJohn Fastabend { 318d3b18ad3SJohn Fastabend struct tls_context *tls_ctx = tls_get_ctx(sk); 3194509de14SVakul Garg struct tls_prot_info *prot = &tls_ctx->prot_info; 320d3b18ad3SJohn Fastabend struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 321d3b18ad3SJohn Fastabend struct sk_msg *msg_pl, *msg_en; 322d3b18ad3SJohn Fastabend struct tls_rec *rec; 323d3b18ad3SJohn Fastabend int mem_size; 324d3b18ad3SJohn Fastabend 325d3b18ad3SJohn Fastabend mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send); 326d3b18ad3SJohn Fastabend 327d3b18ad3SJohn Fastabend rec = kzalloc(mem_size, sk->sk_allocation); 328d3b18ad3SJohn Fastabend if (!rec) 329d3b18ad3SJohn Fastabend return NULL; 330d3b18ad3SJohn Fastabend 331d3b18ad3SJohn Fastabend msg_pl = &rec->msg_plaintext; 332d3b18ad3SJohn Fastabend msg_en = &rec->msg_encrypted; 333d3b18ad3SJohn Fastabend 334d3b18ad3SJohn Fastabend sk_msg_init(msg_pl); 335d3b18ad3SJohn Fastabend sk_msg_init(msg_en); 336d3b18ad3SJohn Fastabend 337d3b18ad3SJohn Fastabend sg_init_table(rec->sg_aead_in, 2); 3384509de14SVakul Garg sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size); 339d3b18ad3SJohn Fastabend sg_unmark_end(&rec->sg_aead_in[1]); 340d3b18ad3SJohn Fastabend 341d3b18ad3SJohn Fastabend sg_init_table(rec->sg_aead_out, 2); 3424509de14SVakul Garg sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size); 
343d3b18ad3SJohn Fastabend sg_unmark_end(&rec->sg_aead_out[1]); 344d3b18ad3SJohn Fastabend 345d3b18ad3SJohn Fastabend return rec; 346d3b18ad3SJohn Fastabend } 347d3b18ad3SJohn Fastabend 348d3b18ad3SJohn Fastabend static void tls_free_rec(struct sock *sk, struct tls_rec *rec) 349d3b18ad3SJohn Fastabend { 350d3b18ad3SJohn Fastabend sk_msg_free(sk, &rec->msg_encrypted); 351d3b18ad3SJohn Fastabend sk_msg_free(sk, &rec->msg_plaintext); 352d3b18ad3SJohn Fastabend kfree(rec); 353d3b18ad3SJohn Fastabend } 354d3b18ad3SJohn Fastabend 355c774973eSVakul Garg static void tls_free_open_rec(struct sock *sk) 3563c4d7559SDave Watson { 3573c4d7559SDave Watson struct tls_context *tls_ctx = tls_get_ctx(sk); 358f66de3eeSBoris Pismenny struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 359a42055e8SVakul Garg struct tls_rec *rec = ctx->open_rec; 3603c4d7559SDave Watson 361d3b18ad3SJohn Fastabend if (rec) { 362d3b18ad3SJohn Fastabend tls_free_rec(sk, rec); 363d3b18ad3SJohn Fastabend ctx->open_rec = NULL; 364d3b18ad3SJohn Fastabend } 3653c4d7559SDave Watson } 3663c4d7559SDave Watson 367a42055e8SVakul Garg int tls_tx_records(struct sock *sk, int flags) 368a42055e8SVakul Garg { 369a42055e8SVakul Garg struct tls_context *tls_ctx = tls_get_ctx(sk); 370a42055e8SVakul Garg struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 371a42055e8SVakul Garg struct tls_rec *rec, *tmp; 372d829e9c4SDaniel Borkmann struct sk_msg *msg_en; 373a42055e8SVakul Garg int tx_flags, rc = 0; 374a42055e8SVakul Garg 375a42055e8SVakul Garg if (tls_is_partially_sent_record(tls_ctx)) { 3769932a29aSVakul Garg rec = list_first_entry(&ctx->tx_list, 377a42055e8SVakul Garg struct tls_rec, list); 378a42055e8SVakul Garg 379a42055e8SVakul Garg if (flags == -1) 380a42055e8SVakul Garg tx_flags = rec->tx_flags; 381a42055e8SVakul Garg else 382a42055e8SVakul Garg tx_flags = flags; 383a42055e8SVakul Garg 384a42055e8SVakul Garg rc = tls_push_partial_record(sk, tls_ctx, tx_flags); 385a42055e8SVakul Garg if (rc) 386a42055e8SVakul 
Garg goto tx_err; 387a42055e8SVakul Garg 388a42055e8SVakul Garg /* Full record has been transmitted. 3899932a29aSVakul Garg * Remove the head of tx_list 390a42055e8SVakul Garg */ 391a42055e8SVakul Garg list_del(&rec->list); 392d829e9c4SDaniel Borkmann sk_msg_free(sk, &rec->msg_plaintext); 393a42055e8SVakul Garg kfree(rec); 394a42055e8SVakul Garg } 395a42055e8SVakul Garg 3969932a29aSVakul Garg /* Tx all ready records */ 3979932a29aSVakul Garg list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) { 3989932a29aSVakul Garg if (READ_ONCE(rec->tx_ready)) { 399a42055e8SVakul Garg if (flags == -1) 400a42055e8SVakul Garg tx_flags = rec->tx_flags; 401a42055e8SVakul Garg else 402a42055e8SVakul Garg tx_flags = flags; 403a42055e8SVakul Garg 404d829e9c4SDaniel Borkmann msg_en = &rec->msg_encrypted; 405a42055e8SVakul Garg rc = tls_push_sg(sk, tls_ctx, 406d829e9c4SDaniel Borkmann &msg_en->sg.data[msg_en->sg.curr], 407a42055e8SVakul Garg 0, tx_flags); 408a42055e8SVakul Garg if (rc) 409a42055e8SVakul Garg goto tx_err; 410a42055e8SVakul Garg 411a42055e8SVakul Garg list_del(&rec->list); 412d829e9c4SDaniel Borkmann sk_msg_free(sk, &rec->msg_plaintext); 413a42055e8SVakul Garg kfree(rec); 414a42055e8SVakul Garg } else { 415a42055e8SVakul Garg break; 416a42055e8SVakul Garg } 417a42055e8SVakul Garg } 418a42055e8SVakul Garg 419a42055e8SVakul Garg tx_err: 420a42055e8SVakul Garg if (rc < 0 && rc != -EAGAIN) 421a42055e8SVakul Garg tls_err_abort(sk, EBADMSG); 422a42055e8SVakul Garg 423a42055e8SVakul Garg return rc; 424a42055e8SVakul Garg } 425a42055e8SVakul Garg 426a42055e8SVakul Garg static void tls_encrypt_done(struct crypto_async_request *req, int err) 427a42055e8SVakul Garg { 428a42055e8SVakul Garg struct aead_request *aead_req = (struct aead_request *)req; 429a42055e8SVakul Garg struct sock *sk = req->data; 430a42055e8SVakul Garg struct tls_context *tls_ctx = tls_get_ctx(sk); 4314509de14SVakul Garg struct tls_prot_info *prot = &tls_ctx->prot_info; 432a42055e8SVakul Garg struct 
tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 433d829e9c4SDaniel Borkmann struct scatterlist *sge; 434d829e9c4SDaniel Borkmann struct sk_msg *msg_en; 435a42055e8SVakul Garg struct tls_rec *rec; 436a42055e8SVakul Garg bool ready = false; 437a42055e8SVakul Garg int pending; 438a42055e8SVakul Garg 439a42055e8SVakul Garg rec = container_of(aead_req, struct tls_rec, aead_req); 440d829e9c4SDaniel Borkmann msg_en = &rec->msg_encrypted; 441a42055e8SVakul Garg 442d829e9c4SDaniel Borkmann sge = sk_msg_elem(msg_en, msg_en->sg.curr); 4434509de14SVakul Garg sge->offset -= prot->prepend_size; 4444509de14SVakul Garg sge->length += prot->prepend_size; 445a42055e8SVakul Garg 44680ece6a0SVakul Garg /* Check if error is previously set on socket */ 447a42055e8SVakul Garg if (err || sk->sk_err) { 448a42055e8SVakul Garg rec = NULL; 449a42055e8SVakul Garg 450a42055e8SVakul Garg /* If err is already set on socket, return the same code */ 451a42055e8SVakul Garg if (sk->sk_err) { 452a42055e8SVakul Garg ctx->async_wait.err = sk->sk_err; 453a42055e8SVakul Garg } else { 454a42055e8SVakul Garg ctx->async_wait.err = err; 455a42055e8SVakul Garg tls_err_abort(sk, err); 456a42055e8SVakul Garg } 457a42055e8SVakul Garg } 458a42055e8SVakul Garg 4599932a29aSVakul Garg if (rec) { 4609932a29aSVakul Garg struct tls_rec *first_rec; 4619932a29aSVakul Garg 4629932a29aSVakul Garg /* Mark the record as ready for transmission */ 4639932a29aSVakul Garg smp_store_mb(rec->tx_ready, true); 4649932a29aSVakul Garg 4659932a29aSVakul Garg /* If received record is at head of tx_list, schedule tx */ 4669932a29aSVakul Garg first_rec = list_first_entry(&ctx->tx_list, 4679932a29aSVakul Garg struct tls_rec, list); 4689932a29aSVakul Garg if (rec == first_rec) 4699932a29aSVakul Garg ready = true; 4709932a29aSVakul Garg } 471a42055e8SVakul Garg 4720cada332SVinay Kumar Yadav spin_lock_bh(&ctx->encrypt_compl_lock); 473a42055e8SVakul Garg pending = atomic_dec_return(&ctx->encrypt_pending); 474a42055e8SVakul Garg 
4750cada332SVinay Kumar Yadav if (!pending && ctx->async_notify) 476a42055e8SVakul Garg complete(&ctx->async_wait.completion); 4770cada332SVinay Kumar Yadav spin_unlock_bh(&ctx->encrypt_compl_lock); 478a42055e8SVakul Garg 479a42055e8SVakul Garg if (!ready) 480a42055e8SVakul Garg return; 481a42055e8SVakul Garg 482a42055e8SVakul Garg /* Schedule the transmission */ 483a42055e8SVakul Garg if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 484d829e9c4SDaniel Borkmann schedule_delayed_work(&ctx->tx_work.work, 1); 485a42055e8SVakul Garg } 486a42055e8SVakul Garg 487a42055e8SVakul Garg static int tls_do_encryption(struct sock *sk, 488a42055e8SVakul Garg struct tls_context *tls_ctx, 489a447da7dSDaniel Borkmann struct tls_sw_context_tx *ctx, 490a447da7dSDaniel Borkmann struct aead_request *aead_req, 491d829e9c4SDaniel Borkmann size_t data_len, u32 start) 4923c4d7559SDave Watson { 4934509de14SVakul Garg struct tls_prot_info *prot = &tls_ctx->prot_info; 494a42055e8SVakul Garg struct tls_rec *rec = ctx->open_rec; 495d829e9c4SDaniel Borkmann struct sk_msg *msg_en = &rec->msg_encrypted; 496d829e9c4SDaniel Borkmann struct scatterlist *sge = sk_msg_elem(msg_en, start); 497f295b3aeSVakul Garg int rc, iv_offset = 0; 4983c4d7559SDave Watson 499f295b3aeSVakul Garg /* For CCM based ciphers, first byte of IV is a constant */ 500f295b3aeSVakul Garg if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) { 501f295b3aeSVakul Garg rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE; 502f295b3aeSVakul Garg iv_offset = 1; 503f295b3aeSVakul Garg } 504f295b3aeSVakul Garg 505f295b3aeSVakul Garg memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, 506f295b3aeSVakul Garg prot->iv_size + prot->salt_size); 507f295b3aeSVakul Garg 508f295b3aeSVakul Garg xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq); 50932eb67b9SDave Watson 5104509de14SVakul Garg sge->offset += prot->prepend_size; 5114509de14SVakul Garg sge->length -= prot->prepend_size; 5123c4d7559SDave Watson 513d829e9c4SDaniel Borkmann 
msg_en->sg.curr = start; 5144e6d4720SVakul Garg 5153c4d7559SDave Watson aead_request_set_tfm(aead_req, ctx->aead_send); 5164509de14SVakul Garg aead_request_set_ad(aead_req, prot->aad_size); 517d829e9c4SDaniel Borkmann aead_request_set_crypt(aead_req, rec->sg_aead_in, 518d829e9c4SDaniel Borkmann rec->sg_aead_out, 51932eb67b9SDave Watson data_len, rec->iv_data); 520a54667f6SVakul Garg 521a54667f6SVakul Garg aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, 522a42055e8SVakul Garg tls_encrypt_done, sk); 523a54667f6SVakul Garg 5249932a29aSVakul Garg /* Add the record in tx_list */ 5259932a29aSVakul Garg list_add_tail((struct list_head *)&rec->list, &ctx->tx_list); 526a42055e8SVakul Garg atomic_inc(&ctx->encrypt_pending); 5273c4d7559SDave Watson 528a42055e8SVakul Garg rc = crypto_aead_encrypt(aead_req); 529a42055e8SVakul Garg if (!rc || rc != -EINPROGRESS) { 530a42055e8SVakul Garg atomic_dec(&ctx->encrypt_pending); 5314509de14SVakul Garg sge->offset -= prot->prepend_size; 5324509de14SVakul Garg sge->length += prot->prepend_size; 533a42055e8SVakul Garg } 5343c4d7559SDave Watson 5359932a29aSVakul Garg if (!rc) { 5369932a29aSVakul Garg WRITE_ONCE(rec->tx_ready, true); 5379932a29aSVakul Garg } else if (rc != -EINPROGRESS) { 5389932a29aSVakul Garg list_del(&rec->list); 539a42055e8SVakul Garg return rc; 5409932a29aSVakul Garg } 541a42055e8SVakul Garg 542a42055e8SVakul Garg /* Unhook the record from context if encryption is not failure */ 543a42055e8SVakul Garg ctx->open_rec = NULL; 544fb0f886fSJakub Kicinski tls_advance_record_sn(sk, prot, &tls_ctx->tx); 5453c4d7559SDave Watson return rc; 5463c4d7559SDave Watson } 5473c4d7559SDave Watson 548d3b18ad3SJohn Fastabend static int tls_split_open_record(struct sock *sk, struct tls_rec *from, 549d3b18ad3SJohn Fastabend struct tls_rec **to, struct sk_msg *msg_opl, 550d3b18ad3SJohn Fastabend struct sk_msg *msg_oen, u32 split_point, 551d3b18ad3SJohn Fastabend u32 tx_overhead_size, u32 *orig_end) 552d3b18ad3SJohn Fastabend { 
553d3b18ad3SJohn Fastabend u32 i, j, bytes = 0, apply = msg_opl->apply_bytes; 554d3b18ad3SJohn Fastabend struct scatterlist *sge, *osge, *nsge; 555d3b18ad3SJohn Fastabend u32 orig_size = msg_opl->sg.size; 556d3b18ad3SJohn Fastabend struct scatterlist tmp = { }; 557d3b18ad3SJohn Fastabend struct sk_msg *msg_npl; 558d3b18ad3SJohn Fastabend struct tls_rec *new; 559d3b18ad3SJohn Fastabend int ret; 560d3b18ad3SJohn Fastabend 561d3b18ad3SJohn Fastabend new = tls_get_rec(sk); 562d3b18ad3SJohn Fastabend if (!new) 563d3b18ad3SJohn Fastabend return -ENOMEM; 564d3b18ad3SJohn Fastabend ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size + 565d3b18ad3SJohn Fastabend tx_overhead_size, 0); 566d3b18ad3SJohn Fastabend if (ret < 0) { 567d3b18ad3SJohn Fastabend tls_free_rec(sk, new); 568d3b18ad3SJohn Fastabend return ret; 569d3b18ad3SJohn Fastabend } 570d3b18ad3SJohn Fastabend 571d3b18ad3SJohn Fastabend *orig_end = msg_opl->sg.end; 572d3b18ad3SJohn Fastabend i = msg_opl->sg.start; 573d3b18ad3SJohn Fastabend sge = sk_msg_elem(msg_opl, i); 574d3b18ad3SJohn Fastabend while (apply && sge->length) { 575d3b18ad3SJohn Fastabend if (sge->length > apply) { 576d3b18ad3SJohn Fastabend u32 len = sge->length - apply; 577d3b18ad3SJohn Fastabend 578d3b18ad3SJohn Fastabend get_page(sg_page(sge)); 579d3b18ad3SJohn Fastabend sg_set_page(&tmp, sg_page(sge), len, 580d3b18ad3SJohn Fastabend sge->offset + apply); 581d3b18ad3SJohn Fastabend sge->length = apply; 582d3b18ad3SJohn Fastabend bytes += apply; 583d3b18ad3SJohn Fastabend apply = 0; 584d3b18ad3SJohn Fastabend } else { 585d3b18ad3SJohn Fastabend apply -= sge->length; 586d3b18ad3SJohn Fastabend bytes += sge->length; 587d3b18ad3SJohn Fastabend } 588d3b18ad3SJohn Fastabend 589d3b18ad3SJohn Fastabend sk_msg_iter_var_next(i); 590d3b18ad3SJohn Fastabend if (i == msg_opl->sg.end) 591d3b18ad3SJohn Fastabend break; 592d3b18ad3SJohn Fastabend sge = sk_msg_elem(msg_opl, i); 593d3b18ad3SJohn Fastabend } 594d3b18ad3SJohn Fastabend 595d3b18ad3SJohn 
Fastabend msg_opl->sg.end = i; 596d3b18ad3SJohn Fastabend msg_opl->sg.curr = i; 597d3b18ad3SJohn Fastabend msg_opl->sg.copybreak = 0; 598d3b18ad3SJohn Fastabend msg_opl->apply_bytes = 0; 599d3b18ad3SJohn Fastabend msg_opl->sg.size = bytes; 600d3b18ad3SJohn Fastabend 601d3b18ad3SJohn Fastabend msg_npl = &new->msg_plaintext; 602d3b18ad3SJohn Fastabend msg_npl->apply_bytes = apply; 603d3b18ad3SJohn Fastabend msg_npl->sg.size = orig_size - bytes; 604d3b18ad3SJohn Fastabend 605d3b18ad3SJohn Fastabend j = msg_npl->sg.start; 606d3b18ad3SJohn Fastabend nsge = sk_msg_elem(msg_npl, j); 607d3b18ad3SJohn Fastabend if (tmp.length) { 608d3b18ad3SJohn Fastabend memcpy(nsge, &tmp, sizeof(*nsge)); 609d3b18ad3SJohn Fastabend sk_msg_iter_var_next(j); 610d3b18ad3SJohn Fastabend nsge = sk_msg_elem(msg_npl, j); 611d3b18ad3SJohn Fastabend } 612d3b18ad3SJohn Fastabend 613d3b18ad3SJohn Fastabend osge = sk_msg_elem(msg_opl, i); 614d3b18ad3SJohn Fastabend while (osge->length) { 615d3b18ad3SJohn Fastabend memcpy(nsge, osge, sizeof(*nsge)); 616d3b18ad3SJohn Fastabend sg_unmark_end(nsge); 617d3b18ad3SJohn Fastabend sk_msg_iter_var_next(i); 618d3b18ad3SJohn Fastabend sk_msg_iter_var_next(j); 619d3b18ad3SJohn Fastabend if (i == *orig_end) 620d3b18ad3SJohn Fastabend break; 621d3b18ad3SJohn Fastabend osge = sk_msg_elem(msg_opl, i); 622d3b18ad3SJohn Fastabend nsge = sk_msg_elem(msg_npl, j); 623d3b18ad3SJohn Fastabend } 624d3b18ad3SJohn Fastabend 625d3b18ad3SJohn Fastabend msg_npl->sg.end = j; 626d3b18ad3SJohn Fastabend msg_npl->sg.curr = j; 627d3b18ad3SJohn Fastabend msg_npl->sg.copybreak = 0; 628d3b18ad3SJohn Fastabend 629d3b18ad3SJohn Fastabend *to = new; 630d3b18ad3SJohn Fastabend return 0; 631d3b18ad3SJohn Fastabend } 632d3b18ad3SJohn Fastabend 633d3b18ad3SJohn Fastabend static void tls_merge_open_record(struct sock *sk, struct tls_rec *to, 634d3b18ad3SJohn Fastabend struct tls_rec *from, u32 orig_end) 635d3b18ad3SJohn Fastabend { 636d3b18ad3SJohn Fastabend struct sk_msg *msg_npl = 
&from->msg_plaintext; 637d3b18ad3SJohn Fastabend struct sk_msg *msg_opl = &to->msg_plaintext; 638d3b18ad3SJohn Fastabend struct scatterlist *osge, *nsge; 639d3b18ad3SJohn Fastabend u32 i, j; 640d3b18ad3SJohn Fastabend 641d3b18ad3SJohn Fastabend i = msg_opl->sg.end; 642d3b18ad3SJohn Fastabend sk_msg_iter_var_prev(i); 643d3b18ad3SJohn Fastabend j = msg_npl->sg.start; 644d3b18ad3SJohn Fastabend 645d3b18ad3SJohn Fastabend osge = sk_msg_elem(msg_opl, i); 646d3b18ad3SJohn Fastabend nsge = sk_msg_elem(msg_npl, j); 647d3b18ad3SJohn Fastabend 648d3b18ad3SJohn Fastabend if (sg_page(osge) == sg_page(nsge) && 649d3b18ad3SJohn Fastabend osge->offset + osge->length == nsge->offset) { 650d3b18ad3SJohn Fastabend osge->length += nsge->length; 651d3b18ad3SJohn Fastabend put_page(sg_page(nsge)); 652d3b18ad3SJohn Fastabend } 653d3b18ad3SJohn Fastabend 654d3b18ad3SJohn Fastabend msg_opl->sg.end = orig_end; 655d3b18ad3SJohn Fastabend msg_opl->sg.curr = orig_end; 656d3b18ad3SJohn Fastabend msg_opl->sg.copybreak = 0; 657d3b18ad3SJohn Fastabend msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size; 658d3b18ad3SJohn Fastabend msg_opl->sg.size += msg_npl->sg.size; 659d3b18ad3SJohn Fastabend 660d3b18ad3SJohn Fastabend sk_msg_free(sk, &to->msg_encrypted); 661d3b18ad3SJohn Fastabend sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted); 662d3b18ad3SJohn Fastabend 663d3b18ad3SJohn Fastabend kfree(from); 664d3b18ad3SJohn Fastabend } 665d3b18ad3SJohn Fastabend 6663c4d7559SDave Watson static int tls_push_record(struct sock *sk, int flags, 6673c4d7559SDave Watson unsigned char record_type) 6683c4d7559SDave Watson { 6693c4d7559SDave Watson struct tls_context *tls_ctx = tls_get_ctx(sk); 6704509de14SVakul Garg struct tls_prot_info *prot = &tls_ctx->prot_info; 671f66de3eeSBoris Pismenny struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 672d3b18ad3SJohn Fastabend struct tls_rec *rec = ctx->open_rec, *tmp = NULL; 6733f649ab7SKees Cook u32 i, split_point, orig_end; 674d829e9c4SDaniel 
/* Seal and transmit the currently open TX record.
 *
 * If apply_bytes covers only part of the open record (BPF policy applies
 * to a prefix), the record is first split at that point; the remainder
 * becomes the new open record on success. Builds the AEAD input/output
 * scatterlists, writes AAD and the TLS header, then kicks off encryption
 * (possibly asynchronous, in which case -EINPROGRESS is returned).
 *
 * Returns 0 / result of tls_tx_records() on success, negative errno on
 * failure; -EINPROGRESS means the crypto completion will finish the send.
 */
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	/* Split when the policy prefix ends inside the record, or when the
	 * encrypted buffer is too small to hold plaintext + overhead.
	 */
	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message. No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	/* sk_msg is a ring; if it wrapped, chain the tail back to the
	 * head so the AEAD sees one linear sg list.
	 */
	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	/* AEAD input: [AAD slot][plaintext...] */
	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	/* AEAD output: [AAD slot][ciphertext...] */
	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, prot->rec_seq_size,
		     record_type, prot->version);

	/* Write the TLS record header into the first encrypted page */
	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type, prot->version);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			/* NOTE(review): tls_err_abort() is passed a positive
			 * EBADMSG here while other error paths use negative
			 * errnos — callers compare sk->sk_err == EBADMSG, so
			 * any sign fix must change all sites together.
			 */
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		/* Seen -EINPROGRESS at least once: backend is async capable */
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		/* The leftover half becomes the new open record */
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}
/* Run the sockmap BPF TX verdict program (if attached) on @msg and act on
 * its verdict: PASS pushes the record out on this socket, REDIRECT hands
 * the plaintext to another socket via tcp_bpf_sendmsg_redir(), DROP frees
 * it and returns -EACCES. Without a psock (or with MSG_SENDPAGE_NOPOLICY)
 * the record is simply pushed.
 *
 * @copied is adjusted downward for any bytes freed/dropped so the caller
 * reports an accurate byte count. Loops while an open record with
 * apply_bytes remains. Returns 0, -EINPROGRESS (async crypto), or a
 * negative errno.
 */
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		/* No BPF policy to consult: plain push */
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		/* The verdict prog may trim msg; track how much it ate */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	/* Corked: wait for more data unless the record is full anyway */
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		/* Must drop the socket lock while sending on the target */
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		/* Dropped bytes (incl. what the verdict prog trimmed) were
		 * never sent; take them back out of the copied count.
		 */
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		/* Verdict fully consumed: forget it (and any redirect sock)
		 * so the next chunk is re-evaluated.
		 */
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}
Fastabend return 0; 910a42055e8SVakul Garg 911d3b18ad3SJohn Fastabend return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA, 912d3b18ad3SJohn Fastabend &copied, flags); 913a42055e8SVakul Garg } 914a42055e8SVakul Garg 915a42055e8SVakul Garg int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 916a42055e8SVakul Garg { 9173c4d7559SDave Watson long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 918a42055e8SVakul Garg struct tls_context *tls_ctx = tls_get_ctx(sk); 9194509de14SVakul Garg struct tls_prot_info *prot = &tls_ctx->prot_info; 920a42055e8SVakul Garg struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); 9215b053e12SDave Watson bool async_capable = ctx->async_capable; 922a42055e8SVakul Garg unsigned char record_type = TLS_RECORD_TYPE_DATA; 92300e23707SDavid Howells bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 9243c4d7559SDave Watson bool eor = !(msg->msg_flags & MSG_MORE); 925a7bff11fSVadim Fedorenko size_t try_to_copy; 926a7bff11fSVadim Fedorenko ssize_t copied = 0; 927d829e9c4SDaniel Borkmann struct sk_msg *msg_pl, *msg_en; 928a42055e8SVakul Garg struct tls_rec *rec; 929a42055e8SVakul Garg int required_size; 930a42055e8SVakul Garg int num_async = 0; 9313c4d7559SDave Watson bool full_record; 932a42055e8SVakul Garg int record_room; 933a42055e8SVakul Garg int num_zc = 0; 9343c4d7559SDave Watson int orig_size; 9354128c0cfSVakul Garg int ret = 0; 9360cada332SVinay Kumar Yadav int pending; 9373c4d7559SDave Watson 9381c3b63f1SRouven Czerwinski if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 9391c3b63f1SRouven Czerwinski MSG_CMSG_COMPAT)) 9404a5cdc60SValentin Vidic return -EOPNOTSUPP; 9413c4d7559SDave Watson 94279ffe608SJakub Kicinski mutex_lock(&tls_ctx->tx_lock); 9433c4d7559SDave Watson lock_sock(sk); 9443c4d7559SDave Watson 9453c4d7559SDave Watson if (unlikely(msg->msg_controllen)) { 9463c4d7559SDave Watson ret = tls_proccess_cmsg(sk, msg, &record_type); 947a42055e8SVakul Garg if (ret) { 948a42055e8SVakul 
/* sendmsg() entry point for software kTLS TX.
 *
 * Copies user data into open records (zerocopy into the plaintext sg list
 * when the record will be pushed immediately and crypto is synchronous,
 * otherwise via a bounce copy), pushing a record whenever it fills to
 * TLS_MAX_PAYLOAD_SIZE or MSG_MORE is not set. A TLS record type other
 * than application data may be supplied via cmsg.
 *
 * Returns the number of bytes accepted, or a negative errno if nothing
 * was copied.
 */
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	/* cmsg may select a non-data record type (e.g. alert/handshake) */
	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		/* Reuse the open record or start a new one */
		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		/* Zerocopy path: pin user pages directly into the plaintext
		 * sg list. Only safe when the record goes out now and crypto
		 * is synchronous (otherwise pages could change under us).
		 */
		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			/* Undo the zerocopy attempt and fall back to copying */
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied > 0 ? copied : ret;
}
/* Core of the sendpage() path: append page fragments to the open record's
 * plaintext and push records as they fill (or on EOR).
 *
 * Caller must hold the socket lock (and tx_lock where required); see
 * tls_sw_sendpage()/tls_sw_sendpage_locked(). Returns bytes accepted or
 * a negative errno if nothing was accepted.
 */
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	ssize_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	/* EOR unless the caller signalled more fragments are coming */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		/* Reference the caller's page directly — no data copy */
		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		if (ctx->open_rec)
			goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied > 0 ? copied : ret;
}
1243a42055e8SVakul Garg if (num_async) { 1244a42055e8SVakul Garg /* Transmit if any encryptions have completed */ 1245a42055e8SVakul Garg if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { 1246a42055e8SVakul Garg cancel_delayed_work(&ctx->tx_work.work); 1247a42055e8SVakul Garg tls_tx_records(sk, flags); 1248a42055e8SVakul Garg } 1249a42055e8SVakul Garg } 12503c4d7559SDave Watson sendpage_end: 12513c4d7559SDave Watson ret = sk_stream_error(sk, flags, ret); 1252a7bff11fSVadim Fedorenko return copied > 0 ? copied : ret; 12533c4d7559SDave Watson } 12543c4d7559SDave Watson 1255d4ffb02dSWillem de Bruijn int tls_sw_sendpage_locked(struct sock *sk, struct page *page, 1256d4ffb02dSWillem de Bruijn int offset, size_t size, int flags) 1257d4ffb02dSWillem de Bruijn { 1258d4ffb02dSWillem de Bruijn if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 1259d4ffb02dSWillem de Bruijn MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY | 1260d4ffb02dSWillem de Bruijn MSG_NO_SHARED_FRAGS)) 12614a5cdc60SValentin Vidic return -EOPNOTSUPP; 1262d4ffb02dSWillem de Bruijn 1263d4ffb02dSWillem de Bruijn return tls_sw_do_sendpage(sk, page, offset, size, flags); 1264d4ffb02dSWillem de Bruijn } 1265d4ffb02dSWillem de Bruijn 12660608c69cSJohn Fastabend int tls_sw_sendpage(struct sock *sk, struct page *page, 12670608c69cSJohn Fastabend int offset, size_t size, int flags) 12680608c69cSJohn Fastabend { 126979ffe608SJakub Kicinski struct tls_context *tls_ctx = tls_get_ctx(sk); 12700608c69cSJohn Fastabend int ret; 12710608c69cSJohn Fastabend 12720608c69cSJohn Fastabend if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 12730608c69cSJohn Fastabend MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) 12744a5cdc60SValentin Vidic return -EOPNOTSUPP; 12750608c69cSJohn Fastabend 127679ffe608SJakub Kicinski mutex_lock(&tls_ctx->tx_lock); 12770608c69cSJohn Fastabend lock_sock(sk); 12780608c69cSJohn Fastabend ret = tls_sw_do_sendpage(sk, page, offset, size, flags); 12790608c69cSJohn Fastabend 
release_sock(sk); 128079ffe608SJakub Kicinski mutex_unlock(&tls_ctx->tx_lock); 12810608c69cSJohn Fastabend return ret; 12820608c69cSJohn Fastabend } 12830608c69cSJohn Fastabend 1284d3b18ad3SJohn Fastabend static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock, 1285d3b18ad3SJohn Fastabend int flags, long timeo, int *err) 1286c46234ebSDave Watson { 1287c46234ebSDave Watson struct tls_context *tls_ctx = tls_get_ctx(sk); 1288f66de3eeSBoris Pismenny struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 1289c46234ebSDave Watson struct sk_buff *skb; 1290c46234ebSDave Watson DEFINE_WAIT_FUNC(wait, woken_wake_function); 1291c46234ebSDave Watson 1292d3b18ad3SJohn Fastabend while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) { 1293c46234ebSDave Watson if (sk->sk_err) { 1294c46234ebSDave Watson *err = sock_error(sk); 1295c46234ebSDave Watson return NULL; 1296c46234ebSDave Watson } 1297c46234ebSDave Watson 1298fcf4793eSDoron Roberts-Kedes if (sk->sk_shutdown & RCV_SHUTDOWN) 1299fcf4793eSDoron Roberts-Kedes return NULL; 1300fcf4793eSDoron Roberts-Kedes 1301c46234ebSDave Watson if (sock_flag(sk, SOCK_DONE)) 1302c46234ebSDave Watson return NULL; 1303c46234ebSDave Watson 1304c46234ebSDave Watson if ((flags & MSG_DONTWAIT) || !timeo) { 1305c46234ebSDave Watson *err = -EAGAIN; 1306c46234ebSDave Watson return NULL; 1307c46234ebSDave Watson } 1308c46234ebSDave Watson 1309c46234ebSDave Watson add_wait_queue(sk_sleep(sk), &wait); 1310c46234ebSDave Watson sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1311d3b18ad3SJohn Fastabend sk_wait_event(sk, &timeo, 1312d3b18ad3SJohn Fastabend ctx->recv_pkt != skb || 1313d3b18ad3SJohn Fastabend !sk_psock_queue_empty(psock), 1314d3b18ad3SJohn Fastabend &wait); 1315c46234ebSDave Watson sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1316c46234ebSDave Watson remove_wait_queue(sk_sleep(sk), &wait); 1317c46234ebSDave Watson 1318c46234ebSDave Watson /* Handle signals */ 1319c46234ebSDave Watson if (signal_pending(current)) { 
/* Wait until a parsed TLS record (ctx->recv_pkt) or psock-queued data is
 * available, or until the socket errors / shuts down / times out.
 *
 * Returns the skb to process, or NULL with *err set for socket error,
 * -EAGAIN (non-blocking / timeout expired) or signal interruption.
 *
 * NOTE(review): the RCV_SHUTDOWN and SOCK_DONE paths return NULL without
 * writing *err — callers appear to rely on their own initialisation of
 * err in those cases; confirm before changing.
 */
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     int flags, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		/* Sleep until the strparser delivers a record or the psock
		 * queue becomes non-empty; timeo is decremented in place.
		 */
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}
1363d829e9c4SDaniel Borkmann pages[i], use, offset); 1364d829e9c4SDaniel Borkmann sg_unmark_end(&to[num_elem]); 1365d829e9c4SDaniel Borkmann /* We do not uncharge memory from this API */ 1366d829e9c4SDaniel Borkmann 1367d829e9c4SDaniel Borkmann offset = 0; 1368d829e9c4SDaniel Borkmann copied -= use; 1369d829e9c4SDaniel Borkmann 1370d829e9c4SDaniel Borkmann i++; 1371d829e9c4SDaniel Borkmann num_elem++; 1372d829e9c4SDaniel Borkmann } 1373d829e9c4SDaniel Borkmann } 1374d829e9c4SDaniel Borkmann /* Mark the end in the last sg entry if newly added */ 1375d829e9c4SDaniel Borkmann if (num_elem > *pages_used) 1376d829e9c4SDaniel Borkmann sg_mark_end(&to[num_elem - 1]); 1377d829e9c4SDaniel Borkmann out: 1378d829e9c4SDaniel Borkmann if (rc) 1379d829e9c4SDaniel Borkmann iov_iter_revert(from, size - *size_used); 1380d829e9c4SDaniel Borkmann *size_used = size; 1381d829e9c4SDaniel Borkmann *pages_used = num_elem; 1382d829e9c4SDaniel Borkmann 1383d829e9c4SDaniel Borkmann return rc; 1384d829e9c4SDaniel Borkmann } 1385d829e9c4SDaniel Borkmann 13860b243d00SVakul Garg /* This function decrypts the input skb into either out_iov or in out_sg 13870b243d00SVakul Garg * or in skb buffers itself. The input parameter 'zc' indicates if 13880b243d00SVakul Garg * zero-copy mode needs to be tried or not. With zero-copy mode, either 13890b243d00SVakul Garg * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are 13900b243d00SVakul Garg * NULL, then the decryption happens inside skb buffers itself, i.e. 13910b243d00SVakul Garg * zero-copy gets disabled and 'zc' is updated. 
 */

static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc, bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	/* Plaintext length: record minus protocol overhead, plus the
	 * TLS 1.3 tail (content type byte) which is stripped later
	 */
	const int data_len = rxm->full_len - prot->overhead_size +
			     prot->tail_size;
	int iv_offset = 0;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			/* +1 slot reserved for the AAD entry */
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
				 rxm->full_len - prot->prepend_size);
	} else {
		n_sgout = 0;
		*zc = false;
		/* In-place decrypt: make the skb data writable first */
		n_sgin = skb_cow_data(skb, 0, &unused);
	}

	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + prot->aad_size;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + prot->aad_size;

	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		iv[0] = 2;
		iv_offset = 1;
	}

	/* Prepare IV: explicit part comes from the record on the wire,
	 * placed after the (implicit) salt
	 */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + iv_offset + prot->salt_size,
			    prot->iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	if (prot->version == TLS_1_3_VERSION)
		/* TLS 1.3: full nonce is derived from the static IV */
		memcpy(iv + iv_offset, tls_ctx->rx.iv,
		       crypto_aead_ivsize(ctx->aead_recv));
	else
		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);

	/* TLS 1.3 XORs the record sequence number into the nonce;
	 * no-op for TLS 1.2 (handled inside the helper by version)
	 */
	xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, prot->rec_seq_size,
		     ctx->control, prot->version);

	/* Prepare sgin: sgin[0] = AAD, sgin[1..] = ciphertext from skb */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, prot->aad_size);

			*chunk = 0;
			err = tls_setup_from_iter(sk, out_iov, data_len,
						  &pages, chunk, &sgout[1],
						  (n_sgout - 1));
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		/* Decrypt in place over the input scatterlist */
		sgout = sgin;
		pages = 0;
		*chunk = data_len;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
				data_len, aead_req, async);
	if (err == -EINPROGRESS)
		/* Async in flight: 'mem' (and pinned pages) stay alive;
		 * presumably released by the decrypt completion handler --
		 * NOTE(review): confirm against tls_do_decryption/callback.
		 */
		return err;

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}

/* Decrypt ctx->recv_pkt if not already decrypted (possibly via device
 * offload when rx_conf == TLS_HW), strip padding and the record header,
 * and advance the expected record sequence number.  Sets *zc = false
 * whenever the data did not land in the caller's buffer.  May return
 * -EINPROGRESS when 'async' decryption was submitted.
 */
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc,
			      bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int pad, err = 0;

	if (!ctx->decrypted) {
		if (tls_ctx->rx_conf == TLS_HW) {
			err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
			if (err < 0)
				return err;
		}

		/* Still not decrypted after tls_device */
		if (!ctx->decrypted) {
			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
					       async);
			if (err < 0) {
				/* Record seq must advance even for async
				 * submissions so the next record uses the
				 * right nonce
				 */
				if (err == -EINPROGRESS)
					tls_advance_record_sn(sk, prot,
							      &tls_ctx->rx);
				else if (err == -EBADMSG)
					TLS_INC_STATS(sock_net(sk),
						      LINUX_MIB_TLSDECRYPTERROR);
				return err;
			}
		} else {
			/* Device already decrypted it -- not zero-copy */
			*zc = false;
		}

		pad = padding_length(ctx, prot, skb);
		if (pad < 0)
			return pad;

		rxm->full_len -= pad;
		rxm->offset += prot->prepend_size;
		rxm->full_len -= prot->overhead_size;
		tls_advance_record_sn(sk, prot, &tls_ctx->rx);
		ctx->decrypted = 1;
		ctx->saved_data_ready(sk);
	} else {
		*zc = false;
	}

	return err;
}

/* Synchronously decrypt 'skb' into the caller-provided scatterlist.
 * Used by the device-offload resync path.
 */
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}

/* Consume 'len' bytes of the current record.  Returns false if the record
 * still has unread data; returns true (and unpauses the stream parser for
 * the next record) once the record is fully consumed.  'skb' may be NULL
 * when the record was already moved to rx_list by the caller.
 */
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		consume_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}

/* This function traverses the rx_list in the tls receive context and copies
 * the decrypted records into the buffer provided by the caller when zero
 * copy is not true. Further, the records are removed from the rx_list if it
 * is not a peek case and the record has been consumed completely.
1619692d7b5dSVakul Garg */ 1620692d7b5dSVakul Garg static int process_rx_list(struct tls_sw_context_rx *ctx, 1621692d7b5dSVakul Garg struct msghdr *msg, 16222b794c40SVakul Garg u8 *control, 16232b794c40SVakul Garg bool *cmsg, 1624692d7b5dSVakul Garg size_t skip, 1625692d7b5dSVakul Garg size_t len, 1626692d7b5dSVakul Garg bool zc, 1627692d7b5dSVakul Garg bool is_peek) 1628692d7b5dSVakul Garg { 1629692d7b5dSVakul Garg struct sk_buff *skb = skb_peek(&ctx->rx_list); 16302b794c40SVakul Garg u8 ctrl = *control; 16312b794c40SVakul Garg u8 msgc = *cmsg; 16322b794c40SVakul Garg struct tls_msg *tlm; 1633692d7b5dSVakul Garg ssize_t copied = 0; 1634692d7b5dSVakul Garg 16352b794c40SVakul Garg /* Set the record type in 'control' if caller didn't pass it */ 16362b794c40SVakul Garg if (!ctrl && skb) { 16372b794c40SVakul Garg tlm = tls_msg(skb); 16382b794c40SVakul Garg ctrl = tlm->control; 16392b794c40SVakul Garg } 16402b794c40SVakul Garg 1641692d7b5dSVakul Garg while (skip && skb) { 1642692d7b5dSVakul Garg struct strp_msg *rxm = strp_msg(skb); 16432b794c40SVakul Garg tlm = tls_msg(skb); 16442b794c40SVakul Garg 16452b794c40SVakul Garg /* Cannot process a record of different type */ 16462b794c40SVakul Garg if (ctrl != tlm->control) 16472b794c40SVakul Garg return 0; 1648692d7b5dSVakul Garg 1649692d7b5dSVakul Garg if (skip < rxm->full_len) 1650692d7b5dSVakul Garg break; 1651692d7b5dSVakul Garg 1652692d7b5dSVakul Garg skip = skip - rxm->full_len; 1653692d7b5dSVakul Garg skb = skb_peek_next(skb, &ctx->rx_list); 1654692d7b5dSVakul Garg } 1655692d7b5dSVakul Garg 1656692d7b5dSVakul Garg while (len && skb) { 1657692d7b5dSVakul Garg struct sk_buff *next_skb; 1658692d7b5dSVakul Garg struct strp_msg *rxm = strp_msg(skb); 1659692d7b5dSVakul Garg int chunk = min_t(unsigned int, rxm->full_len - skip, len); 1660692d7b5dSVakul Garg 16612b794c40SVakul Garg tlm = tls_msg(skb); 16622b794c40SVakul Garg 16632b794c40SVakul Garg /* Cannot process a record of different type */ 16642b794c40SVakul Garg if 
(ctrl != tlm->control) 16652b794c40SVakul Garg return 0; 16662b794c40SVakul Garg 16672b794c40SVakul Garg /* Set record type if not already done. For a non-data record, 16682b794c40SVakul Garg * do not proceed if record type could not be copied. 16692b794c40SVakul Garg */ 16702b794c40SVakul Garg if (!msgc) { 16712b794c40SVakul Garg int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE, 16722b794c40SVakul Garg sizeof(ctrl), &ctrl); 16732b794c40SVakul Garg msgc = true; 16742b794c40SVakul Garg if (ctrl != TLS_RECORD_TYPE_DATA) { 16752b794c40SVakul Garg if (cerr || msg->msg_flags & MSG_CTRUNC) 16762b794c40SVakul Garg return -EIO; 16772b794c40SVakul Garg 16782b794c40SVakul Garg *cmsg = msgc; 16792b794c40SVakul Garg } 16802b794c40SVakul Garg } 16812b794c40SVakul Garg 1682692d7b5dSVakul Garg if (!zc || (rxm->full_len - skip) > len) { 1683692d7b5dSVakul Garg int err = skb_copy_datagram_msg(skb, rxm->offset + skip, 1684692d7b5dSVakul Garg msg, chunk); 1685692d7b5dSVakul Garg if (err < 0) 1686692d7b5dSVakul Garg return err; 1687692d7b5dSVakul Garg } 1688692d7b5dSVakul Garg 1689692d7b5dSVakul Garg len = len - chunk; 1690692d7b5dSVakul Garg copied = copied + chunk; 1691692d7b5dSVakul Garg 1692692d7b5dSVakul Garg /* Consume the data from record if it is non-peek case*/ 1693692d7b5dSVakul Garg if (!is_peek) { 1694692d7b5dSVakul Garg rxm->offset = rxm->offset + chunk; 1695692d7b5dSVakul Garg rxm->full_len = rxm->full_len - chunk; 1696692d7b5dSVakul Garg 1697692d7b5dSVakul Garg /* Return if there is unconsumed data in the record */ 1698692d7b5dSVakul Garg if (rxm->full_len - skip) 1699692d7b5dSVakul Garg break; 1700692d7b5dSVakul Garg } 1701692d7b5dSVakul Garg 1702692d7b5dSVakul Garg /* The remaining skip-bytes must lie in 1st record in rx_list. 1703692d7b5dSVakul Garg * So from the 2nd record, 'skip' should be 0. 
1704692d7b5dSVakul Garg */ 1705692d7b5dSVakul Garg skip = 0; 1706692d7b5dSVakul Garg 1707692d7b5dSVakul Garg if (msg) 1708692d7b5dSVakul Garg msg->msg_flags |= MSG_EOR; 1709692d7b5dSVakul Garg 1710692d7b5dSVakul Garg next_skb = skb_peek_next(skb, &ctx->rx_list); 1711692d7b5dSVakul Garg 1712692d7b5dSVakul Garg if (!is_peek) { 1713692d7b5dSVakul Garg skb_unlink(skb, &ctx->rx_list); 1714a88c26f6SVakul Garg consume_skb(skb); 1715692d7b5dSVakul Garg } 1716692d7b5dSVakul Garg 1717692d7b5dSVakul Garg skb = next_skb; 1718692d7b5dSVakul Garg } 1719692d7b5dSVakul Garg 17202b794c40SVakul Garg *control = ctrl; 1721692d7b5dSVakul Garg return copied; 1722692d7b5dSVakul Garg } 1723692d7b5dSVakul Garg 1724c46234ebSDave Watson int tls_sw_recvmsg(struct sock *sk, 1725c46234ebSDave Watson struct msghdr *msg, 1726c46234ebSDave Watson size_t len, 1727c46234ebSDave Watson int nonblock, 1728c46234ebSDave Watson int flags, 1729c46234ebSDave Watson int *addr_len) 1730c46234ebSDave Watson { 1731c46234ebSDave Watson struct tls_context *tls_ctx = tls_get_ctx(sk); 1732f66de3eeSBoris Pismenny struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); 17334509de14SVakul Garg struct tls_prot_info *prot = &tls_ctx->prot_info; 1734d3b18ad3SJohn Fastabend struct sk_psock *psock; 1735692d7b5dSVakul Garg unsigned char control = 0; 1736692d7b5dSVakul Garg ssize_t decrypted = 0; 1737c46234ebSDave Watson struct strp_msg *rxm; 17382b794c40SVakul Garg struct tls_msg *tlm; 1739c46234ebSDave Watson struct sk_buff *skb; 1740c46234ebSDave Watson ssize_t copied = 0; 1741c46234ebSDave Watson bool cmsg = false; 174206030dbaSDaniel Borkmann int target, err = 0; 1743c46234ebSDave Watson long timeo; 174400e23707SDavid Howells bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 1745692d7b5dSVakul Garg bool is_peek = flags & MSG_PEEK; 1746e91de6afSJohn Fastabend bool bpf_strp_enabled; 174794524d8fSVakul Garg int num_async = 0; 17480cada332SVinay Kumar Yadav int pending; 1749c46234ebSDave Watson 1750c46234ebSDave Watson 
flags |= nonblock; 1751c46234ebSDave Watson 1752c46234ebSDave Watson if (unlikely(flags & MSG_ERRQUEUE)) 1753c46234ebSDave Watson return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR); 1754c46234ebSDave Watson 1755d3b18ad3SJohn Fastabend psock = sk_psock_get(sk); 1756c46234ebSDave Watson lock_sock(sk); 1757e91de6afSJohn Fastabend bpf_strp_enabled = sk_psock_strp_enabled(psock); 1758c46234ebSDave Watson 1759692d7b5dSVakul Garg /* Process pending decrypted records. It must be non-zero-copy */ 17602b794c40SVakul Garg err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false, 17612b794c40SVakul Garg is_peek); 1762692d7b5dSVakul Garg if (err < 0) { 1763692d7b5dSVakul Garg tls_err_abort(sk, err); 1764692d7b5dSVakul Garg goto end; 1765692d7b5dSVakul Garg } else { 1766692d7b5dSVakul Garg copied = err; 1767692d7b5dSVakul Garg } 1768692d7b5dSVakul Garg 176946a16959SJakub Kicinski if (len <= copied) 1770692d7b5dSVakul Garg goto recv_end; 177146a16959SJakub Kicinski 177246a16959SJakub Kicinski target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 177346a16959SJakub Kicinski len = len - copied; 177446a16959SJakub Kicinski timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1775692d7b5dSVakul Garg 177604b25a54SJakub Kicinski while (len && (decrypted + copied < target || ctx->recv_pkt)) { 1777692d7b5dSVakul Garg bool retain_skb = false; 1778692d7b5dSVakul Garg bool zc = false; 1779692d7b5dSVakul Garg int to_decrypt; 1780c46234ebSDave Watson int chunk = 0; 17817754bd63SEran Ben Elisha bool async_capable; 17827754bd63SEran Ben Elisha bool async = false; 1783c46234ebSDave Watson 1784d3b18ad3SJohn Fastabend skb = tls_wait_data(sk, psock, flags, timeo, &err); 1785d3b18ad3SJohn Fastabend if (!skb) { 1786d3b18ad3SJohn Fastabend if (psock) { 178702c558b2SJohn Fastabend int ret = __tcp_bpf_recvmsg(sk, psock, 178802c558b2SJohn Fastabend msg, len, flags); 1789d3b18ad3SJohn Fastabend 1790d3b18ad3SJohn Fastabend if (ret > 0) { 1791692d7b5dSVakul Garg decrypted += ret; 
1792d3b18ad3SJohn Fastabend len -= ret; 1793d3b18ad3SJohn Fastabend continue; 1794d3b18ad3SJohn Fastabend } 1795d3b18ad3SJohn Fastabend } 1796c46234ebSDave Watson goto recv_end; 17972b794c40SVakul Garg } else { 17982b794c40SVakul Garg tlm = tls_msg(skb); 17992b794c40SVakul Garg if (prot->version == TLS_1_3_VERSION) 18002b794c40SVakul Garg tlm->control = 0; 18012b794c40SVakul Garg else 18022b794c40SVakul Garg tlm->control = ctx->control; 1803d3b18ad3SJohn Fastabend } 1804c46234ebSDave Watson 1805c46234ebSDave Watson rxm = strp_msg(skb); 180694524d8fSVakul Garg 18074509de14SVakul Garg to_decrypt = rxm->full_len - prot->overhead_size; 1808fedf201eSDave Watson 1809fedf201eSDave Watson if (to_decrypt <= len && !is_kvec && !is_peek && 1810130b392cSDave Watson ctx->control == TLS_RECORD_TYPE_DATA && 1811e91de6afSJohn Fastabend prot->version != TLS_1_3_VERSION && 1812e91de6afSJohn Fastabend !bpf_strp_enabled) 1813fedf201eSDave Watson zc = true; 1814fedf201eSDave Watson 1815c0ab4732SVakul Garg /* Do not use async mode if record is non-data */ 1816e91de6afSJohn Fastabend if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled) 18177754bd63SEran Ben Elisha async_capable = ctx->async_capable; 1818c0ab4732SVakul Garg else 18197754bd63SEran Ben Elisha async_capable = false; 1820c0ab4732SVakul Garg 1821fedf201eSDave Watson err = decrypt_skb_update(sk, skb, &msg->msg_iter, 18227754bd63SEran Ben Elisha &chunk, &zc, async_capable); 1823fedf201eSDave Watson if (err < 0 && err != -EINPROGRESS) { 1824fedf201eSDave Watson tls_err_abort(sk, EBADMSG); 1825fedf201eSDave Watson goto recv_end; 1826fedf201eSDave Watson } 1827fedf201eSDave Watson 18287754bd63SEran Ben Elisha if (err == -EINPROGRESS) { 18297754bd63SEran Ben Elisha async = true; 1830fedf201eSDave Watson num_async++; 18317754bd63SEran Ben Elisha } else if (prot->version == TLS_1_3_VERSION) { 18322b794c40SVakul Garg tlm->control = ctx->control; 18337754bd63SEran Ben Elisha } 18342b794c40SVakul Garg 18352b794c40SVakul Garg /* 
If the type of records being processed is not known yet, 18362b794c40SVakul Garg * set it to record type just dequeued. If it is already known, 18372b794c40SVakul Garg * but does not match the record type just dequeued, go to end. 18382b794c40SVakul Garg * We always get record type here since for tls1.2, record type 18392b794c40SVakul Garg * is known just after record is dequeued from stream parser. 18402b794c40SVakul Garg * For tls1.3, we disable async. 18412b794c40SVakul Garg */ 18422b794c40SVakul Garg 18432b794c40SVakul Garg if (!control) 18442b794c40SVakul Garg control = tlm->control; 18452b794c40SVakul Garg else if (control != tlm->control) 18462b794c40SVakul Garg goto recv_end; 1847fedf201eSDave Watson 1848c46234ebSDave Watson if (!cmsg) { 1849c46234ebSDave Watson int cerr; 1850c46234ebSDave Watson 1851c46234ebSDave Watson cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE, 18522b794c40SVakul Garg sizeof(control), &control); 1853c46234ebSDave Watson cmsg = true; 18542b794c40SVakul Garg if (control != TLS_RECORD_TYPE_DATA) { 1855c46234ebSDave Watson if (cerr || msg->msg_flags & MSG_CTRUNC) { 1856c46234ebSDave Watson err = -EIO; 1857c46234ebSDave Watson goto recv_end; 1858c46234ebSDave Watson } 1859c46234ebSDave Watson } 1860c46234ebSDave Watson } 1861c46234ebSDave Watson 1862c0ab4732SVakul Garg if (async) 1863c0ab4732SVakul Garg goto pick_next_record; 1864c0ab4732SVakul Garg 1865c46234ebSDave Watson if (!zc) { 1866e91de6afSJohn Fastabend if (bpf_strp_enabled) { 1867e91de6afSJohn Fastabend err = sk_psock_tls_strp_read(psock, skb); 1868e91de6afSJohn Fastabend if (err != __SK_PASS) { 1869e91de6afSJohn Fastabend rxm->offset = rxm->offset + rxm->full_len; 1870e91de6afSJohn Fastabend rxm->full_len = 0; 1871e91de6afSJohn Fastabend if (err == __SK_DROP) 1872e91de6afSJohn Fastabend consume_skb(skb); 1873e91de6afSJohn Fastabend ctx->recv_pkt = NULL; 1874e91de6afSJohn Fastabend __strp_unpause(&ctx->strp); 1875e91de6afSJohn Fastabend continue; 1876e91de6afSJohn Fastabend } 
1877e91de6afSJohn Fastabend } 1878e91de6afSJohn Fastabend 1879692d7b5dSVakul Garg if (rxm->full_len > len) { 1880692d7b5dSVakul Garg retain_skb = true; 1881692d7b5dSVakul Garg chunk = len; 1882692d7b5dSVakul Garg } else { 1883692d7b5dSVakul Garg chunk = rxm->full_len; 1884692d7b5dSVakul Garg } 188594524d8fSVakul Garg 1886692d7b5dSVakul Garg err = skb_copy_datagram_msg(skb, rxm->offset, 1887692d7b5dSVakul Garg msg, chunk); 1888c46234ebSDave Watson if (err < 0) 1889c46234ebSDave Watson goto recv_end; 1890692d7b5dSVakul Garg 1891692d7b5dSVakul Garg if (!is_peek) { 1892692d7b5dSVakul Garg rxm->offset = rxm->offset + chunk; 1893692d7b5dSVakul Garg rxm->full_len = rxm->full_len - chunk; 1894692d7b5dSVakul Garg } 1895692d7b5dSVakul Garg } 1896c46234ebSDave Watson 189794524d8fSVakul Garg pick_next_record: 1898692d7b5dSVakul Garg if (chunk > len) 1899692d7b5dSVakul Garg chunk = len; 1900c46234ebSDave Watson 1901692d7b5dSVakul Garg decrypted += chunk; 1902692d7b5dSVakul Garg len -= chunk; 1903692d7b5dSVakul Garg 1904692d7b5dSVakul Garg /* For async or peek case, queue the current skb */ 1905692d7b5dSVakul Garg if (async || is_peek || retain_skb) { 1906692d7b5dSVakul Garg skb_queue_tail(&ctx->rx_list, skb); 190794524d8fSVakul Garg skb = NULL; 1908692d7b5dSVakul Garg } 190994524d8fSVakul Garg 1910c46234ebSDave Watson if (tls_sw_advance_skb(sk, skb, chunk)) { 1911c46234ebSDave Watson /* Return full control message to 1912c46234ebSDave Watson * userspace before trying to parse 1913c46234ebSDave Watson * another message type 1914c46234ebSDave Watson */ 1915c46234ebSDave Watson msg->msg_flags |= MSG_EOR; 1916692d7b5dSVakul Garg if (ctx->control != TLS_RECORD_TYPE_DATA) 1917c46234ebSDave Watson goto recv_end; 191894524d8fSVakul Garg } else { 191994524d8fSVakul Garg break; 1920c46234ebSDave Watson } 192104b25a54SJakub Kicinski } 1922c46234ebSDave Watson 1923c46234ebSDave Watson recv_end: 192494524d8fSVakul Garg if (num_async) { 192594524d8fSVakul Garg /* Wait for all previously 
submitted records to be decrypted */ 19260cada332SVinay Kumar Yadav spin_lock_bh(&ctx->decrypt_compl_lock); 19270cada332SVinay Kumar Yadav ctx->async_notify = true; 19280cada332SVinay Kumar Yadav pending = atomic_read(&ctx->decrypt_pending); 19290cada332SVinay Kumar Yadav spin_unlock_bh(&ctx->decrypt_compl_lock); 19300cada332SVinay Kumar Yadav if (pending) { 193194524d8fSVakul Garg err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 193294524d8fSVakul Garg if (err) { 193394524d8fSVakul Garg /* one of async decrypt failed */ 193494524d8fSVakul Garg tls_err_abort(sk, err); 193594524d8fSVakul Garg copied = 0; 1936692d7b5dSVakul Garg decrypted = 0; 1937692d7b5dSVakul Garg goto end; 193894524d8fSVakul Garg } 193994524d8fSVakul Garg } else { 194094524d8fSVakul Garg reinit_completion(&ctx->async_wait.completion); 194194524d8fSVakul Garg } 19420cada332SVinay Kumar Yadav 19430cada332SVinay Kumar Yadav /* There can be no concurrent accesses, since we have no 19440cada332SVinay Kumar Yadav * pending decrypt operations 19450cada332SVinay Kumar Yadav */ 194694524d8fSVakul Garg WRITE_ONCE(ctx->async_notify, false); 1947692d7b5dSVakul Garg 1948692d7b5dSVakul Garg /* Drain records from the rx_list & copy if required */ 1949692d7b5dSVakul Garg if (is_peek || is_kvec) 19502b794c40SVakul Garg err = process_rx_list(ctx, msg, &control, &cmsg, copied, 1951692d7b5dSVakul Garg decrypted, false, is_peek); 1952692d7b5dSVakul Garg else 19532b794c40SVakul Garg err = process_rx_list(ctx, msg, &control, &cmsg, 0, 1954692d7b5dSVakul Garg decrypted, true, is_peek); 1955692d7b5dSVakul Garg if (err < 0) { 1956692d7b5dSVakul Garg tls_err_abort(sk, err); 1957692d7b5dSVakul Garg copied = 0; 1958692d7b5dSVakul Garg goto end; 195994524d8fSVakul Garg } 1960692d7b5dSVakul Garg } 1961692d7b5dSVakul Garg 1962692d7b5dSVakul Garg copied += decrypted; 1963692d7b5dSVakul Garg 1964692d7b5dSVakul Garg end: 1965c46234ebSDave Watson release_sock(sk); 1966d3b18ad3SJohn Fastabend if (psock) 1967d3b18ad3SJohn 
/* Tail of a function whose body starts above this chunk (recvmsg path). */
		sk_psock_put(sk, psock);
	return copied ? : err;
}

/* splice(2) support for a TLS socket: decrypt (if needed) the record at
 * the head of the receive queue and splice its plaintext payload into
 * @pipe.  Only TLS_RECORD_TYPE_DATA records can be spliced; control
 * records fail with -EINVAL.
 */
ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Block (subject to @timeo) until a full record is available. */
	skb = tls_wait_data(sk, NULL, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	if (!ctx->decrypted) {
		/* No destination iter/pages: decrypt in place, no zerocopy. */
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);

		/* splice does not support reading control messages */
		if (ctx->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto splice_read_end;
		}

		/* NOTE(review): decrypt_skb_update()'s result is only
		 * examined after the record-type test above, so a decrypt
		 * failure on a non-data record reports -EINVAL instead of
		 * aborting the connection — confirm this ordering is
		 * intentional.
		 */
		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = 1;
	}
	rxm = strp_msg(skb);

	/* Splice at most one record's worth of plaintext. */
	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	/* Consume the spliced bytes unless the caller is only peeking. */
	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

/* Return true if data is ready to be read from the socket: a BPF psock
 * ingress message, a parsed-but-unread record (recv_pkt), or a partially
 * consumed record left on rx_list.  Used for poll()-style readiness.
 */
bool tls_sw_stream_read(const struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	/* psock is RCU-protected; only peek at its ingress queue. */
	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || ctx->recv_pkt ||
	       !skb_queue_empty(&ctx->rx_list);
}

/* strparser parse_msg callback: determine the length of the TLS record
 * starting at @skb's strp offset.  Returns the full record length
 * (payload + TLS_HEADER_SIZE), 0 if the header is not complete yet
 * (wait for more data), or a negative error after aborting the socket.
 */
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);

	if (ret < 0)
		goto read_failure;

	/* Byte 0 of the TLS header is the record (content) type. */
	ctx->control = header[0];

	/* Bytes 3-4: 16-bit big-endian record payload length. */
	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	/* TLS 1.3 puts the nonce implicitly; only pre-1.3 carries an
	 * explicit IV in the record, adding to the per-record overhead.
	 */
	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Let an offload device resynchronize on this record boundary. */
	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + rxm->offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

/* strparser rcv_msg callback: queue one complete (still encrypted)
 * record, pause the parser until it is consumed, and wake readers via
 * the original ->sk_data_ready saved by tls_sw_strparser_arm().
 */
static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = 0;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}

/* Replacement ->sk_data_ready installed by tls_sw_strparser_arm():
 * feed new TCP data to the strparser, and additionally wake readers
 * directly if a BPF psock has queued ingress messages.
 */
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;

	strp_data_ready(&ctx->strp);

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}

/* Stop the delayed TX worker and prevent it from being rescheduled.
 * BIT_TX_CLOSING makes tx_work_handler() bail out early; setting
 * BIT_TX_SCHEDULED keeps tls_sw_write_space() from re-queueing it.
 */
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}

/* Tear down the TX half: drain pending async encryptions, flush what
 * can still be transmitted, then free every record left on tx_list and
 * the TX AEAD transform.
 */
void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	smp_store_mb(ctx->async_notify, true);
	if (atomic_read(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	/* Push out anything already encrypted (-1: no record limit). */
	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		/* NOTE(review): only msg_plaintext is freed here, unlike
		 * the loop below — presumably the encrypted side was
		 * already released by tls_free_partial_record(); confirm.
		 */
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}

/* Free the software TX context itself (after resources are released). */
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}

/* Tear down the RX half: free key material, drop any queued records,
 * free the RX AEAD, stop the strparser and restore the socket's
 * original data-ready callback if we had swapped it.
 */
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	/* aead_recv doubles as "RX path was fully set up" marker. */
	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}

/* Final strparser teardown; must follow strp_stop() (see above). */
void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_done(&ctx->strp);
}

/* Free the software RX context itself (after resources are released). */
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}

/* Convenience wrapper: release RX resources and free the RX context. */
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}

/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	/* Bail out if the socket is being torn down (see
	 * tls_sw_cancel_work_tx()).
	 */
	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	/* Only run if a transmission was actually scheduled. */
	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;
	/* tx_lock is taken before the socket lock, matching the
	 * ordering used elsewhere on the TX path.
	 */
	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	tls_tx_records(sk, -1);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

/* Write-space callback: kick the TX worker when encrypted records are
 * waiting and no run is already scheduled.
 */
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}

/* Hook the RX path into the socket: save and replace ->sk_data_ready
 * under the callback lock, then poke the strparser in case data has
 * already arrived.
 */
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);

	strp_check_rcv(&rx_ctx->strp);
}

/* Set up software (kernel crypto) TLS for one direction of @sk.
 *
 * @sk:  the socket being offloaded
 * @ctx: its TLS context (holds the user-supplied crypto_info)
 * @tx:  non-zero to configure the transmit path, zero for receive
 *
 * Allocates (or reuses) the per-direction sw context, derives the
 * protocol geometry (nonce/tag/IV/record-sequence sizes) from the
 * negotiated cipher, builds salt+IV and record-sequence buffers,
 * allocates and keys the AEAD transform, and — for RX — initializes
 * the strparser.  Returns 0 or a negative errno; on failure all
 * partially-allocated state is unwound via the goto-cleanup chain.
 */
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
	struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt, *cipher_name;
	size_t keysize;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* Allocate the per-direction software context, or reuse one
	 * installed earlier (e.g. by the device-offload fallback path).
	 */
	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	/* Pick the direction-specific state the rest of the function
	 * operates on: crypto parameters, cipher context and AEAD slot.
	 */
	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);
		aead = &sw_ctx_rx->aead_recv;
	}

	/* Extract cipher-specific geometry and key material from the
	 * user-supplied crypto_info.
	 */
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
			((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		key = gcm_128_info->key;
		salt = gcm_128_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
		rec_seq =
			((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
		gcm_256_info =
			(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
		rec_seq =
			((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
		ccm_128_info =
			(struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
		key = ccm_128_info->key;
		salt = ccm_128_info->salt;
		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
		cipher_name = "ccm(aes)";
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	/* TLS 1.3 carries no explicit per-record nonce, AADs only the
	 * header, and appends a one-byte content type to the plaintext.
	 */
	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	/* Single buffer laid out as [salt | iv]. */
	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = rec_seq_size;
	memcpy(cctx->iv, salt, salt_size);
	memcpy(cctx->iv + salt_size, iv, iv_size);
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	/* The AEAD may already exist when reusing a prior context. */
	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, keysize);

	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		/* Async decrypt is only used for pre-1.3; otherwise
		 * mirror whether the chosen AEAD implementation is async.
		 */
		if (crypto_info->version == TLS_1_3_VERSION)
			sw_ctx_rx->async_capable = 0;
		else
			sw_ctx_rx->async_capable =
				!!(tfm->__crt_alg->cra_flags &
				   CRYPTO_ALG_ASYNC);

		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}