xref: /openbmc/linux/net/tls/tls_sw.c (revision 662fbcec)
13c4d7559SDave Watson /*
23c4d7559SDave Watson  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
33c4d7559SDave Watson  * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
43c4d7559SDave Watson  * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
53c4d7559SDave Watson  * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
63c4d7559SDave Watson  * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7d3b18ad3SJohn Fastabend  * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
83c4d7559SDave Watson  *
93c4d7559SDave Watson  * This software is available to you under a choice of one of two
103c4d7559SDave Watson  * licenses.  You may choose to be licensed under the terms of the GNU
113c4d7559SDave Watson  * General Public License (GPL) Version 2, available from the file
123c4d7559SDave Watson  * COPYING in the main directory of this source tree, or the
133c4d7559SDave Watson  * OpenIB.org BSD license below:
143c4d7559SDave Watson  *
153c4d7559SDave Watson  *     Redistribution and use in source and binary forms, with or
163c4d7559SDave Watson  *     without modification, are permitted provided that the following
173c4d7559SDave Watson  *     conditions are met:
183c4d7559SDave Watson  *
193c4d7559SDave Watson  *      - Redistributions of source code must retain the above
203c4d7559SDave Watson  *        copyright notice, this list of conditions and the following
213c4d7559SDave Watson  *        disclaimer.
223c4d7559SDave Watson  *
233c4d7559SDave Watson  *      - Redistributions in binary form must reproduce the above
243c4d7559SDave Watson  *        copyright notice, this list of conditions and the following
253c4d7559SDave Watson  *        disclaimer in the documentation and/or other materials
263c4d7559SDave Watson  *        provided with the distribution.
273c4d7559SDave Watson  *
283c4d7559SDave Watson  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
293c4d7559SDave Watson  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
303c4d7559SDave Watson  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
313c4d7559SDave Watson  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
323c4d7559SDave Watson  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
333c4d7559SDave Watson  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
343c4d7559SDave Watson  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
353c4d7559SDave Watson  * SOFTWARE.
363c4d7559SDave Watson  */
373c4d7559SDave Watson 
38da353facSDaniel Jordan #include <linux/bug.h>
39c46234ebSDave Watson #include <linux/sched/signal.h>
403c4d7559SDave Watson #include <linux/module.h>
418d338c76SHerbert Xu #include <linux/kernel.h>
42974271e5SJim Ma #include <linux/splice.h>
433c4d7559SDave Watson #include <crypto/aead.h>
443c4d7559SDave Watson 
45c46234ebSDave Watson #include <net/strparser.h>
463c4d7559SDave Watson #include <net/tls.h>
4740e0b090SPeilin Ye #include <trace/events/sock.h>
483c4d7559SDave Watson 
4958790314SJakub Kicinski #include "tls.h"
5058790314SJakub Kicinski 
/* Per-call parameters for one record decryption.  The struct_group()
 * wraps the pure "input" flags so callers can reset them as a unit
 * without disturbing the skb pointer below.
 */
514175eac3SJakub Kicinski struct tls_decrypt_arg {
526bd116c8SJakub Kicinski 	struct_group(inargs,
534175eac3SJakub Kicinski 	bool zc;	/* zero-copy decrypt into user pages requested */
544175eac3SJakub Kicinski 	bool async;	/* allow asynchronous crypto completion */
55ce61327cSJakub Kicinski 	u8 tail;	/* last byte of the record; used as the TLS 1.3
				 * content-type candidate when zc is set
				 * (see tls_padding_length())
				 */
566bd116c8SJakub Kicinski 	);
576bd116c8SJakub Kicinski 
586bd116c8SJakub Kicinski 	struct sk_buff *skb;	/* skb carrying the record being decrypted */
594175eac3SJakub Kicinski };
604175eac3SJakub Kicinski 
/* Per-request decrypt scratch state.  It is carved out of the same
 * allocation as the aead_request, directly after it (aligned) — see the
 * pointer arithmetic in tls_decrypt_done() that recovers it from the
 * request pointer.
 */
61b89fec54SJakub Kicinski struct tls_decrypt_ctx {
628d338c76SHerbert Xu 	struct sock *sk;		/* socket owning this request */
63b89fec54SJakub Kicinski 	u8 iv[MAX_IV_SIZE];		/* per-record IV buffer */
64b89fec54SJakub Kicinski 	u8 aad[TLS_MAX_AAD_SIZE];	/* additional authenticated data */
65b89fec54SJakub Kicinski 	u8 tail;			/* landing spot for the record tail byte */
66b89fec54SJakub Kicinski 	struct scatterlist sg[];	/* variable-size in/out sg entries */
67b89fec54SJakub Kicinski };
68b89fec54SJakub Kicinski 
/* Abort the TLS socket with @err (a negative errno): publish the error
 * as a positive sk_err and notify waiters/pollers.  noinline so the
 * WARN site stays attributable in backtraces.
 */
69da353facSDaniel Jordan noinline void tls_err_abort(struct sock *sk, int err)
70da353facSDaniel Jordan {
71da353facSDaniel Jordan 	WARN_ON_ONCE(err >= 0);
72da353facSDaniel Jordan 	/* sk->sk_err should contain a positive error code. */
738a0d57dfSJakub Kicinski 	WRITE_ONCE(sk->sk_err, -err);
748a0d57dfSJakub Kicinski 	/* Paired with smp_rmb() in tcp_poll() */
758a0d57dfSJakub Kicinski 	smp_wmb();
76da353facSDaniel Jordan 	sk_error_report(sk);
77da353facSDaniel Jordan }
78da353facSDaniel Jordan 
/* Count the scatterlist entries needed to map @len bytes of @skb
 * starting at @offset: one entry for the linear head chunk, one per
 * touched page frag, recursing into any frag_list skbs.  Returns the
 * element count, or -EMSGSIZE once the frag_list nesting exceeds 24
 * levels (guards against unbounded recursion / stack overflow).
 */
790927f71dSDoron Roberts-Kedes static int __skb_nsg(struct sk_buff *skb, int offset, int len,
800927f71dSDoron Roberts-Kedes                      unsigned int recursion_level)
810927f71dSDoron Roberts-Kedes {
820927f71dSDoron Roberts-Kedes         int start = skb_headlen(skb);
830927f71dSDoron Roberts-Kedes         int i, chunk = start - offset;
840927f71dSDoron Roberts-Kedes         struct sk_buff *frag_iter;
850927f71dSDoron Roberts-Kedes         int elt = 0;
860927f71dSDoron Roberts-Kedes 
870927f71dSDoron Roberts-Kedes         if (unlikely(recursion_level >= 24))
880927f71dSDoron Roberts-Kedes                 return -EMSGSIZE;
890927f71dSDoron Roberts-Kedes 
        /* Bytes remaining in the linear header area, if any */
900927f71dSDoron Roberts-Kedes         if (chunk > 0) {
910927f71dSDoron Roberts-Kedes                 if (chunk > len)
920927f71dSDoron Roberts-Kedes                         chunk = len;
930927f71dSDoron Roberts-Kedes                 elt++;
940927f71dSDoron Roberts-Kedes                 len -= chunk;
950927f71dSDoron Roberts-Kedes                 if (len == 0)
960927f71dSDoron Roberts-Kedes                         return elt;
970927f71dSDoron Roberts-Kedes                 offset += chunk;
980927f71dSDoron Roberts-Kedes         }
990927f71dSDoron Roberts-Kedes 
        /* One sg entry per page frag that overlaps [offset, offset+len) */
1000927f71dSDoron Roberts-Kedes         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1010927f71dSDoron Roberts-Kedes                 int end;
1020927f71dSDoron Roberts-Kedes 
1030927f71dSDoron Roberts-Kedes                 WARN_ON(start > offset + len);
1040927f71dSDoron Roberts-Kedes 
1050927f71dSDoron Roberts-Kedes                 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1060927f71dSDoron Roberts-Kedes                 chunk = end - offset;
1070927f71dSDoron Roberts-Kedes                 if (chunk > 0) {
1080927f71dSDoron Roberts-Kedes                         if (chunk > len)
1090927f71dSDoron Roberts-Kedes                                 chunk = len;
1100927f71dSDoron Roberts-Kedes                         elt++;
1110927f71dSDoron Roberts-Kedes                         len -= chunk;
1120927f71dSDoron Roberts-Kedes                         if (len == 0)
1130927f71dSDoron Roberts-Kedes                                 return elt;
1140927f71dSDoron Roberts-Kedes                         offset += chunk;
1150927f71dSDoron Roberts-Kedes                 }
1160927f71dSDoron Roberts-Kedes                 start = end;
1170927f71dSDoron Roberts-Kedes         }
1180927f71dSDoron Roberts-Kedes 
        /* Recurse into chained skbs, accumulating their element counts */
1190927f71dSDoron Roberts-Kedes         if (unlikely(skb_has_frag_list(skb))) {
1200927f71dSDoron Roberts-Kedes                 skb_walk_frags(skb, frag_iter) {
1210927f71dSDoron Roberts-Kedes                         int end, ret;
1220927f71dSDoron Roberts-Kedes 
1230927f71dSDoron Roberts-Kedes                         WARN_ON(start > offset + len);
1240927f71dSDoron Roberts-Kedes 
1250927f71dSDoron Roberts-Kedes                         end = start + frag_iter->len;
1260927f71dSDoron Roberts-Kedes                         chunk = end - offset;
1270927f71dSDoron Roberts-Kedes                         if (chunk > 0) {
1280927f71dSDoron Roberts-Kedes                                 if (chunk > len)
1290927f71dSDoron Roberts-Kedes                                         chunk = len;
1300927f71dSDoron Roberts-Kedes                                 ret = __skb_nsg(frag_iter, offset - start, chunk,
1310927f71dSDoron Roberts-Kedes                                                 recursion_level + 1);
1320927f71dSDoron Roberts-Kedes                                 if (unlikely(ret < 0))
1330927f71dSDoron Roberts-Kedes                                         return ret;
1340927f71dSDoron Roberts-Kedes                                 elt += ret;
1350927f71dSDoron Roberts-Kedes                                 len -= chunk;
1360927f71dSDoron Roberts-Kedes                                 if (len == 0)
1370927f71dSDoron Roberts-Kedes                                         return elt;
1380927f71dSDoron Roberts-Kedes                                 offset += chunk;
1390927f71dSDoron Roberts-Kedes                         }
1400927f71dSDoron Roberts-Kedes                         start = end;
1410927f71dSDoron Roberts-Kedes                 }
1420927f71dSDoron Roberts-Kedes         }
        /* Caller must request a range that lies fully within the skb */
1430927f71dSDoron Roberts-Kedes         BUG_ON(len);
1440927f71dSDoron Roberts-Kedes         return elt;
1450927f71dSDoron Roberts-Kedes }
1460927f71dSDoron Roberts-Kedes 
1470927f71dSDoron Roberts-Kedes /* Return the number of scatterlist elements required to completely map the
1480927f71dSDoron Roberts-Kedes  * skb, or -EMSGSIZE if the recursion depth is exceeded.
1490927f71dSDoron Roberts-Kedes  */
1500927f71dSDoron Roberts-Kedes static int skb_nsg(struct sk_buff *skb, int offset, int len)
1510927f71dSDoron Roberts-Kedes {
        /* Thin wrapper: start the recursive walk at depth 0 */
1520927f71dSDoron Roberts-Kedes         return __skb_nsg(skb, offset, len, 0);
1530927f71dSDoron Roberts-Kedes }
1540927f71dSDoron Roberts-Kedes 
/* Compute the TLS 1.3 zero-padding length of the record in @skb and
 * record the real content type in the skb's tls_msg.  TLS 1.3 appends
 * the content-type byte after the plaintext and pads with zeros, so we
 * scan backwards from the byte just before the auth tag until a
 * non-zero byte is found.  Returns the number of padding bytes
 * (0 for pre-1.3 records), -EBADMSG if the whole record is zeros, or a
 * negative errno from skb_copy_bits().
 */
155ce61327cSJakub Kicinski static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
156ce61327cSJakub Kicinski 			      struct tls_decrypt_arg *darg)
157130b392cSDave Watson {
158130b392cSDave Watson 	struct strp_msg *rxm = strp_msg(skb);
159c3f6bb74SJakub Kicinski 	struct tls_msg *tlm = tls_msg(skb);
160130b392cSDave Watson 	int sub = 0;
161130b392cSDave Watson 
162130b392cSDave Watson 	/* Determine zero-padding length */
163b53f4976SJakub Kicinski 	if (prot->version == TLS_1_3_VERSION) {
1645deee41bSJakub Kicinski 		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		/* Zero-copy decrypt already captured the tail byte; otherwise
		 * start with 0 to force at least one read from the skb.
		 */
165ce61327cSJakub Kicinski 		char content_type = darg->zc ? darg->tail : 0;
166130b392cSDave Watson 		int err;
167130b392cSDave Watson 
168130b392cSDave Watson 		while (content_type == 0) {
			/* Ran out of payload before finding the type byte */
1695deee41bSJakub Kicinski 			if (offset < prot->prepend_size)
170130b392cSDave Watson 				return -EBADMSG;
1715deee41bSJakub Kicinski 			err = skb_copy_bits(skb, rxm->offset + offset,
172130b392cSDave Watson 					    &content_type, 1);
173b53f4976SJakub Kicinski 			if (err)
174b53f4976SJakub Kicinski 				return err;
175130b392cSDave Watson 			if (content_type)
176130b392cSDave Watson 				break;
177130b392cSDave Watson 			sub++;
1785deee41bSJakub Kicinski 			offset--;
179130b392cSDave Watson 		}
180c3f6bb74SJakub Kicinski 		tlm->control = content_type;
181130b392cSDave Watson 	}
182130b392cSDave Watson 	return sub;
183130b392cSDave Watson }
184130b392cSDave Watson 
/* Completion callback for asynchronous AEAD record decryption.
 * Recovers the socket from the tls_decrypt_ctx that was allocated
 * immediately after the aead_request, propagates any crypto error to
 * the socket, releases destination pages for non-in-place decrypts,
 * frees the request, and wakes the waiter when the last pending
 * decrypt finishes.
 */
1858580e55aSHerbert Xu static void tls_decrypt_done(void *data, int err)
18694524d8fSVakul Garg {
1868580e55aSHerbert Xu 	struct aead_request *aead_req = data;
1878d338c76SHerbert Xu 	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
18894524d8fSVakul Garg 	struct scatterlist *sgout = aead_req->dst;
189692d7b5dSVakul Garg 	struct scatterlist *sgin = aead_req->src;
1907a3dd8c8SJohn Fastabend 	struct tls_sw_context_rx *ctx;
1918d338c76SHerbert Xu 	struct tls_decrypt_ctx *dctx;
1927a3dd8c8SJohn Fastabend 	struct tls_context *tls_ctx;
19394524d8fSVakul Garg 	struct scatterlist *sg;
19494524d8fSVakul Garg 	unsigned int pages;
1956ececdc5SJakub Kicinski 	struct sock *sk;
1968d338c76SHerbert Xu 	int aead_size;
1977a3dd8c8SJohn Fastabend 
	/* dctx lives right after the aead_request in the same allocation;
	 * redo the size/alignment math used at alloc time to find it.
	 */
1988d338c76SHerbert Xu 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
1998d338c76SHerbert Xu 	aead_size = ALIGN(aead_size, __alignof__(*dctx));
2008d338c76SHerbert Xu 	dctx = (void *)((u8 *)aead_req + aead_size);
2018d338c76SHerbert Xu 
2028d338c76SHerbert Xu 	sk = dctx->sk;
2036ececdc5SJakub Kicinski 	tls_ctx = tls_get_ctx(sk);
2047a3dd8c8SJohn Fastabend 	ctx = tls_sw_ctx_rx(tls_ctx);
20594524d8fSVakul Garg 
20694524d8fSVakul Garg 	/* Propagate if there was an err */
20794524d8fSVakul Garg 	if (err) {
		/* -EBADMSG means authentication failed: count it */
2085c5ec668SJakub Kicinski 		if (err == -EBADMSG)
2096ececdc5SJakub Kicinski 			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
21094524d8fSVakul Garg 		ctx->async_wait.err = err;
2116ececdc5SJakub Kicinski 		tls_err_abort(sk, err);
21294524d8fSVakul Garg 	}
21394524d8fSVakul Garg 
214692d7b5dSVakul Garg 	/* Free the destination pages if skb was not decrypted inplace */
215692d7b5dSVakul Garg 	if (sgout != sgin) {
21694524d8fSVakul Garg 		/* Skip the first S/G entry as it points to AAD */
21794524d8fSVakul Garg 		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
21894524d8fSVakul Garg 			if (!sg)
21994524d8fSVakul Garg 				break;
22094524d8fSVakul Garg 			put_page(sg_page(sg));
22194524d8fSVakul Garg 		}
222692d7b5dSVakul Garg 	}
22394524d8fSVakul Garg 
22494524d8fSVakul Garg 	kfree(aead_req);
22594524d8fSVakul Garg 
	/* Last pending decrypt wakes up the synchronous waiter; the
	 * spinlock serializes against the pending-count reader.
	 */
2260cada332SVinay Kumar Yadav 	spin_lock_bh(&ctx->decrypt_compl_lock);
22737943f04SJakub Kicinski 	if (!atomic_dec_return(&ctx->decrypt_pending))
22894524d8fSVakul Garg 		complete(&ctx->async_wait.completion);
2290cada332SVinay Kumar Yadav 	spin_unlock_bh(&ctx->decrypt_compl_lock);
23094524d8fSVakul Garg }
23294524d8fSVakul Garg 
/* Submit one AEAD decryption of @data_len payload bytes (plus auth tag)
 * from @sgin into @sgout using @iv_recv.  In async mode the request
 * completes via tls_decrypt_done() and 0 is returned immediately on
 * -EINPROGRESS; otherwise the call blocks until the crypto layer
 * finishes.  darg->async is cleared whenever completion happened
 * synchronously so the caller knows the result is final.
 */
233c46234ebSDave Watson static int tls_do_decryption(struct sock *sk,
234c46234ebSDave Watson 			     struct scatterlist *sgin,
235c46234ebSDave Watson 			     struct scatterlist *sgout,
236c46234ebSDave Watson 			     char *iv_recv,
237c46234ebSDave Watson 			     size_t data_len,
23894524d8fSVakul Garg 			     struct aead_request *aead_req,
2393547a1f9SJakub Kicinski 			     struct tls_decrypt_arg *darg)
240c46234ebSDave Watson {
241c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2424509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
243f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
244c46234ebSDave Watson 	int ret;
245c46234ebSDave Watson 
2460b243d00SVakul Garg 	aead_request_set_tfm(aead_req, ctx->aead_recv);
2474509de14SVakul Garg 	aead_request_set_ad(aead_req, prot->aad_size);
	/* Ciphertext length = payload + authentication tag */
248c46234ebSDave Watson 	aead_request_set_crypt(aead_req, sgin, sgout,
2494509de14SVakul Garg 			       data_len + prot->tag_size,
250c46234ebSDave Watson 			       (u8 *)iv_recv);
251c46234ebSDave Watson 
2523547a1f9SJakub Kicinski 	if (darg->async) {
25394524d8fSVakul Garg 		aead_request_set_callback(aead_req,
25494524d8fSVakul Garg 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
2558d338c76SHerbert Xu 					  tls_decrypt_done, aead_req);
25694524d8fSVakul Garg 		atomic_inc(&ctx->decrypt_pending);
25794524d8fSVakul Garg 	} else {
25894524d8fSVakul Garg 		aead_request_set_callback(aead_req,
25994524d8fSVakul Garg 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
26094524d8fSVakul Garg 					  crypto_req_done, &ctx->async_wait);
26194524d8fSVakul Garg 	}
26294524d8fSVakul Garg 
26394524d8fSVakul Garg 	ret = crypto_aead_decrypt(aead_req);
26494524d8fSVakul Garg 	if (ret == -EINPROGRESS) {
		/* Async caller gets notified via tls_decrypt_done() */
2653547a1f9SJakub Kicinski 		if (darg->async)
2663547a1f9SJakub Kicinski 			return 0;
26794524d8fSVakul Garg 
26894524d8fSVakul Garg 		ret = crypto_wait_req(ret, &ctx->async_wait);
26994524d8fSVakul Garg 	}
	/* Reaching here means the operation completed synchronously */
2703547a1f9SJakub Kicinski 	darg->async = false;
2713547a1f9SJakub Kicinski 
272c46234ebSDave Watson 	return ret;
273c46234ebSDave Watson }
274c46234ebSDave Watson 
/* Trim the open record's plaintext msg to @target_size and its
 * encrypted msg to @target_size plus the per-record TLS overhead
 * (header/IV/tag), releasing any excess memory back to the socket.
 */
275d829e9c4SDaniel Borkmann static void tls_trim_both_msgs(struct sock *sk, int target_size)
2763c4d7559SDave Watson {
2773c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2784509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
279f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
280a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
2813c4d7559SDave Watson 
282d829e9c4SDaniel Borkmann 	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	/* The encrypted copy also carries the protocol overhead bytes */
2833c4d7559SDave Watson 	if (target_size > 0)
2844509de14SVakul Garg 		target_size += prot->overhead_size;
285d829e9c4SDaniel Borkmann 	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
2863c4d7559SDave Watson }
2873c4d7559SDave Watson 
/* Grow the open record's encrypted sk_msg by @len bytes of page
 * memory, charged to the socket.  Returns 0 or a negative errno from
 * sk_msg_alloc().
 */
288d829e9c4SDaniel Borkmann static int tls_alloc_encrypted_msg(struct sock *sk, int len)
2893c4d7559SDave Watson {
2903c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
291f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
292a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
293d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
2943c4d7559SDave Watson 
295d829e9c4SDaniel Borkmann 	return sk_msg_alloc(sk, msg_en, len, 0);
2963c4d7559SDave Watson }
2973c4d7559SDave Watson 
/* Extend the plaintext sk_msg up to @required bytes by taking page
 * references from the encrypted sk_msg, so both views share the same
 * pages at matching offsets (enables in-place encryption).  The caller
 * guarantees msg_en already has the room.  Returns sk_msg_clone()'s
 * result.
 */
298d829e9c4SDaniel Borkmann static int tls_clone_plaintext_msg(struct sock *sk, int required)
2993c4d7559SDave Watson {
3003c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
3014509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
302f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
303a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
304d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl = &rec->msg_plaintext;
305d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
3064e6d4720SVakul Garg 	int skip, len;
3073c4d7559SDave Watson 
308d829e9c4SDaniel Borkmann 	/* We add page references worth len bytes from encrypted sg
309d829e9c4SDaniel Borkmann 	 * at the end of plaintext sg. It is guaranteed that msg_en
3104e6d4720SVakul Garg 	 * has enough required room (ensured by caller).
3114e6d4720SVakul Garg 	 */
312d829e9c4SDaniel Borkmann 	len = required - msg_pl->sg.size;
31352ea992cSVakul Garg 
314d829e9c4SDaniel Borkmann 	/* Skip initial bytes in msg_en's data to be able to use
315d829e9c4SDaniel Borkmann 	 * same offset of both plain and encrypted data.
3164e6d4720SVakul Garg 	 */
3174509de14SVakul Garg 	skip = prot->prepend_size + msg_pl->sg.size;
3184e6d4720SVakul Garg 
319d829e9c4SDaniel Borkmann 	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
3203c4d7559SDave Watson }
3213c4d7559SDave Watson 
/* Allocate and initialize a fresh TX record.  The allocation also
 * reserves room for the AEAD request (crypto_aead_reqsize) after the
 * tls_rec.  Both AEAD sg tables get entry 0 pointing at the record's
 * AAD space with entry 1 left open (unmarked end) for the payload.
 * Returns the record or NULL on allocation failure.
 */
322d3b18ad3SJohn Fastabend static struct tls_rec *tls_get_rec(struct sock *sk)
323d3b18ad3SJohn Fastabend {
324d3b18ad3SJohn Fastabend 	struct tls_context *tls_ctx = tls_get_ctx(sk);
3254509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
326d3b18ad3SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
327d3b18ad3SJohn Fastabend 	struct sk_msg *msg_pl, *msg_en;
328d3b18ad3SJohn Fastabend 	struct tls_rec *rec;
329d3b18ad3SJohn Fastabend 	int mem_size;
330d3b18ad3SJohn Fastabend 
331d3b18ad3SJohn Fastabend 	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
332d3b18ad3SJohn Fastabend 
333d3b18ad3SJohn Fastabend 	rec = kzalloc(mem_size, sk->sk_allocation);
334d3b18ad3SJohn Fastabend 	if (!rec)
335d3b18ad3SJohn Fastabend 		return NULL;
336d3b18ad3SJohn Fastabend 
337d3b18ad3SJohn Fastabend 	msg_pl = &rec->msg_plaintext;
338d3b18ad3SJohn Fastabend 	msg_en = &rec->msg_encrypted;
339d3b18ad3SJohn Fastabend 
340d3b18ad3SJohn Fastabend 	sk_msg_init(msg_pl);
341d3b18ad3SJohn Fastabend 	sk_msg_init(msg_en);
342d3b18ad3SJohn Fastabend 
	/* sg[0] = AAD, sg[1] = payload chain (end mark cleared) */
343d3b18ad3SJohn Fastabend 	sg_init_table(rec->sg_aead_in, 2);
3444509de14SVakul Garg 	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
345d3b18ad3SJohn Fastabend 	sg_unmark_end(&rec->sg_aead_in[1]);
346d3b18ad3SJohn Fastabend 
347d3b18ad3SJohn Fastabend 	sg_init_table(rec->sg_aead_out, 2);
3484509de14SVakul Garg 	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
349d3b18ad3SJohn Fastabend 	sg_unmark_end(&rec->sg_aead_out[1]);
350d3b18ad3SJohn Fastabend 
	/* Needed by tls_encrypt_done() to find the socket from the rec */
3518d338c76SHerbert Xu 	rec->sk = sk;
3528d338c76SHerbert Xu 
353d3b18ad3SJohn Fastabend 	return rec;
354d3b18ad3SJohn Fastabend }
355d3b18ad3SJohn Fastabend 
/* Release both sk_msgs of @rec (uncharging the socket) and free it. */
356d3b18ad3SJohn Fastabend static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
357d3b18ad3SJohn Fastabend {
358d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &rec->msg_encrypted);
359d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &rec->msg_plaintext);
360d3b18ad3SJohn Fastabend 	kfree(rec);
361d3b18ad3SJohn Fastabend }
362d3b18ad3SJohn Fastabend 
/* Free the currently open (not yet transmitted) record, if any, and
 * clear the context's open_rec pointer.  Safe to call when no record
 * is open.
 */
363c774973eSVakul Garg static void tls_free_open_rec(struct sock *sk)
3643c4d7559SDave Watson {
3653c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
366f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
367a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
3683c4d7559SDave Watson 
369d3b18ad3SJohn Fastabend 	if (rec) {
370d3b18ad3SJohn Fastabend 		tls_free_rec(sk, rec);
371d3b18ad3SJohn Fastabend 		ctx->open_rec = NULL;
372d3b18ad3SJohn Fastabend 	}
3733c4d7559SDave Watson }
3743c4d7559SDave Watson 
/* Push encrypted records out to TCP.  First finish any partially-sent
 * record, then transmit records from the head of tx_list as long as
 * they are marked tx_ready (encryption completed).  @flags == -1 means
 * "use the flags saved on each record".  Any error other than -EAGAIN
 * aborts the socket with -EBADMSG.  Returns 0 or a negative errno.
 */
375a42055e8SVakul Garg int tls_tx_records(struct sock *sk, int flags)
376a42055e8SVakul Garg {
377a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
378a42055e8SVakul Garg 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
379a42055e8SVakul Garg 	struct tls_rec *rec, *tmp;
380d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en;
381a42055e8SVakul Garg 	int tx_flags, rc = 0;
382a42055e8SVakul Garg 
383a42055e8SVakul Garg 	if (tls_is_partially_sent_record(tls_ctx)) {
3849932a29aSVakul Garg 		rec = list_first_entry(&ctx->tx_list,
385a42055e8SVakul Garg 				       struct tls_rec, list);
386a42055e8SVakul Garg 
		/* -1: fall back to the flags captured with the record */
387a42055e8SVakul Garg 		if (flags == -1)
388a42055e8SVakul Garg 			tx_flags = rec->tx_flags;
389a42055e8SVakul Garg 		else
390a42055e8SVakul Garg 			tx_flags = flags;
391a42055e8SVakul Garg 
392a42055e8SVakul Garg 		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
393a42055e8SVakul Garg 		if (rc)
394a42055e8SVakul Garg 			goto tx_err;
395a42055e8SVakul Garg 
396a42055e8SVakul Garg 		/* Full record has been transmitted.
3979932a29aSVakul Garg 		 * Remove the head of tx_list
398a42055e8SVakul Garg 		 */
399a42055e8SVakul Garg 		list_del(&rec->list);
400d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_plaintext);
401a42055e8SVakul Garg 		kfree(rec);
402a42055e8SVakul Garg 	}
403a42055e8SVakul Garg 
4049932a29aSVakul Garg 	/* Tx all ready records */
4059932a29aSVakul Garg 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		/* tx_ready is set by the encrypt completion path */
4069932a29aSVakul Garg 		if (READ_ONCE(rec->tx_ready)) {
407a42055e8SVakul Garg 			if (flags == -1)
408a42055e8SVakul Garg 				tx_flags = rec->tx_flags;
409a42055e8SVakul Garg 			else
410a42055e8SVakul Garg 				tx_flags = flags;
411a42055e8SVakul Garg 
412d829e9c4SDaniel Borkmann 			msg_en = &rec->msg_encrypted;
413a42055e8SVakul Garg 			rc = tls_push_sg(sk, tls_ctx,
414d829e9c4SDaniel Borkmann 					 &msg_en->sg.data[msg_en->sg.curr],
415a42055e8SVakul Garg 					 0, tx_flags);
416a42055e8SVakul Garg 			if (rc)
417a42055e8SVakul Garg 				goto tx_err;
418a42055e8SVakul Garg 
419a42055e8SVakul Garg 			list_del(&rec->list);
420d829e9c4SDaniel Borkmann 			sk_msg_free(sk, &rec->msg_plaintext);
421a42055e8SVakul Garg 			kfree(rec);
422a42055e8SVakul Garg 		} else {
			/* Records must go out in order; stop at the first
			 * one still being encrypted.
			 */
423a42055e8SVakul Garg 			break;
424a42055e8SVakul Garg 		}
425a42055e8SVakul Garg 	}
426a42055e8SVakul Garg 
427a42055e8SVakul Garg tx_err:
428a42055e8SVakul Garg 	if (rc < 0 && rc != -EAGAIN)
429da353facSDaniel Jordan 		tls_err_abort(sk, -EBADMSG);
430a42055e8SVakul Garg 
431a42055e8SVakul Garg 	return rc;
432a42055e8SVakul Garg }
433a42055e8SVakul Garg 
/* Completion callback for asynchronous AEAD record encryption.
 * Restores the first payload sg entry to again cover the TLS prepend
 * bytes, propagates errors to the socket, marks the record tx_ready,
 * and — if this record is at the head of tx_list — schedules the tx
 * worker to push it out.  Also wakes any waiter once the pending
 * encrypt count drops to zero.
 */
4348580e55aSHerbert Xu static void tls_encrypt_done(void *data, int err)
435a42055e8SVakul Garg {
4368d338c76SHerbert Xu 	struct tls_sw_context_tx *ctx;
4378d338c76SHerbert Xu 	struct tls_context *tls_ctx;
4388d338c76SHerbert Xu 	struct tls_prot_info *prot;
439d3777ceaSHerbert Xu 	struct tls_rec *rec = data;
440d829e9c4SDaniel Borkmann 	struct scatterlist *sge;
441d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en;
442a42055e8SVakul Garg 	bool ready = false;
4438d338c76SHerbert Xu 	struct sock *sk;
444a42055e8SVakul Garg 	int pending;
445a42055e8SVakul Garg 
446d829e9c4SDaniel Borkmann 	msg_en = &rec->msg_encrypted;
447a42055e8SVakul Garg 
4488d338c76SHerbert Xu 	sk = rec->sk;
4498d338c76SHerbert Xu 	tls_ctx = tls_get_ctx(sk);
4508d338c76SHerbert Xu 	prot = &tls_ctx->prot_info;
4518d338c76SHerbert Xu 	ctx = tls_sw_ctx_tx(tls_ctx);
4528d338c76SHerbert Xu 
	/* Undo the prepend-size adjustment made in tls_do_encryption() */
453d829e9c4SDaniel Borkmann 	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
4544509de14SVakul Garg 	sge->offset -= prot->prepend_size;
4554509de14SVakul Garg 	sge->length += prot->prepend_size;
456a42055e8SVakul Garg 
45780ece6a0SVakul Garg 	/* Check if error is previously set on socket */
458a42055e8SVakul Garg 	if (err || sk->sk_err) {
		/* NULL rec below suppresses the tx_ready/scheduling path */
459a42055e8SVakul Garg 		rec = NULL;
460a42055e8SVakul Garg 
461a42055e8SVakul Garg 		/* If err is already set on socket, return the same code */
462a42055e8SVakul Garg 		if (sk->sk_err) {
4631d9d6fd2SDaniel Jordan 			ctx->async_wait.err = -sk->sk_err;
464a42055e8SVakul Garg 		} else {
465a42055e8SVakul Garg 			ctx->async_wait.err = err;
466a42055e8SVakul Garg 			tls_err_abort(sk, err);
467a42055e8SVakul Garg 		}
468a42055e8SVakul Garg 	}
469a42055e8SVakul Garg 
4709932a29aSVakul Garg 	if (rec) {
4719932a29aSVakul Garg 		struct tls_rec *first_rec;
4729932a29aSVakul Garg 
4739932a29aSVakul Garg 		/* Mark the record as ready for transmission */
4749932a29aSVakul Garg 		smp_store_mb(rec->tx_ready, true);
4759932a29aSVakul Garg 
4769932a29aSVakul Garg 		/* If received record is at head of tx_list, schedule tx */
4779932a29aSVakul Garg 		first_rec = list_first_entry(&ctx->tx_list,
4789932a29aSVakul Garg 					     struct tls_rec, list);
4799932a29aSVakul Garg 		if (rec == first_rec)
4809932a29aSVakul Garg 			ready = true;
4819932a29aSVakul Garg 	}
482a42055e8SVakul Garg 
	/* Waiter is only completed when it announced itself via
	 * async_notify; lock pairs with the pending-count reader.
	 */
4830cada332SVinay Kumar Yadav 	spin_lock_bh(&ctx->encrypt_compl_lock);
484a42055e8SVakul Garg 	pending = atomic_dec_return(&ctx->encrypt_pending);
485a42055e8SVakul Garg 
4860cada332SVinay Kumar Yadav 	if (!pending && ctx->async_notify)
487a42055e8SVakul Garg 		complete(&ctx->async_wait.completion);
4880cada332SVinay Kumar Yadav 	spin_unlock_bh(&ctx->encrypt_compl_lock);
489a42055e8SVakul Garg 
490a42055e8SVakul Garg 	if (!ready)
491a42055e8SVakul Garg 		return;
492a42055e8SVakul Garg 
493a42055e8SVakul Garg 	/* Schedule the transmission */
494a42055e8SVakul Garg 	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
495d829e9c4SDaniel Borkmann 		schedule_delayed_work(&ctx->tx_work.work, 1);
496a42055e8SVakul Garg }
497a42055e8SVakul Garg 
/* Encrypt the open record in place.  Builds the per-record IV (CCM
 * ciphers carry a constant B0 byte in IV[0]) XOR-ed with the record
 * sequence number, shifts the first payload sg entry past the TLS
 * prepend bytes, queues the record on tx_list, and fires the AEAD
 * request with tls_encrypt_done() as completion.  On success (sync or
 * -EINPROGRESS) the record is detached from open_rec and the TX
 * sequence number advances; any other error unlinks the record and is
 * returned.
 */
498a42055e8SVakul Garg static int tls_do_encryption(struct sock *sk,
499a42055e8SVakul Garg 			     struct tls_context *tls_ctx,
500a447da7dSDaniel Borkmann 			     struct tls_sw_context_tx *ctx,
501a447da7dSDaniel Borkmann 			     struct aead_request *aead_req,
502d829e9c4SDaniel Borkmann 			     size_t data_len, u32 start)
5033c4d7559SDave Watson {
5044509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
505a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
506d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
507d829e9c4SDaniel Borkmann 	struct scatterlist *sge = sk_msg_elem(msg_en, start);
508f295b3aeSVakul Garg 	int rc, iv_offset = 0;
5093c4d7559SDave Watson 
510f295b3aeSVakul Garg 	/* For CCM based ciphers, first byte of IV is a constant */
511128cfb88STianjia Zhang 	switch (prot->cipher_type) {
512128cfb88STianjia Zhang 	case TLS_CIPHER_AES_CCM_128:
513f295b3aeSVakul Garg 		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
514f295b3aeSVakul Garg 		iv_offset = 1;
515128cfb88STianjia Zhang 		break;
516128cfb88STianjia Zhang 	case TLS_CIPHER_SM4_CCM:
517128cfb88STianjia Zhang 		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
518128cfb88STianjia Zhang 		iv_offset = 1;
519128cfb88STianjia Zhang 		break;
520f295b3aeSVakul Garg 	}
521f295b3aeSVakul Garg 
522f295b3aeSVakul Garg 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
523f295b3aeSVakul Garg 	       prot->iv_size + prot->salt_size);
524f295b3aeSVakul Garg 
	/* Per-record nonce: static IV XOR record sequence number */
52558790314SJakub Kicinski 	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
52658790314SJakub Kicinski 			    tls_ctx->tx.rec_seq);
52732eb67b9SDave Watson 
	/* Skip the TLS header/prepend; tls_encrypt_done() restores this */
5284509de14SVakul Garg 	sge->offset += prot->prepend_size;
5294509de14SVakul Garg 	sge->length -= prot->prepend_size;
5303c4d7559SDave Watson 
531d829e9c4SDaniel Borkmann 	msg_en->sg.curr = start;
5324e6d4720SVakul Garg 
5333c4d7559SDave Watson 	aead_request_set_tfm(aead_req, ctx->aead_send);
5344509de14SVakul Garg 	aead_request_set_ad(aead_req, prot->aad_size);
535d829e9c4SDaniel Borkmann 	aead_request_set_crypt(aead_req, rec->sg_aead_in,
536d829e9c4SDaniel Borkmann 			       rec->sg_aead_out,
53732eb67b9SDave Watson 			       data_len, rec->iv_data);
538a54667f6SVakul Garg 
539a54667f6SVakul Garg 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
540d3777ceaSHerbert Xu 				  tls_encrypt_done, rec);
541a54667f6SVakul Garg 
5429932a29aSVakul Garg 	/* Add the record in tx_list */
5439932a29aSVakul Garg 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
544a42055e8SVakul Garg 	atomic_inc(&ctx->encrypt_pending);
5453c4d7559SDave Watson 
546a42055e8SVakul Garg 	rc = crypto_aead_encrypt(aead_req);
	/* Completed synchronously (success or error): drop the pending
	 * count and undo the prepend adjustment ourselves.
	 * NOTE(review): "!rc ||" is redundant here — the condition is
	 * equivalent to (rc != -EINPROGRESS).
	 */
547a42055e8SVakul Garg 	if (!rc || rc != -EINPROGRESS) {
548a42055e8SVakul Garg 		atomic_dec(&ctx->encrypt_pending);
5494509de14SVakul Garg 		sge->offset -= prot->prepend_size;
5504509de14SVakul Garg 		sge->length += prot->prepend_size;
551a42055e8SVakul Garg 	}
5523c4d7559SDave Watson 
5539932a29aSVakul Garg 	if (!rc) {
5549932a29aSVakul Garg 		WRITE_ONCE(rec->tx_ready, true);
5559932a29aSVakul Garg 	} else if (rc != -EINPROGRESS) {
		/* Hard failure: record cannot stay queued for tx */
5569932a29aSVakul Garg 		list_del(&rec->list);
557a42055e8SVakul Garg 		return rc;
5589932a29aSVakul Garg 	}
559a42055e8SVakul Garg 
560a42055e8SVakul Garg 	/* Unhook the record from context if encryption is not failure */
561a42055e8SVakul Garg 	ctx->open_rec = NULL;
562fb0f886fSJakub Kicinski 	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
5633c4d7559SDave Watson 	return rc;
5643c4d7559SDave Watson }
5653c4d7559SDave Watson 
566d3b18ad3SJohn Fastabend static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
567d3b18ad3SJohn Fastabend 				 struct tls_rec **to, struct sk_msg *msg_opl,
568d3b18ad3SJohn Fastabend 				 struct sk_msg *msg_oen, u32 split_point,
569d3b18ad3SJohn Fastabend 				 u32 tx_overhead_size, u32 *orig_end)
570d3b18ad3SJohn Fastabend {
571d3b18ad3SJohn Fastabend 	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
572d3b18ad3SJohn Fastabend 	struct scatterlist *sge, *osge, *nsge;
573d3b18ad3SJohn Fastabend 	u32 orig_size = msg_opl->sg.size;
574d3b18ad3SJohn Fastabend 	struct scatterlist tmp = { };
575d3b18ad3SJohn Fastabend 	struct sk_msg *msg_npl;
576d3b18ad3SJohn Fastabend 	struct tls_rec *new;
577d3b18ad3SJohn Fastabend 	int ret;
578d3b18ad3SJohn Fastabend 
579d3b18ad3SJohn Fastabend 	new = tls_get_rec(sk);
580d3b18ad3SJohn Fastabend 	if (!new)
581d3b18ad3SJohn Fastabend 		return -ENOMEM;
582d3b18ad3SJohn Fastabend 	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
583d3b18ad3SJohn Fastabend 			   tx_overhead_size, 0);
584d3b18ad3SJohn Fastabend 	if (ret < 0) {
585d3b18ad3SJohn Fastabend 		tls_free_rec(sk, new);
586d3b18ad3SJohn Fastabend 		return ret;
587d3b18ad3SJohn Fastabend 	}
588d3b18ad3SJohn Fastabend 
589d3b18ad3SJohn Fastabend 	*orig_end = msg_opl->sg.end;
590d3b18ad3SJohn Fastabend 	i = msg_opl->sg.start;
591d3b18ad3SJohn Fastabend 	sge = sk_msg_elem(msg_opl, i);
592d3b18ad3SJohn Fastabend 	while (apply && sge->length) {
593d3b18ad3SJohn Fastabend 		if (sge->length > apply) {
594d3b18ad3SJohn Fastabend 			u32 len = sge->length - apply;
595d3b18ad3SJohn Fastabend 
596d3b18ad3SJohn Fastabend 			get_page(sg_page(sge));
597d3b18ad3SJohn Fastabend 			sg_set_page(&tmp, sg_page(sge), len,
598d3b18ad3SJohn Fastabend 				    sge->offset + apply);
599d3b18ad3SJohn Fastabend 			sge->length = apply;
600d3b18ad3SJohn Fastabend 			bytes += apply;
601d3b18ad3SJohn Fastabend 			apply = 0;
602d3b18ad3SJohn Fastabend 		} else {
603d3b18ad3SJohn Fastabend 			apply -= sge->length;
604d3b18ad3SJohn Fastabend 			bytes += sge->length;
605d3b18ad3SJohn Fastabend 		}
606d3b18ad3SJohn Fastabend 
607d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(i);
608d3b18ad3SJohn Fastabend 		if (i == msg_opl->sg.end)
609d3b18ad3SJohn Fastabend 			break;
610d3b18ad3SJohn Fastabend 		sge = sk_msg_elem(msg_opl, i);
611d3b18ad3SJohn Fastabend 	}
612d3b18ad3SJohn Fastabend 
613d3b18ad3SJohn Fastabend 	msg_opl->sg.end = i;
614d3b18ad3SJohn Fastabend 	msg_opl->sg.curr = i;
615d3b18ad3SJohn Fastabend 	msg_opl->sg.copybreak = 0;
616d3b18ad3SJohn Fastabend 	msg_opl->apply_bytes = 0;
617d3b18ad3SJohn Fastabend 	msg_opl->sg.size = bytes;
618d3b18ad3SJohn Fastabend 
619d3b18ad3SJohn Fastabend 	msg_npl = &new->msg_plaintext;
620d3b18ad3SJohn Fastabend 	msg_npl->apply_bytes = apply;
621d3b18ad3SJohn Fastabend 	msg_npl->sg.size = orig_size - bytes;
622d3b18ad3SJohn Fastabend 
623d3b18ad3SJohn Fastabend 	j = msg_npl->sg.start;
624d3b18ad3SJohn Fastabend 	nsge = sk_msg_elem(msg_npl, j);
625d3b18ad3SJohn Fastabend 	if (tmp.length) {
626d3b18ad3SJohn Fastabend 		memcpy(nsge, &tmp, sizeof(*nsge));
627d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(j);
628d3b18ad3SJohn Fastabend 		nsge = sk_msg_elem(msg_npl, j);
629d3b18ad3SJohn Fastabend 	}
630d3b18ad3SJohn Fastabend 
631d3b18ad3SJohn Fastabend 	osge = sk_msg_elem(msg_opl, i);
632d3b18ad3SJohn Fastabend 	while (osge->length) {
633d3b18ad3SJohn Fastabend 		memcpy(nsge, osge, sizeof(*nsge));
634d3b18ad3SJohn Fastabend 		sg_unmark_end(nsge);
635d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(i);
636d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(j);
637d3b18ad3SJohn Fastabend 		if (i == *orig_end)
638d3b18ad3SJohn Fastabend 			break;
639d3b18ad3SJohn Fastabend 		osge = sk_msg_elem(msg_opl, i);
640d3b18ad3SJohn Fastabend 		nsge = sk_msg_elem(msg_npl, j);
641d3b18ad3SJohn Fastabend 	}
642d3b18ad3SJohn Fastabend 
643d3b18ad3SJohn Fastabend 	msg_npl->sg.end = j;
644d3b18ad3SJohn Fastabend 	msg_npl->sg.curr = j;
645d3b18ad3SJohn Fastabend 	msg_npl->sg.copybreak = 0;
646d3b18ad3SJohn Fastabend 
647d3b18ad3SJohn Fastabend 	*to = new;
648d3b18ad3SJohn Fastabend 	return 0;
649d3b18ad3SJohn Fastabend }
650d3b18ad3SJohn Fastabend 
651d3b18ad3SJohn Fastabend static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
652d3b18ad3SJohn Fastabend 				  struct tls_rec *from, u32 orig_end)
653d3b18ad3SJohn Fastabend {
654d3b18ad3SJohn Fastabend 	struct sk_msg *msg_npl = &from->msg_plaintext;
655d3b18ad3SJohn Fastabend 	struct sk_msg *msg_opl = &to->msg_plaintext;
656d3b18ad3SJohn Fastabend 	struct scatterlist *osge, *nsge;
657d3b18ad3SJohn Fastabend 	u32 i, j;
658d3b18ad3SJohn Fastabend 
659d3b18ad3SJohn Fastabend 	i = msg_opl->sg.end;
660d3b18ad3SJohn Fastabend 	sk_msg_iter_var_prev(i);
661d3b18ad3SJohn Fastabend 	j = msg_npl->sg.start;
662d3b18ad3SJohn Fastabend 
663d3b18ad3SJohn Fastabend 	osge = sk_msg_elem(msg_opl, i);
664d3b18ad3SJohn Fastabend 	nsge = sk_msg_elem(msg_npl, j);
665d3b18ad3SJohn Fastabend 
666d3b18ad3SJohn Fastabend 	if (sg_page(osge) == sg_page(nsge) &&
667d3b18ad3SJohn Fastabend 	    osge->offset + osge->length == nsge->offset) {
668d3b18ad3SJohn Fastabend 		osge->length += nsge->length;
669d3b18ad3SJohn Fastabend 		put_page(sg_page(nsge));
670d3b18ad3SJohn Fastabend 	}
671d3b18ad3SJohn Fastabend 
672d3b18ad3SJohn Fastabend 	msg_opl->sg.end = orig_end;
673d3b18ad3SJohn Fastabend 	msg_opl->sg.curr = orig_end;
674d3b18ad3SJohn Fastabend 	msg_opl->sg.copybreak = 0;
675d3b18ad3SJohn Fastabend 	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
676d3b18ad3SJohn Fastabend 	msg_opl->sg.size += msg_npl->sg.size;
677d3b18ad3SJohn Fastabend 
678d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &to->msg_encrypted);
679d3b18ad3SJohn Fastabend 	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
680d3b18ad3SJohn Fastabend 
681d3b18ad3SJohn Fastabend 	kfree(from);
682d3b18ad3SJohn Fastabend }
683d3b18ad3SJohn Fastabend 
6843c4d7559SDave Watson static int tls_push_record(struct sock *sk, int flags,
6853c4d7559SDave Watson 			   unsigned char record_type)
6863c4d7559SDave Watson {
6873c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
6884509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
689f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
690d3b18ad3SJohn Fastabend 	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
6913f649ab7SKees Cook 	u32 i, split_point, orig_end;
692d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl, *msg_en;
693a447da7dSDaniel Borkmann 	struct aead_request *req;
694d3b18ad3SJohn Fastabend 	bool split;
6953c4d7559SDave Watson 	int rc;
6963c4d7559SDave Watson 
697a42055e8SVakul Garg 	if (!rec)
698a42055e8SVakul Garg 		return 0;
699a447da7dSDaniel Borkmann 
700d829e9c4SDaniel Borkmann 	msg_pl = &rec->msg_plaintext;
701d829e9c4SDaniel Borkmann 	msg_en = &rec->msg_encrypted;
702d829e9c4SDaniel Borkmann 
703d3b18ad3SJohn Fastabend 	split_point = msg_pl->apply_bytes;
704d3b18ad3SJohn Fastabend 	split = split_point && split_point < msg_pl->sg.size;
705d468e477SJohn Fastabend 	if (unlikely((!split &&
706d468e477SJohn Fastabend 		      msg_pl->sg.size +
707d468e477SJohn Fastabend 		      prot->overhead_size > msg_en->sg.size) ||
708d468e477SJohn Fastabend 		     (split &&
709d468e477SJohn Fastabend 		      split_point +
710d468e477SJohn Fastabend 		      prot->overhead_size > msg_en->sg.size))) {
711d468e477SJohn Fastabend 		split = true;
712d468e477SJohn Fastabend 		split_point = msg_en->sg.size;
713d468e477SJohn Fastabend 	}
714d3b18ad3SJohn Fastabend 	if (split) {
715d3b18ad3SJohn Fastabend 		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
7164509de14SVakul Garg 					   split_point, prot->overhead_size,
717d3b18ad3SJohn Fastabend 					   &orig_end);
718d3b18ad3SJohn Fastabend 		if (rc < 0)
719d3b18ad3SJohn Fastabend 			return rc;
720d468e477SJohn Fastabend 		/* This can happen if above tls_split_open_record allocates
721d468e477SJohn Fastabend 		 * a single large encryption buffer instead of two smaller
722d468e477SJohn Fastabend 		 * ones. In this case adjust pointers and continue without
723d468e477SJohn Fastabend 		 * split.
724d468e477SJohn Fastabend 		 */
725d468e477SJohn Fastabend 		if (!msg_pl->sg.size) {
726d468e477SJohn Fastabend 			tls_merge_open_record(sk, rec, tmp, orig_end);
727d468e477SJohn Fastabend 			msg_pl = &rec->msg_plaintext;
728d468e477SJohn Fastabend 			msg_en = &rec->msg_encrypted;
729d468e477SJohn Fastabend 			split = false;
730d468e477SJohn Fastabend 		}
731d3b18ad3SJohn Fastabend 		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
7324509de14SVakul Garg 			    prot->overhead_size);
733d3b18ad3SJohn Fastabend 	}
734d3b18ad3SJohn Fastabend 
735a42055e8SVakul Garg 	rec->tx_flags = flags;
736a42055e8SVakul Garg 	req = &rec->aead_req;
7373c4d7559SDave Watson 
738d829e9c4SDaniel Borkmann 	i = msg_pl->sg.end;
739d829e9c4SDaniel Borkmann 	sk_msg_iter_var_prev(i);
740130b392cSDave Watson 
741130b392cSDave Watson 	rec->content_type = record_type;
7424509de14SVakul Garg 	if (prot->version == TLS_1_3_VERSION) {
743130b392cSDave Watson 		/* Add content type to end of message.  No padding added */
744130b392cSDave Watson 		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
745130b392cSDave Watson 		sg_mark_end(&rec->sg_content_type);
746130b392cSDave Watson 		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
747130b392cSDave Watson 			 &rec->sg_content_type);
748130b392cSDave Watson 	} else {
749d829e9c4SDaniel Borkmann 		sg_mark_end(sk_msg_elem(msg_pl, i));
750130b392cSDave Watson 	}
751a42055e8SVakul Garg 
7529aaaa568SJohn Fastabend 	if (msg_pl->sg.end < msg_pl->sg.start) {
7539aaaa568SJohn Fastabend 		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
7549aaaa568SJohn Fastabend 			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
7559aaaa568SJohn Fastabend 			 msg_pl->sg.data);
7569aaaa568SJohn Fastabend 	}
7579aaaa568SJohn Fastabend 
758d829e9c4SDaniel Borkmann 	i = msg_pl->sg.start;
7599e5ffed3SJakub Kicinski 	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
760d829e9c4SDaniel Borkmann 
761d829e9c4SDaniel Borkmann 	i = msg_en->sg.end;
762d829e9c4SDaniel Borkmann 	sk_msg_iter_var_prev(i);
763d829e9c4SDaniel Borkmann 	sg_mark_end(sk_msg_elem(msg_en, i));
764d829e9c4SDaniel Borkmann 
765d829e9c4SDaniel Borkmann 	i = msg_en->sg.start;
766d829e9c4SDaniel Borkmann 	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
767d829e9c4SDaniel Borkmann 
7684509de14SVakul Garg 	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
7696942a284SVadim Fedorenko 		     tls_ctx->tx.rec_seq, record_type, prot);
7703c4d7559SDave Watson 
7713c4d7559SDave Watson 	tls_fill_prepend(tls_ctx,
772d829e9c4SDaniel Borkmann 			 page_address(sg_page(&msg_en->sg.data[i])) +
773130b392cSDave Watson 			 msg_en->sg.data[i].offset,
7744509de14SVakul Garg 			 msg_pl->sg.size + prot->tail_size,
7756942a284SVadim Fedorenko 			 record_type);
7763c4d7559SDave Watson 
777d829e9c4SDaniel Borkmann 	tls_ctx->pending_open_record_frags = false;
7783c4d7559SDave Watson 
779130b392cSDave Watson 	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
7804509de14SVakul Garg 			       msg_pl->sg.size + prot->tail_size, i);
7813c4d7559SDave Watson 	if (rc < 0) {
782d3b18ad3SJohn Fastabend 		if (rc != -EINPROGRESS) {
783da353facSDaniel Jordan 			tls_err_abort(sk, -EBADMSG);
784d3b18ad3SJohn Fastabend 			if (split) {
785d3b18ad3SJohn Fastabend 				tls_ctx->pending_open_record_frags = true;
786d3b18ad3SJohn Fastabend 				tls_merge_open_record(sk, rec, tmp, orig_end);
787d3b18ad3SJohn Fastabend 			}
788d3b18ad3SJohn Fastabend 		}
7895b053e12SDave Watson 		ctx->async_capable = 1;
790a42055e8SVakul Garg 		return rc;
791d3b18ad3SJohn Fastabend 	} else if (split) {
792d3b18ad3SJohn Fastabend 		msg_pl = &tmp->msg_plaintext;
793d3b18ad3SJohn Fastabend 		msg_en = &tmp->msg_encrypted;
7944509de14SVakul Garg 		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
795d3b18ad3SJohn Fastabend 		tls_ctx->pending_open_record_frags = true;
796d3b18ad3SJohn Fastabend 		ctx->open_rec = tmp;
7973c4d7559SDave Watson 	}
7983c4d7559SDave Watson 
799a42055e8SVakul Garg 	return tls_tx_records(sk, flags);
8003c4d7559SDave Watson }
8013c4d7559SDave Watson 
802d3b18ad3SJohn Fastabend static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
803d3b18ad3SJohn Fastabend 			       bool full_record, u8 record_type,
804a7bff11fSVadim Fedorenko 			       ssize_t *copied, int flags)
8053c4d7559SDave Watson {
8063c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
807f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
808d3b18ad3SJohn Fastabend 	struct sk_msg msg_redir = { };
809d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
810d3b18ad3SJohn Fastabend 	struct sock *sk_redir;
811a42055e8SVakul Garg 	struct tls_rec *rec;
812a351d608SPengcheng Yang 	bool enospc, policy, redir_ingress;
813d3b18ad3SJohn Fastabend 	int err = 0, send;
8147246d8edSJohn Fastabend 	u32 delta = 0;
815a42055e8SVakul Garg 
8160608c69cSJohn Fastabend 	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
817d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
818d10523d0SJakub Kicinski 	if (!psock || !policy) {
819d10523d0SJakub Kicinski 		err = tls_push_record(sk, flags, record_type);
820635d9398SVadim Fedorenko 		if (err && sk->sk_err == EBADMSG) {
821d10523d0SJakub Kicinski 			*copied -= sk_msg_free(sk, msg);
822d10523d0SJakub Kicinski 			tls_free_open_rec(sk);
823635d9398SVadim Fedorenko 			err = -sk->sk_err;
824d10523d0SJakub Kicinski 		}
825095f5614SXiyu Yang 		if (psock)
826095f5614SXiyu Yang 			sk_psock_put(sk, psock);
827d10523d0SJakub Kicinski 		return err;
828d10523d0SJakub Kicinski 	}
829d3b18ad3SJohn Fastabend more_data:
830d3b18ad3SJohn Fastabend 	enospc = sk_msg_full(msg);
8317246d8edSJohn Fastabend 	if (psock->eval == __SK_NONE) {
8327246d8edSJohn Fastabend 		delta = msg->sg.size;
833d3b18ad3SJohn Fastabend 		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
8347246d8edSJohn Fastabend 		delta -= msg->sg.size;
8357246d8edSJohn Fastabend 	}
836d3b18ad3SJohn Fastabend 	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
837d3b18ad3SJohn Fastabend 	    !enospc && !full_record) {
838d3b18ad3SJohn Fastabend 		err = -ENOSPC;
839d3b18ad3SJohn Fastabend 		goto out_err;
840d3b18ad3SJohn Fastabend 	}
841d3b18ad3SJohn Fastabend 	msg->cork_bytes = 0;
842d3b18ad3SJohn Fastabend 	send = msg->sg.size;
843d3b18ad3SJohn Fastabend 	if (msg->apply_bytes && msg->apply_bytes < send)
844d3b18ad3SJohn Fastabend 		send = msg->apply_bytes;
845a42055e8SVakul Garg 
846d3b18ad3SJohn Fastabend 	switch (psock->eval) {
847d3b18ad3SJohn Fastabend 	case __SK_PASS:
848d3b18ad3SJohn Fastabend 		err = tls_push_record(sk, flags, record_type);
849635d9398SVadim Fedorenko 		if (err && sk->sk_err == EBADMSG) {
850d3b18ad3SJohn Fastabend 			*copied -= sk_msg_free(sk, msg);
851d3b18ad3SJohn Fastabend 			tls_free_open_rec(sk);
852635d9398SVadim Fedorenko 			err = -sk->sk_err;
853d3b18ad3SJohn Fastabend 			goto out_err;
854d3b18ad3SJohn Fastabend 		}
855d3b18ad3SJohn Fastabend 		break;
856d3b18ad3SJohn Fastabend 	case __SK_REDIRECT:
857a351d608SPengcheng Yang 		redir_ingress = psock->redir_ingress;
858d3b18ad3SJohn Fastabend 		sk_redir = psock->sk_redir;
859d3b18ad3SJohn Fastabend 		memcpy(&msg_redir, msg, sizeof(*msg));
860d3b18ad3SJohn Fastabend 		if (msg->apply_bytes < send)
861d3b18ad3SJohn Fastabend 			msg->apply_bytes = 0;
862d3b18ad3SJohn Fastabend 		else
863d3b18ad3SJohn Fastabend 			msg->apply_bytes -= send;
864d3b18ad3SJohn Fastabend 		sk_msg_return_zero(sk, msg, send);
865d3b18ad3SJohn Fastabend 		msg->sg.size -= send;
866d3b18ad3SJohn Fastabend 		release_sock(sk);
867a351d608SPengcheng Yang 		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
868a351d608SPengcheng Yang 					    &msg_redir, send, flags);
869d3b18ad3SJohn Fastabend 		lock_sock(sk);
870d3b18ad3SJohn Fastabend 		if (err < 0) {
871d3b18ad3SJohn Fastabend 			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
872d3b18ad3SJohn Fastabend 			msg->sg.size = 0;
873d3b18ad3SJohn Fastabend 		}
874d3b18ad3SJohn Fastabend 		if (msg->sg.size == 0)
875d3b18ad3SJohn Fastabend 			tls_free_open_rec(sk);
876d3b18ad3SJohn Fastabend 		break;
877d3b18ad3SJohn Fastabend 	case __SK_DROP:
878d3b18ad3SJohn Fastabend 	default:
879d3b18ad3SJohn Fastabend 		sk_msg_free_partial(sk, msg, send);
880d3b18ad3SJohn Fastabend 		if (msg->apply_bytes < send)
881d3b18ad3SJohn Fastabend 			msg->apply_bytes = 0;
882d3b18ad3SJohn Fastabend 		else
883d3b18ad3SJohn Fastabend 			msg->apply_bytes -= send;
884d3b18ad3SJohn Fastabend 		if (msg->sg.size == 0)
885d3b18ad3SJohn Fastabend 			tls_free_open_rec(sk);
8867246d8edSJohn Fastabend 		*copied -= (send + delta);
887d3b18ad3SJohn Fastabend 		err = -EACCES;
888d3b18ad3SJohn Fastabend 	}
889a42055e8SVakul Garg 
890d3b18ad3SJohn Fastabend 	if (likely(!err)) {
891d3b18ad3SJohn Fastabend 		bool reset_eval = !ctx->open_rec;
892d3b18ad3SJohn Fastabend 
893d3b18ad3SJohn Fastabend 		rec = ctx->open_rec;
894d3b18ad3SJohn Fastabend 		if (rec) {
895d3b18ad3SJohn Fastabend 			msg = &rec->msg_plaintext;
896d3b18ad3SJohn Fastabend 			if (!msg->apply_bytes)
897d3b18ad3SJohn Fastabend 				reset_eval = true;
898d3b18ad3SJohn Fastabend 		}
899d3b18ad3SJohn Fastabend 		if (reset_eval) {
900d3b18ad3SJohn Fastabend 			psock->eval = __SK_NONE;
901d3b18ad3SJohn Fastabend 			if (psock->sk_redir) {
902d3b18ad3SJohn Fastabend 				sock_put(psock->sk_redir);
903d3b18ad3SJohn Fastabend 				psock->sk_redir = NULL;
904d3b18ad3SJohn Fastabend 			}
905d3b18ad3SJohn Fastabend 		}
906d3b18ad3SJohn Fastabend 		if (rec)
907d3b18ad3SJohn Fastabend 			goto more_data;
908d3b18ad3SJohn Fastabend 	}
909d3b18ad3SJohn Fastabend  out_err:
910d3b18ad3SJohn Fastabend 	sk_psock_put(sk, psock);
911d3b18ad3SJohn Fastabend 	return err;
912d3b18ad3SJohn Fastabend }
913d3b18ad3SJohn Fastabend 
914d3b18ad3SJohn Fastabend static int tls_sw_push_pending_record(struct sock *sk, int flags)
915d3b18ad3SJohn Fastabend {
916d3b18ad3SJohn Fastabend 	struct tls_context *tls_ctx = tls_get_ctx(sk);
917d3b18ad3SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
918d3b18ad3SJohn Fastabend 	struct tls_rec *rec = ctx->open_rec;
919d3b18ad3SJohn Fastabend 	struct sk_msg *msg_pl;
920d3b18ad3SJohn Fastabend 	size_t copied;
921d3b18ad3SJohn Fastabend 
922a42055e8SVakul Garg 	if (!rec)
923d3b18ad3SJohn Fastabend 		return 0;
924a42055e8SVakul Garg 
925d829e9c4SDaniel Borkmann 	msg_pl = &rec->msg_plaintext;
926d3b18ad3SJohn Fastabend 	copied = msg_pl->sg.size;
927d3b18ad3SJohn Fastabend 	if (!copied)
928d3b18ad3SJohn Fastabend 		return 0;
929a42055e8SVakul Garg 
930d3b18ad3SJohn Fastabend 	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
931d3b18ad3SJohn Fastabend 				   &copied, flags);
932a42055e8SVakul Garg }
933a42055e8SVakul Garg 
934fe1e81d4SDavid Howells static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
935fe1e81d4SDavid Howells 				 struct sk_msg *msg_pl, size_t try_to_copy,
936fe1e81d4SDavid Howells 				 ssize_t *copied)
937fe1e81d4SDavid Howells {
938fe1e81d4SDavid Howells 	struct page *page = NULL, **pages = &page;
939fe1e81d4SDavid Howells 
940fe1e81d4SDavid Howells 	do {
941fe1e81d4SDavid Howells 		ssize_t part;
942fe1e81d4SDavid Howells 		size_t off;
943fe1e81d4SDavid Howells 
944fe1e81d4SDavid Howells 		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
945fe1e81d4SDavid Howells 					      try_to_copy, 1, 0, &off);
946fe1e81d4SDavid Howells 		if (part <= 0)
947fe1e81d4SDavid Howells 			return part ?: -EIO;
948fe1e81d4SDavid Howells 
949fe1e81d4SDavid Howells 		if (WARN_ON_ONCE(!sendpage_ok(page))) {
950fe1e81d4SDavid Howells 			iov_iter_revert(&msg->msg_iter, part);
951fe1e81d4SDavid Howells 			return -EIO;
952fe1e81d4SDavid Howells 		}
953fe1e81d4SDavid Howells 
954fe1e81d4SDavid Howells 		sk_msg_page_add(msg_pl, page, part, off);
955fe1e81d4SDavid Howells 		sk_mem_charge(sk, part);
956fe1e81d4SDavid Howells 		*copied += part;
957fe1e81d4SDavid Howells 		try_to_copy -= part;
958fe1e81d4SDavid Howells 	} while (try_to_copy && !sk_msg_full(msg_pl));
959fe1e81d4SDavid Howells 
960fe1e81d4SDavid Howells 	return 0;
961fe1e81d4SDavid Howells }
962fe1e81d4SDavid Howells 
96345e5be84SDavid Howells static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
96445e5be84SDavid Howells 				 size_t size)
965a42055e8SVakul Garg {
9663c4d7559SDave Watson 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
967a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
9684509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
969a42055e8SVakul Garg 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
9705b053e12SDave Watson 	bool async_capable = ctx->async_capable;
971a42055e8SVakul Garg 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
97200e23707SDavid Howells 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
9733c4d7559SDave Watson 	bool eor = !(msg->msg_flags & MSG_MORE);
974a7bff11fSVadim Fedorenko 	size_t try_to_copy;
975a7bff11fSVadim Fedorenko 	ssize_t copied = 0;
976d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl, *msg_en;
977a42055e8SVakul Garg 	struct tls_rec *rec;
978a42055e8SVakul Garg 	int required_size;
979a42055e8SVakul Garg 	int num_async = 0;
9803c4d7559SDave Watson 	bool full_record;
981a42055e8SVakul Garg 	int record_room;
982a42055e8SVakul Garg 	int num_zc = 0;
9833c4d7559SDave Watson 	int orig_size;
9844128c0cfSVakul Garg 	int ret = 0;
9850cada332SVinay Kumar Yadav 	int pending;
9863c4d7559SDave Watson 
987e22e358bSHannes Reinecke 	if (!eor && (msg->msg_flags & MSG_EOR))
988e22e358bSHannes Reinecke 		return -EINVAL;
989e22e358bSHannes Reinecke 
9903c4d7559SDave Watson 	if (unlikely(msg->msg_controllen)) {
99158790314SJakub Kicinski 		ret = tls_process_cmsg(sk, msg, &record_type);
992a42055e8SVakul Garg 		if (ret) {
993a42055e8SVakul Garg 			if (ret == -EINPROGRESS)
994a42055e8SVakul Garg 				num_async++;
995a42055e8SVakul Garg 			else if (ret != -EAGAIN)
9963c4d7559SDave Watson 				goto send_end;
9973c4d7559SDave Watson 		}
998a42055e8SVakul Garg 	}
9993c4d7559SDave Watson 
10003c4d7559SDave Watson 	while (msg_data_left(msg)) {
10013c4d7559SDave Watson 		if (sk->sk_err) {
100230be8f8dSr.hering@avm.de 			ret = -sk->sk_err;
10033c4d7559SDave Watson 			goto send_end;
10043c4d7559SDave Watson 		}
10053c4d7559SDave Watson 
1006d3b18ad3SJohn Fastabend 		if (ctx->open_rec)
1007d3b18ad3SJohn Fastabend 			rec = ctx->open_rec;
1008d3b18ad3SJohn Fastabend 		else
1009d3b18ad3SJohn Fastabend 			rec = ctx->open_rec = tls_get_rec(sk);
1010a42055e8SVakul Garg 		if (!rec) {
1011a42055e8SVakul Garg 			ret = -ENOMEM;
1012a42055e8SVakul Garg 			goto send_end;
1013a42055e8SVakul Garg 		}
1014a42055e8SVakul Garg 
1015d829e9c4SDaniel Borkmann 		msg_pl = &rec->msg_plaintext;
1016d829e9c4SDaniel Borkmann 		msg_en = &rec->msg_encrypted;
1017d829e9c4SDaniel Borkmann 
1018d829e9c4SDaniel Borkmann 		orig_size = msg_pl->sg.size;
10193c4d7559SDave Watson 		full_record = false;
10203c4d7559SDave Watson 		try_to_copy = msg_data_left(msg);
1021d829e9c4SDaniel Borkmann 		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
10223c4d7559SDave Watson 		if (try_to_copy >= record_room) {
10233c4d7559SDave Watson 			try_to_copy = record_room;
10243c4d7559SDave Watson 			full_record = true;
10253c4d7559SDave Watson 		}
10263c4d7559SDave Watson 
1027d829e9c4SDaniel Borkmann 		required_size = msg_pl->sg.size + try_to_copy +
10284509de14SVakul Garg 				prot->overhead_size;
10293c4d7559SDave Watson 
10303c4d7559SDave Watson 		if (!sk_stream_memory_free(sk))
10313c4d7559SDave Watson 			goto wait_for_sndbuf;
1032a42055e8SVakul Garg 
10333c4d7559SDave Watson alloc_encrypted:
1034d829e9c4SDaniel Borkmann 		ret = tls_alloc_encrypted_msg(sk, required_size);
10353c4d7559SDave Watson 		if (ret) {
10363c4d7559SDave Watson 			if (ret != -ENOSPC)
10373c4d7559SDave Watson 				goto wait_for_memory;
10383c4d7559SDave Watson 
10393c4d7559SDave Watson 			/* Adjust try_to_copy according to the amount that was
10403c4d7559SDave Watson 			 * actually allocated. The difference is due
10413c4d7559SDave Watson 			 * to max sg elements limit
10423c4d7559SDave Watson 			 */
1043d829e9c4SDaniel Borkmann 			try_to_copy -= required_size - msg_en->sg.size;
10443c4d7559SDave Watson 			full_record = true;
10453c4d7559SDave Watson 		}
1046a42055e8SVakul Garg 
1047fe1e81d4SDavid Howells 		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
1048fe1e81d4SDavid Howells 			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
1049fe1e81d4SDavid Howells 						    try_to_copy, &copied);
1050fe1e81d4SDavid Howells 			if (ret < 0)
1051fe1e81d4SDavid Howells 				goto send_end;
1052fe1e81d4SDavid Howells 			tls_ctx->pending_open_record_frags = true;
1053fe1e81d4SDavid Howells 			if (full_record || eor || sk_msg_full(msg_pl))
1054fe1e81d4SDavid Howells 				goto copied;
1055fe1e81d4SDavid Howells 			continue;
1056fe1e81d4SDavid Howells 		}
1057fe1e81d4SDavid Howells 
1058a42055e8SVakul Garg 		if (!is_kvec && (full_record || eor) && !async_capable) {
1059d3b18ad3SJohn Fastabend 			u32 first = msg_pl->sg.end;
1060d3b18ad3SJohn Fastabend 
1061d829e9c4SDaniel Borkmann 			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1062d829e9c4SDaniel Borkmann 							msg_pl, try_to_copy);
10633c4d7559SDave Watson 			if (ret)
10643c4d7559SDave Watson 				goto fallback_to_reg_send;
10653c4d7559SDave Watson 
1066a42055e8SVakul Garg 			num_zc++;
10673c4d7559SDave Watson 			copied += try_to_copy;
1068d3b18ad3SJohn Fastabend 
1069d3b18ad3SJohn Fastabend 			sk_msg_sg_copy_set(msg_pl, first);
1070d3b18ad3SJohn Fastabend 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1071d3b18ad3SJohn Fastabend 						  record_type, &copied,
1072d3b18ad3SJohn Fastabend 						  msg->msg_flags);
1073a42055e8SVakul Garg 			if (ret) {
1074a42055e8SVakul Garg 				if (ret == -EINPROGRESS)
1075a42055e8SVakul Garg 					num_async++;
1076d3b18ad3SJohn Fastabend 				else if (ret == -ENOMEM)
1077d3b18ad3SJohn Fastabend 					goto wait_for_memory;
1078c329ef96SJakub Kicinski 				else if (ctx->open_rec && ret == -ENOSPC)
1079d3b18ad3SJohn Fastabend 					goto rollback_iter;
1080a42055e8SVakul Garg 				else if (ret != -EAGAIN)
10813c4d7559SDave Watson 					goto send_end;
1082a42055e8SVakul Garg 			}
10835a3611efSDoron Roberts-Kedes 			continue;
1084d3b18ad3SJohn Fastabend rollback_iter:
1085d3b18ad3SJohn Fastabend 			copied -= try_to_copy;
1086d3b18ad3SJohn Fastabend 			sk_msg_sg_copy_clear(msg_pl, first);
1087d3b18ad3SJohn Fastabend 			iov_iter_revert(&msg->msg_iter,
1088d3b18ad3SJohn Fastabend 					msg_pl->sg.size - orig_size);
10893c4d7559SDave Watson fallback_to_reg_send:
1090d829e9c4SDaniel Borkmann 			sk_msg_trim(sk, msg_pl, orig_size);
10913c4d7559SDave Watson 		}
10923c4d7559SDave Watson 
1093d829e9c4SDaniel Borkmann 		required_size = msg_pl->sg.size + try_to_copy;
10944e6d4720SVakul Garg 
1095d829e9c4SDaniel Borkmann 		ret = tls_clone_plaintext_msg(sk, required_size);
10963c4d7559SDave Watson 		if (ret) {
10973c4d7559SDave Watson 			if (ret != -ENOSPC)
10984e6d4720SVakul Garg 				goto send_end;
10993c4d7559SDave Watson 
11003c4d7559SDave Watson 			/* Adjust try_to_copy according to the amount that was
11013c4d7559SDave Watson 			 * actually allocated. The difference is due
11023c4d7559SDave Watson 			 * to max sg elements limit
11033c4d7559SDave Watson 			 */
1104d829e9c4SDaniel Borkmann 			try_to_copy -= required_size - msg_pl->sg.size;
11053c4d7559SDave Watson 			full_record = true;
11064509de14SVakul Garg 			sk_msg_trim(sk, msg_en,
11074509de14SVakul Garg 				    msg_pl->sg.size + prot->overhead_size);
11083c4d7559SDave Watson 		}
11093c4d7559SDave Watson 
111065a10e28SVakul Garg 		if (try_to_copy) {
111165a10e28SVakul Garg 			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
111265a10e28SVakul Garg 						       msg_pl, try_to_copy);
1113d829e9c4SDaniel Borkmann 			if (ret < 0)
11143c4d7559SDave Watson 				goto trim_sgl;
111565a10e28SVakul Garg 		}
11163c4d7559SDave Watson 
1117d829e9c4SDaniel Borkmann 		/* Open records defined only if successfully copied, otherwise
1118d829e9c4SDaniel Borkmann 		 * we would trim the sg but not reset the open record frags.
1119d829e9c4SDaniel Borkmann 		 */
1120d829e9c4SDaniel Borkmann 		tls_ctx->pending_open_record_frags = true;
11213c4d7559SDave Watson 		copied += try_to_copy;
1122fe1e81d4SDavid Howells copied:
11233c4d7559SDave Watson 		if (full_record || eor) {
1124d3b18ad3SJohn Fastabend 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1125d3b18ad3SJohn Fastabend 						  record_type, &copied,
1126d3b18ad3SJohn Fastabend 						  msg->msg_flags);
11273c4d7559SDave Watson 			if (ret) {
1128a42055e8SVakul Garg 				if (ret == -EINPROGRESS)
1129a42055e8SVakul Garg 					num_async++;
1130d3b18ad3SJohn Fastabend 				else if (ret == -ENOMEM)
1131d3b18ad3SJohn Fastabend 					goto wait_for_memory;
1132d3b18ad3SJohn Fastabend 				else if (ret != -EAGAIN) {
1133d3b18ad3SJohn Fastabend 					if (ret == -ENOSPC)
1134d3b18ad3SJohn Fastabend 						ret = 0;
11353c4d7559SDave Watson 					goto send_end;
11363c4d7559SDave Watson 				}
11373c4d7559SDave Watson 			}
1138d3b18ad3SJohn Fastabend 		}
11393c4d7559SDave Watson 
11403c4d7559SDave Watson 		continue;
11413c4d7559SDave Watson 
11423c4d7559SDave Watson wait_for_sndbuf:
11433c4d7559SDave Watson 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
11443c4d7559SDave Watson wait_for_memory:
11453c4d7559SDave Watson 		ret = sk_stream_wait_memory(sk, &timeo);
11463c4d7559SDave Watson 		if (ret) {
11473c4d7559SDave Watson trim_sgl:
1148c329ef96SJakub Kicinski 			if (ctx->open_rec)
1149d829e9c4SDaniel Borkmann 				tls_trim_both_msgs(sk, orig_size);
11503c4d7559SDave Watson 			goto send_end;
11513c4d7559SDave Watson 		}
11523c4d7559SDave Watson 
1153c329ef96SJakub Kicinski 		if (ctx->open_rec && msg_en->sg.size < required_size)
11543c4d7559SDave Watson 			goto alloc_encrypted;
11553c4d7559SDave Watson 	}
11563c4d7559SDave Watson 
1157a42055e8SVakul Garg 	if (!num_async) {
1158a42055e8SVakul Garg 		goto send_end;
1159a42055e8SVakul Garg 	} else if (num_zc) {
1160a42055e8SVakul Garg 		/* Wait for pending encryptions to get completed */
11610cada332SVinay Kumar Yadav 		spin_lock_bh(&ctx->encrypt_compl_lock);
11620cada332SVinay Kumar Yadav 		ctx->async_notify = true;
1163a42055e8SVakul Garg 
11640cada332SVinay Kumar Yadav 		pending = atomic_read(&ctx->encrypt_pending);
11650cada332SVinay Kumar Yadav 		spin_unlock_bh(&ctx->encrypt_compl_lock);
11660cada332SVinay Kumar Yadav 		if (pending)
1167a42055e8SVakul Garg 			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1168a42055e8SVakul Garg 		else
1169a42055e8SVakul Garg 			reinit_completion(&ctx->async_wait.completion);
1170a42055e8SVakul Garg 
11710cada332SVinay Kumar Yadav 		/* There can be no concurrent accesses, since we have no
11720cada332SVinay Kumar Yadav 		 * pending encrypt operations
11730cada332SVinay Kumar Yadav 		 */
1174a42055e8SVakul Garg 		WRITE_ONCE(ctx->async_notify, false);
1175a42055e8SVakul Garg 
1176a42055e8SVakul Garg 		if (ctx->async_wait.err) {
1177a42055e8SVakul Garg 			ret = ctx->async_wait.err;
1178a42055e8SVakul Garg 			copied = 0;
1179a42055e8SVakul Garg 		}
1180a42055e8SVakul Garg 	}
1181a42055e8SVakul Garg 
1182a42055e8SVakul Garg 	/* Transmit if any encryptions have completed */
1183a42055e8SVakul Garg 	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1184a42055e8SVakul Garg 		cancel_delayed_work(&ctx->tx_work.work);
1185a42055e8SVakul Garg 		tls_tx_records(sk, msg->msg_flags);
1186a42055e8SVakul Garg 	}
1187a42055e8SVakul Garg 
11883c4d7559SDave Watson send_end:
11893c4d7559SDave Watson 	ret = sk_stream_error(sk, msg->msg_flags, ret);
119045e5be84SDavid Howells 	return copied > 0 ? copied : ret;
119145e5be84SDavid Howells }
11923c4d7559SDave Watson 
119345e5be84SDavid Howells int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
119445e5be84SDavid Howells {
119545e5be84SDavid Howells 	struct tls_context *tls_ctx = tls_get_ctx(sk);
119645e5be84SDavid Howells 	int ret;
119745e5be84SDavid Howells 
119845e5be84SDavid Howells 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1199e22e358bSHannes Reinecke 			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
1200b848b26cSDavid Howells 			       MSG_SENDPAGE_NOPOLICY))
120145e5be84SDavid Howells 		return -EOPNOTSUPP;
120245e5be84SDavid Howells 
120345e5be84SDavid Howells 	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
120445e5be84SDavid Howells 	if (ret)
120545e5be84SDavid Howells 		return ret;
120645e5be84SDavid Howells 	lock_sock(sk);
120745e5be84SDavid Howells 	ret = tls_sw_sendmsg_locked(sk, msg, size);
12083c4d7559SDave Watson 	release_sock(sk);
120979ffe608SJakub Kicinski 	mutex_unlock(&tls_ctx->tx_lock);
121045e5be84SDavid Howells 	return ret;
12113c4d7559SDave Watson }
12123c4d7559SDave Watson 
/*
 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
 *
 * If a previous splice left a record open, try to push it out now: run the
 * BPF TX verdict/transmit path, wait for any in-flight async encryptions to
 * finish, then transmit whatever completed.
 */
void tls_sw_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	struct sk_msg *msg_pl;
	ssize_t copied = 0;
	bool retrying = false;
	int ret = 0;
	int pending;

	/* Nothing to flush if there is no open record. */
	if (!ctx->open_rec)
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

retry:
	/* Re-check under the locks - the record may be gone by now. */
	rec = ctx->open_rec;
	if (!rec)
		goto unlock;

	msg_pl = &rec->msg_plaintext;

	/* Check the BPF advisor and perform transmission. */
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
	switch (ret) {
	case 0:
	case -EAGAIN:
		/* Retry exactly once - the open record may have changed. */
		if (retrying)
			goto unlock;
		retrying = true;
		goto retry;
	case -EINPROGRESS:
		/* Async encryption in flight - wait for it below. */
		break;
	default:
		goto unlock;
	}

	/* Wait for pending encryptions to get completed */
	spin_lock_bh(&ctx->encrypt_compl_lock);
	ctx->async_notify = true;

	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);
	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	else
		reinit_completion(&ctx->async_wait.completion);

	/* There can be no concurrent accesses, since we have no pending
	 * encrypt operations
	 */
	WRITE_ONCE(ctx->async_notify, false);

	if (ctx->async_wait.err)
		goto unlock;

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}
1286df720d28SDavid Howells 
/* Wait until the strparser has a complete record for us.
 *
 * Returns 1 when a record is ready (and loaded via tls_strp_msg_load()),
 * 0 when the caller should look elsewhere (psock data queued, receive side
 * shut down, or SOCK_DONE), or a negative error (pending socket error,
 * -EAGAIN when the timeout expires, or a signal-interruption errno).
 * @released tracks whether the socket lock was dropped while sleeping.
 */
static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeo;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		/* Data sitting on the TCP queue may complete a record -
		 * kick the strparser before deciding to sleep.
		 */
		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		/* sk_wait_event() drops the socket lock while sleeping */
		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      tls_strp_msg_ready(ctx) ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	tls_strp_msg_load(&ctx->strp, released);

	return 1;
}
1339c46234ebSDave Watson 
1340d4bd88e6SJakub Kicinski static int tls_setup_from_iter(struct iov_iter *from,
1341d829e9c4SDaniel Borkmann 			       int length, int *pages_used,
1342d829e9c4SDaniel Borkmann 			       struct scatterlist *to,
1343d829e9c4SDaniel Borkmann 			       int to_max_pages)
1344d829e9c4SDaniel Borkmann {
1345d829e9c4SDaniel Borkmann 	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1346d829e9c4SDaniel Borkmann 	struct page *pages[MAX_SKB_FRAGS];
1347d4bd88e6SJakub Kicinski 	unsigned int size = 0;
1348d829e9c4SDaniel Borkmann 	ssize_t copied, use;
1349d829e9c4SDaniel Borkmann 	size_t offset;
1350d829e9c4SDaniel Borkmann 
1351d829e9c4SDaniel Borkmann 	while (length > 0) {
1352d829e9c4SDaniel Borkmann 		i = 0;
1353d829e9c4SDaniel Borkmann 		maxpages = to_max_pages - num_elem;
1354d829e9c4SDaniel Borkmann 		if (maxpages == 0) {
1355d829e9c4SDaniel Borkmann 			rc = -EFAULT;
1356d829e9c4SDaniel Borkmann 			goto out;
1357d829e9c4SDaniel Borkmann 		}
13581ef255e2SAl Viro 		copied = iov_iter_get_pages2(from, pages,
1359d829e9c4SDaniel Borkmann 					    length,
1360d829e9c4SDaniel Borkmann 					    maxpages, &offset);
1361d829e9c4SDaniel Borkmann 		if (copied <= 0) {
1362d829e9c4SDaniel Borkmann 			rc = -EFAULT;
1363d829e9c4SDaniel Borkmann 			goto out;
1364d829e9c4SDaniel Borkmann 		}
1365d829e9c4SDaniel Borkmann 
1366d829e9c4SDaniel Borkmann 		length -= copied;
1367d829e9c4SDaniel Borkmann 		size += copied;
1368d829e9c4SDaniel Borkmann 		while (copied) {
1369d829e9c4SDaniel Borkmann 			use = min_t(int, copied, PAGE_SIZE - offset);
1370d829e9c4SDaniel Borkmann 
1371d829e9c4SDaniel Borkmann 			sg_set_page(&to[num_elem],
1372d829e9c4SDaniel Borkmann 				    pages[i], use, offset);
1373d829e9c4SDaniel Borkmann 			sg_unmark_end(&to[num_elem]);
1374d829e9c4SDaniel Borkmann 			/* We do not uncharge memory from this API */
1375d829e9c4SDaniel Borkmann 
1376d829e9c4SDaniel Borkmann 			offset = 0;
1377d829e9c4SDaniel Borkmann 			copied -= use;
1378d829e9c4SDaniel Borkmann 
1379d829e9c4SDaniel Borkmann 			i++;
1380d829e9c4SDaniel Borkmann 			num_elem++;
1381d829e9c4SDaniel Borkmann 		}
1382d829e9c4SDaniel Borkmann 	}
1383d829e9c4SDaniel Borkmann 	/* Mark the end in the last sg entry if newly added */
1384d829e9c4SDaniel Borkmann 	if (num_elem > *pages_used)
1385d829e9c4SDaniel Borkmann 		sg_mark_end(&to[num_elem - 1]);
1386d829e9c4SDaniel Borkmann out:
1387d829e9c4SDaniel Borkmann 	if (rc)
1388d4bd88e6SJakub Kicinski 		iov_iter_revert(from, size);
1389d829e9c4SDaniel Borkmann 	*pages_used = num_elem;
1390d829e9c4SDaniel Borkmann 
1391d829e9c4SDaniel Borkmann 	return rc;
1392d829e9c4SDaniel Borkmann }
1393d829e9c4SDaniel Borkmann 
1394fd31f399SJakub Kicinski static struct sk_buff *
1395fd31f399SJakub Kicinski tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
1396fd31f399SJakub Kicinski 		     unsigned int full_len)
1397fd31f399SJakub Kicinski {
1398fd31f399SJakub Kicinski 	struct strp_msg *clr_rxm;
1399fd31f399SJakub Kicinski 	struct sk_buff *clr_skb;
1400fd31f399SJakub Kicinski 	int err;
1401fd31f399SJakub Kicinski 
1402fd31f399SJakub Kicinski 	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
1403fd31f399SJakub Kicinski 				       &err, sk->sk_allocation);
1404fd31f399SJakub Kicinski 	if (!clr_skb)
1405fd31f399SJakub Kicinski 		return NULL;
1406fd31f399SJakub Kicinski 
1407fd31f399SJakub Kicinski 	skb_copy_header(clr_skb, skb);
1408fd31f399SJakub Kicinski 	clr_skb->len = full_len;
1409fd31f399SJakub Kicinski 	clr_skb->data_len = full_len;
1410fd31f399SJakub Kicinski 
1411fd31f399SJakub Kicinski 	clr_rxm = strp_msg(clr_skb);
1412fd31f399SJakub Kicinski 	clr_rxm->offset = 0;
1413fd31f399SJakub Kicinski 
1414fd31f399SJakub Kicinski 	return clr_skb;
1415fd31f399SJakub Kicinski }
1416fd31f399SJakub Kicinski 
14178a958732SJakub Kicinski /* Decrypt handlers
14188a958732SJakub Kicinski  *
1419dd47ed36SJakub Kicinski  * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
14218a958732SJakub Kicinski  *       |          Input            |         Output
14228a958732SJakub Kicinski  * -------------------------------------------------------------------
14238a958732SJakub Kicinski  *    zc | Zero-copy decrypt allowed | Zero-copy performed
14248a958732SJakub Kicinski  * async | Async decrypt allowed     | Async crypto used / in progress
14256bd116c8SJakub Kicinski  *   skb |            *              | Output skb
1426b93f5700SJakub Kicinski  *
1427b93f5700SJakub Kicinski  * If ZC decryption was performed darg.skb will point to the input skb.
14288a958732SJakub Kicinski  */
14298a958732SJakub Kicinski 
/* This function decrypts the input skb into either out_iov or out_sg,
 * or into the skb buffers themselves. The input parameter 'darg->zc' indicates if
14320b243d00SVakul Garg  * zero-copy mode needs to be tried or not. With zero-copy mode, either
14330b243d00SVakul Garg  * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
14340b243d00SVakul Garg  * NULL, then the decryption happens inside skb buffers itself, i.e.
14358a958732SJakub Kicinski  * zero-copy gets disabled and 'darg->zc' is updated.
14360b243d00SVakul Garg  */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	/* sg entries needed to cover the ciphertext (record minus header) */
	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		/* Zero-copy: decrypt straight into the caller's buffers */
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		/* Fall back to decrypting into a freshly allocated skb */
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 *   aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		/* Full nonce comes from local state (salt + IV) */
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		/* Explicit IV is carried on the wire after the TLS header */
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	/* Prepare sgout: AAD first, then the destination for the cleartext */
	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		/* TLS 1.3 tail byte lands in dctx, not in user memory */
		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err)
		goto exit_free_pages;

	/* Output skb ownership moves to darg; don't free clear_skb below */
	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err)
			__skb_queue_tail(&ctx->async_hold, darg->skb);
		return err;
	}

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}
16010b243d00SVakul Garg 
/* Software decrypt handler: decrypt the current strparser record via
 * tls_decrypt_sg(). If an opportunistic TLS 1.3 zero-copy decrypt turns out
 * to contain a non-data record, retry once more without zero-copy. On
 * success the TLS 1.3 padding is trimmed from the resulting skb.
 */
static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		/* Only free the skb if we own it (non-ZC allocated copy) */
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}
1641dd47ed36SJakub Kicinski 
/* Device (TLS_HW) decrypt handler.
 *
 * Returns 0 when the device did not handle the record (caller falls back
 * to software decrypt), 1 when the record is fully handled (darg->skb set),
 * or a negative error.
 */
static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	/* Device already decrypted - nothing is in flight asynchronously */
	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}
1691dd47ed36SJakub Kicinski 
/* Decrypt one record: try the device handler first, fall back to software.
 * On success strips the TLS header/overhead from the output skb's strp_msg
 * and advances the expected RX record sequence number.
 */
static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	/* tls_decrypt_device() returns 0 when the device didn't handle it */
	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return 0;
}
1713dafb67f3SBoris Pismenny 
/* Decrypt the current strparser record directly into @sgout (zero-copy). */
int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
1720c46234ebSDave Watson 
/* Report the record type of @tlm to user space (TLS_GET_RECORD_TYPE cmsg)
 * and check it against *@control, the type of the read in progress.
 *
 * Returns 1 when this record may be consumed by the current read, 0 when
 * its type differs from the type already being returned (the caller must
 * stop before it), or a negative error (-EBADMSG for a zero record type,
 * -EIO when a control record's type cannot be delivered to the caller).
 */
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		/* First record of this read determines the type */
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		/* put_cmsg() failure is only fatal for non-data records */
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}
174306554f4fSJakub Kicinski 
/* Tell the strparser we are done with the current record. */
static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}
1748abb47dc9SJakub Kicinski 
/* Copy records that were already decrypted and queued on the rx_list into
 * the buffer provided by the caller (used when zero copy is not in effect).
 * Records are unlinked from the list and freed once fully consumed, unless
 * this is a peek (@is_peek). The first @skip bytes of queued data are
 * stepped over; at most @len bytes are copied.
 *
 * Returns the number of bytes copied, or an error if nothing was copied.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	/* Walk past the first @skip bytes of queued records */
	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto out;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto out;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto out;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from record if it is non-peek case*/
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}
	err = 0;

out:
	/* Report bytes copied if any, otherwise the (possibly zero) error */
	return copied ? : err;
}
1832692d7b5dSVakul Garg 
183384c61fe1SJakub Kicinski static bool
1834c46b0183SJakub Kicinski tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
1835c46b0183SJakub Kicinski 		       size_t len_left, size_t decrypted, ssize_t done,
1836c46b0183SJakub Kicinski 		       size_t *flushed_at)
1837c46b0183SJakub Kicinski {
1838c46b0183SJakub Kicinski 	size_t max_rec;
1839c46b0183SJakub Kicinski 
1840c46b0183SJakub Kicinski 	if (len_left <= decrypted)
184184c61fe1SJakub Kicinski 		return false;
1842c46b0183SJakub Kicinski 
1843c46b0183SJakub Kicinski 	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
1844c46b0183SJakub Kicinski 	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
184584c61fe1SJakub Kicinski 		return false;
1846c46b0183SJakub Kicinski 
1847c46b0183SJakub Kicinski 	*flushed_at = done;
184884c61fe1SJakub Kicinski 	return sk_flush_backlog(sk);
1849c46b0183SJakub Kicinski }
1850c46b0183SJakub Kicinski 
/* Become the exclusive reader of the TLS RX path.
 *
 * If another reader is present, sleep (up to the socket receive timeout)
 * until it leaves; sk_wait_event() drops the socket lock while sleeping.
 * Returns 0 on success, -EAGAIN on timeout, or a signal-interruption errno.
 */
static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
				 bool nonblock)
{
	long timeo;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (unlikely(ctx->reader_present)) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		/* Record contention so release knows to wake waiters */
		ctx->reader_contended = 1;

		add_wait_queue(&ctx->wq, &wait);
		sk_wait_event(sk, &timeo,
			      !READ_ONCE(ctx->reader_present), &wait);
		remove_wait_queue(&ctx->wq, &wait);

		if (timeo <= 0)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	WRITE_ONCE(ctx->reader_present, 1);

	return 0;
}
1878dde06aaaSJakub Kicinski 
1879f9ae3204SHannes Reinecke static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
1880f9ae3204SHannes Reinecke 			      bool nonblock)
1881f9ae3204SHannes Reinecke {
1882f9ae3204SHannes Reinecke 	int err;
1883f9ae3204SHannes Reinecke 
1884f9ae3204SHannes Reinecke 	lock_sock(sk);
1885f9ae3204SHannes Reinecke 	err = tls_rx_reader_acquire(sk, ctx, nonblock);
1886f9ae3204SHannes Reinecke 	if (err)
1887dde06aaaSJakub Kicinski 		release_sock(sk);
1888dde06aaaSJakub Kicinski 	return err;
18894cbc325eSJakub Kicinski }
18904cbc325eSJakub Kicinski 
/* Drop the single-reader ownership taken by tls_rx_reader_acquire() and,
 * if another reader signalled contention, wake one waiter.
 */
static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	if (unlikely(ctx->reader_contended)) {
		if (wq_has_sleeper(&ctx->wq))
			wake_up(&ctx->wq);
		else
			/* Nobody is actually sleeping on the queue: clear the
			 * flag so future releases skip the wake-up path.
			 */
			ctx->reader_contended = 0;

		/* Contention implies someone must currently own the reader */
		WARN_ON_ONCE(!ctx->reader_present);
	}

	WRITE_ONCE(ctx->reader_present, 0);
}
1904f9ae3204SHannes Reinecke 
/* Release RX reader ownership first, then the socket lock - the inverse
 * of tls_rx_reader_lock().
 */
static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	tls_rx_reader_release(sk, ctx);
	release_sock(sk);
}
19104cbc325eSJakub Kicinski 
1911c46234ebSDave Watson int tls_sw_recvmsg(struct sock *sk,
1912c46234ebSDave Watson 		   struct msghdr *msg,
1913c46234ebSDave Watson 		   size_t len,
1914c46234ebSDave Watson 		   int flags,
1915c46234ebSDave Watson 		   int *addr_len)
1916c46234ebSDave Watson {
1917c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1918f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
19194509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1920cbbdee99SJakub Kicinski 	ssize_t decrypted = 0, async_copy_bytes = 0;
1921d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
1922692d7b5dSVakul Garg 	unsigned char control = 0;
1923c46b0183SJakub Kicinski 	size_t flushed_at = 0;
1924c46234ebSDave Watson 	struct strp_msg *rxm;
19252b794c40SVakul Garg 	struct tls_msg *tlm;
1926c46234ebSDave Watson 	ssize_t copied = 0;
19277da18bccSJakub Kicinski 	bool async = false;
192870f03fc2SJakub Kicinski 	int target, err;
192900e23707SDavid Howells 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1930692d7b5dSVakul Garg 	bool is_peek = flags & MSG_PEEK;
193184c61fe1SJakub Kicinski 	bool released = true;
1932e91de6afSJohn Fastabend 	bool bpf_strp_enabled;
1933ba13609dSJakub Kicinski 	bool zc_capable;
1934c46234ebSDave Watson 
1935c46234ebSDave Watson 	if (unlikely(flags & MSG_ERRQUEUE))
1936c46234ebSDave Watson 		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1937c46234ebSDave Watson 
1938d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
193970f03fc2SJakub Kicinski 	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
194070f03fc2SJakub Kicinski 	if (err < 0)
194170f03fc2SJakub Kicinski 		return err;
1942e91de6afSJohn Fastabend 	bpf_strp_enabled = sk_psock_strp_enabled(psock);
1943c46234ebSDave Watson 
1944f314bfeeSJakub Kicinski 	/* If crypto failed the connection is broken */
1945f314bfeeSJakub Kicinski 	err = ctx->async_wait.err;
1946f314bfeeSJakub Kicinski 	if (err)
1947f314bfeeSJakub Kicinski 		goto end;
1948f314bfeeSJakub Kicinski 
1949692d7b5dSVakul Garg 	/* Process pending decrypted records. It must be non-zero-copy */
1950cbbdee99SJakub Kicinski 	err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
19514dcdd971SJakub Kicinski 	if (err < 0)
1952692d7b5dSVakul Garg 		goto end;
1953692d7b5dSVakul Garg 
1954d5123eddSJakub Kicinski 	copied = err;
195546a16959SJakub Kicinski 	if (len <= copied)
1956bfc06e1aSJakub Kicinski 		goto end;
195746a16959SJakub Kicinski 
195846a16959SJakub Kicinski 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
195946a16959SJakub Kicinski 	len = len - copied;
1960692d7b5dSVakul Garg 
1961ba13609dSJakub Kicinski 	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
196288527790SJakub Kicinski 		ctx->zc_capable;
1963bfc06e1aSJakub Kicinski 	decrypted = 0;
1964b92a13d4SJakub Kicinski 	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
19656bd116c8SJakub Kicinski 		struct tls_decrypt_arg darg;
19669bdf75ccSJakub Kicinski 		int to_decrypt, chunk;
1967c46234ebSDave Watson 
196870f03fc2SJakub Kicinski 		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
196970f03fc2SJakub Kicinski 				      released);
197035560b7fSJakub Kicinski 		if (err <= 0) {
1971d3b18ad3SJohn Fastabend 			if (psock) {
19720775639cSJakub Kicinski 				chunk = sk_msg_recvmsg(sk, psock, msg, len,
19732bc793e3SCong Wang 						       flags);
1974008141deSJakub Kicinski 				if (chunk > 0) {
1975008141deSJakub Kicinski 					decrypted += chunk;
1976008141deSJakub Kicinski 					len -= chunk;
1977008141deSJakub Kicinski 					continue;
1978008141deSJakub Kicinski 				}
1979d3b18ad3SJohn Fastabend 			}
1980c46234ebSDave Watson 			goto recv_end;
1981d3b18ad3SJohn Fastabend 		}
1982c46234ebSDave Watson 
19836bd116c8SJakub Kicinski 		memset(&darg.inargs, 0, sizeof(darg.inargs));
19846bd116c8SJakub Kicinski 
198584c61fe1SJakub Kicinski 		rxm = strp_msg(tls_strp_msg(ctx));
198684c61fe1SJakub Kicinski 		tlm = tls_msg(tls_strp_msg(ctx));
198794524d8fSVakul Garg 
19884509de14SVakul Garg 		to_decrypt = rxm->full_len - prot->overhead_size;
1989fedf201eSDave Watson 
1990ba13609dSJakub Kicinski 		if (zc_capable && to_decrypt <= len &&
1991ba13609dSJakub Kicinski 		    tlm->control == TLS_RECORD_TYPE_DATA)
19924175eac3SJakub Kicinski 			darg.zc = true;
1993fedf201eSDave Watson 
1994c0ab4732SVakul Garg 		/* Do not use async mode if record is non-data */
1995c3f6bb74SJakub Kicinski 		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
19964175eac3SJakub Kicinski 			darg.async = ctx->async_capable;
1997c0ab4732SVakul Garg 		else
19984175eac3SJakub Kicinski 			darg.async = false;
1999c0ab4732SVakul Garg 
2000dd47ed36SJakub Kicinski 		err = tls_rx_one_record(sk, msg, &darg);
20013547a1f9SJakub Kicinski 		if (err < 0) {
2002da353facSDaniel Jordan 			tls_err_abort(sk, -EBADMSG);
2003fedf201eSDave Watson 			goto recv_end;
2004fedf201eSDave Watson 		}
2005fedf201eSDave Watson 
20063547a1f9SJakub Kicinski 		async |= darg.async;
20072b794c40SVakul Garg 
20082b794c40SVakul Garg 		/* If the type of records being processed is not known yet,
20092b794c40SVakul Garg 		 * set it to record type just dequeued. If it is already known,
20102b794c40SVakul Garg 		 * but does not match the record type just dequeued, go to end.
20112b794c40SVakul Garg 		 * We always get record type here since for tls1.2, record type
20122b794c40SVakul Garg 		 * is known just after record is dequeued from stream parser.
20132b794c40SVakul Garg 		 * For tls1.3, we disable async.
20142b794c40SVakul Garg 		 */
2015b93f5700SJakub Kicinski 		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
2016abb47dc9SJakub Kicinski 		if (err <= 0) {
2017b93f5700SJakub Kicinski 			DEBUG_NET_WARN_ON_ONCE(darg.zc);
2018abb47dc9SJakub Kicinski 			tls_rx_rec_done(ctx);
2019abb47dc9SJakub Kicinski put_on_rx_list_err:
2020b93f5700SJakub Kicinski 			__skb_queue_tail(&ctx->rx_list, darg.skb);
20212b794c40SVakul Garg 			goto recv_end;
2022abb47dc9SJakub Kicinski 		}
2023fedf201eSDave Watson 
2024c46b0183SJakub Kicinski 		/* periodically flush backlog, and feed strparser */
202584c61fe1SJakub Kicinski 		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
202684c61fe1SJakub Kicinski 						  decrypted + copied,
202784c61fe1SJakub Kicinski 						  &flushed_at);
2028c46b0183SJakub Kicinski 
2029abb47dc9SJakub Kicinski 		/* TLS 1.3 may have updated the length by more than overhead */
2030b93f5700SJakub Kicinski 		rxm = strp_msg(darg.skb);
2031abb47dc9SJakub Kicinski 		chunk = rxm->full_len;
2032abb47dc9SJakub Kicinski 		tls_rx_rec_done(ctx);
2033b1a2c178SJakub Kicinski 
2034cbbdee99SJakub Kicinski 		if (!darg.zc) {
2035cbbdee99SJakub Kicinski 			bool partially_consumed = chunk > len;
2036b93f5700SJakub Kicinski 			struct sk_buff *skb = darg.skb;
2037b93f5700SJakub Kicinski 
2038e20691faSJakub Kicinski 			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
2039cbbdee99SJakub Kicinski 
20409bdf75ccSJakub Kicinski 			if (async) {
2041cbbdee99SJakub Kicinski 				/* TLS 1.2-only, to_decrypt must be text len */
20429bdf75ccSJakub Kicinski 				chunk = min_t(int, to_decrypt, len);
2043cbbdee99SJakub Kicinski 				async_copy_bytes += chunk;
2044008141deSJakub Kicinski put_on_rx_list:
2045f940b6efSJakub Kicinski 				decrypted += chunk;
2046f940b6efSJakub Kicinski 				len -= chunk;
2047008141deSJakub Kicinski 				__skb_queue_tail(&ctx->rx_list, skb);
2048f940b6efSJakub Kicinski 				continue;
20499bdf75ccSJakub Kicinski 			}
2050c0ab4732SVakul Garg 
2051e91de6afSJohn Fastabend 			if (bpf_strp_enabled) {
205284c61fe1SJakub Kicinski 				released = true;
2053e91de6afSJohn Fastabend 				err = sk_psock_tls_strp_read(psock, skb);
2054e91de6afSJohn Fastabend 				if (err != __SK_PASS) {
2055e91de6afSJohn Fastabend 					rxm->offset = rxm->offset + rxm->full_len;
2056e91de6afSJohn Fastabend 					rxm->full_len = 0;
2057e91de6afSJohn Fastabend 					if (err == __SK_DROP)
2058e91de6afSJohn Fastabend 						consume_skb(skb);
2059e91de6afSJohn Fastabend 					continue;
2060e91de6afSJohn Fastabend 				}
2061e91de6afSJohn Fastabend 			}
2062e91de6afSJohn Fastabend 
2063f940b6efSJakub Kicinski 			if (partially_consumed)
2064692d7b5dSVakul Garg 				chunk = len;
206594524d8fSVakul Garg 
2066692d7b5dSVakul Garg 			err = skb_copy_datagram_msg(skb, rxm->offset,
2067692d7b5dSVakul Garg 						    msg, chunk);
2068abb47dc9SJakub Kicinski 			if (err < 0)
2069abb47dc9SJakub Kicinski 				goto put_on_rx_list_err;
2070692d7b5dSVakul Garg 
2071f940b6efSJakub Kicinski 			if (is_peek)
2072008141deSJakub Kicinski 				goto put_on_rx_list;
2073f940b6efSJakub Kicinski 
2074f940b6efSJakub Kicinski 			if (partially_consumed) {
2075f940b6efSJakub Kicinski 				rxm->offset += chunk;
2076f940b6efSJakub Kicinski 				rxm->full_len -= chunk;
2077008141deSJakub Kicinski 				goto put_on_rx_list;
2078692d7b5dSVakul Garg 			}
2079b93f5700SJakub Kicinski 
2080b93f5700SJakub Kicinski 			consume_skb(skb);
2081692d7b5dSVakul Garg 		}
2082c46234ebSDave Watson 
2083692d7b5dSVakul Garg 		decrypted += chunk;
2084692d7b5dSVakul Garg 		len -= chunk;
2085692d7b5dSVakul Garg 
2086f940b6efSJakub Kicinski 		/* Return full control message to userspace before trying
2087f940b6efSJakub Kicinski 		 * to parse another message type
2088c46234ebSDave Watson 		 */
2089c46234ebSDave Watson 		msg->msg_flags |= MSG_EOR;
20903fe16edfSVadim Fedorenko 		if (control != TLS_RECORD_TYPE_DATA)
2091f940b6efSJakub Kicinski 			break;
209204b25a54SJakub Kicinski 	}
2093c46234ebSDave Watson 
2094c46234ebSDave Watson recv_end:
20957da18bccSJakub Kicinski 	if (async) {
2096f314bfeeSJakub Kicinski 		int ret, pending;
20977da18bccSJakub Kicinski 
209894524d8fSVakul Garg 		/* Wait for all previously submitted records to be decrypted */
20990cada332SVinay Kumar Yadav 		spin_lock_bh(&ctx->decrypt_compl_lock);
210037943f04SJakub Kicinski 		reinit_completion(&ctx->async_wait.completion);
21010cada332SVinay Kumar Yadav 		pending = atomic_read(&ctx->decrypt_pending);
21020cada332SVinay Kumar Yadav 		spin_unlock_bh(&ctx->decrypt_compl_lock);
2103c618db2aSJakub Kicinski 		ret = 0;
2104c618db2aSJakub Kicinski 		if (pending)
2105f314bfeeSJakub Kicinski 			ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2106c618db2aSJakub Kicinski 		__skb_queue_purge(&ctx->async_hold);
2107c618db2aSJakub Kicinski 
2108f314bfeeSJakub Kicinski 		if (ret) {
2109f314bfeeSJakub Kicinski 			if (err >= 0 || err == -EINPROGRESS)
2110f314bfeeSJakub Kicinski 				err = ret;
2111692d7b5dSVakul Garg 			decrypted = 0;
2112692d7b5dSVakul Garg 			goto end;
211394524d8fSVakul Garg 		}
21140cada332SVinay Kumar Yadav 
2115692d7b5dSVakul Garg 		/* Drain records from the rx_list & copy if required */
2116692d7b5dSVakul Garg 		if (is_peek || is_kvec)
211706554f4fSJakub Kicinski 			err = process_rx_list(ctx, msg, &control, copied,
2118cbbdee99SJakub Kicinski 					      decrypted, is_peek);
2119692d7b5dSVakul Garg 		else
212006554f4fSJakub Kicinski 			err = process_rx_list(ctx, msg, &control, 0,
2121cbbdee99SJakub Kicinski 					      async_copy_bytes, is_peek);
21224d42cd6bSJakub Kicinski 		decrypted += max(err, 0);
2123692d7b5dSVakul Garg 	}
2124692d7b5dSVakul Garg 
2125692d7b5dSVakul Garg 	copied += decrypted;
2126692d7b5dSVakul Garg 
2127692d7b5dSVakul Garg end:
21284cbc325eSJakub Kicinski 	tls_rx_reader_unlock(sk, ctx);
2129d3b18ad3SJohn Fastabend 	if (psock)
2130d3b18ad3SJohn Fastabend 		sk_psock_put(sk, psock);
2131c46234ebSDave Watson 	return copied ? : err;
2132c46234ebSDave Watson }
2133c46234ebSDave Watson 
/* splice() read path for a software-kTLS socket.
 *
 * Handles at most one record per call: either a previously decrypted
 * record parked on rx_list, or the next record pulled from the stream
 * parser and decrypted here.  Control (non-DATA) records cannot be
 * spliced; they are put back on rx_list and -EINVAL is returned.
 */
ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int chunk;
	int err;

	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
	if (err < 0)
		return err;

	if (!skb_queue_empty(&ctx->rx_list)) {
		/* Reuse a record left over by a previous partial read */
		skb = __skb_dequeue(&ctx->rx_list);
	} else {
		struct tls_decrypt_arg darg;

		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
				      true);
		if (err <= 0)
			goto splice_read_end;

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		err = tls_rx_one_record(sk, NULL, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto splice_read_end;
		}

		tls_rx_rec_done(ctx);
		skb = darg.skb;
	}

	rxm = strp_msg(skb);
	tlm = tls_msg(skb);

	/* splice does not support reading control messages */
	if (tlm->control != TLS_RECORD_TYPE_DATA) {
		err = -EINVAL;
		goto splice_requeue;
	}

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_requeue;

	/* NOTE(review): this assumes a non-negative skb_splice_bits() return
	 * consumed the full @chunk; a short splice would leave offset/full_len
	 * over-advanced - confirm partial splice cannot happen here.
	 */
	if (chunk < rxm->full_len) {
		/* chunk == len in this branch (chunk = min(full_len, len));
		 * keep the unread tail of the record for the next call.
		 */
		rxm->offset += len;
		rxm->full_len -= len;
		goto splice_requeue;
	}

	consume_skb(skb);

splice_read_end:
	tls_rx_reader_unlock(sk, ctx);
	return copied ? : err;

splice_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto splice_read_end;
}
2204c46234ebSDave Watson 
2205*662fbcecSHannes Reinecke int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
2206*662fbcecSHannes Reinecke 		     sk_read_actor_t read_actor)
2207*662fbcecSHannes Reinecke {
2208*662fbcecSHannes Reinecke 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2209*662fbcecSHannes Reinecke 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2210*662fbcecSHannes Reinecke 	struct tls_prot_info *prot = &tls_ctx->prot_info;
2211*662fbcecSHannes Reinecke 	struct strp_msg *rxm = NULL;
2212*662fbcecSHannes Reinecke 	struct sk_buff *skb = NULL;
2213*662fbcecSHannes Reinecke 	struct sk_psock *psock;
2214*662fbcecSHannes Reinecke 	size_t flushed_at = 0;
2215*662fbcecSHannes Reinecke 	bool released = true;
2216*662fbcecSHannes Reinecke 	struct tls_msg *tlm;
2217*662fbcecSHannes Reinecke 	ssize_t copied = 0;
2218*662fbcecSHannes Reinecke 	ssize_t decrypted;
2219*662fbcecSHannes Reinecke 	int err, used;
2220*662fbcecSHannes Reinecke 
2221*662fbcecSHannes Reinecke 	psock = sk_psock_get(sk);
2222*662fbcecSHannes Reinecke 	if (psock) {
2223*662fbcecSHannes Reinecke 		sk_psock_put(sk, psock);
2224*662fbcecSHannes Reinecke 		return -EINVAL;
2225*662fbcecSHannes Reinecke 	}
2226*662fbcecSHannes Reinecke 	err = tls_rx_reader_acquire(sk, ctx, true);
2227*662fbcecSHannes Reinecke 	if (err < 0)
2228*662fbcecSHannes Reinecke 		return err;
2229*662fbcecSHannes Reinecke 
2230*662fbcecSHannes Reinecke 	/* If crypto failed the connection is broken */
2231*662fbcecSHannes Reinecke 	err = ctx->async_wait.err;
2232*662fbcecSHannes Reinecke 	if (err)
2233*662fbcecSHannes Reinecke 		goto read_sock_end;
2234*662fbcecSHannes Reinecke 
2235*662fbcecSHannes Reinecke 	decrypted = 0;
2236*662fbcecSHannes Reinecke 	do {
2237*662fbcecSHannes Reinecke 		if (!skb_queue_empty(&ctx->rx_list)) {
2238*662fbcecSHannes Reinecke 			skb = __skb_dequeue(&ctx->rx_list);
2239*662fbcecSHannes Reinecke 			rxm = strp_msg(skb);
2240*662fbcecSHannes Reinecke 			tlm = tls_msg(skb);
2241*662fbcecSHannes Reinecke 		} else {
2242*662fbcecSHannes Reinecke 			struct tls_decrypt_arg darg;
2243*662fbcecSHannes Reinecke 			int to_decrypt;
2244*662fbcecSHannes Reinecke 
2245*662fbcecSHannes Reinecke 			err = tls_rx_rec_wait(sk, NULL, true, released);
2246*662fbcecSHannes Reinecke 			if (err <= 0)
2247*662fbcecSHannes Reinecke 				goto read_sock_end;
2248*662fbcecSHannes Reinecke 
2249*662fbcecSHannes Reinecke 			memset(&darg.inargs, 0, sizeof(darg.inargs));
2250*662fbcecSHannes Reinecke 
2251*662fbcecSHannes Reinecke 			rxm = strp_msg(tls_strp_msg(ctx));
2252*662fbcecSHannes Reinecke 			tlm = tls_msg(tls_strp_msg(ctx));
2253*662fbcecSHannes Reinecke 
2254*662fbcecSHannes Reinecke 			to_decrypt = rxm->full_len - prot->overhead_size;
2255*662fbcecSHannes Reinecke 
2256*662fbcecSHannes Reinecke 			err = tls_rx_one_record(sk, NULL, &darg);
2257*662fbcecSHannes Reinecke 			if (err < 0) {
2258*662fbcecSHannes Reinecke 				tls_err_abort(sk, -EBADMSG);
2259*662fbcecSHannes Reinecke 				goto read_sock_end;
2260*662fbcecSHannes Reinecke 			}
2261*662fbcecSHannes Reinecke 
2262*662fbcecSHannes Reinecke 			released = tls_read_flush_backlog(sk, prot, rxm->full_len, to_decrypt,
2263*662fbcecSHannes Reinecke 							  decrypted, &flushed_at);
2264*662fbcecSHannes Reinecke 			skb = darg.skb;
2265*662fbcecSHannes Reinecke 			decrypted += rxm->full_len;
2266*662fbcecSHannes Reinecke 
2267*662fbcecSHannes Reinecke 			tls_rx_rec_done(ctx);
2268*662fbcecSHannes Reinecke 		}
2269*662fbcecSHannes Reinecke 
2270*662fbcecSHannes Reinecke 		/* read_sock does not support reading control messages */
2271*662fbcecSHannes Reinecke 		if (tlm->control != TLS_RECORD_TYPE_DATA) {
2272*662fbcecSHannes Reinecke 			err = -EINVAL;
2273*662fbcecSHannes Reinecke 			goto read_sock_requeue;
2274*662fbcecSHannes Reinecke 		}
2275*662fbcecSHannes Reinecke 
2276*662fbcecSHannes Reinecke 		used = read_actor(desc, skb, rxm->offset, rxm->full_len);
2277*662fbcecSHannes Reinecke 		if (used <= 0) {
2278*662fbcecSHannes Reinecke 			if (!copied)
2279*662fbcecSHannes Reinecke 				err = used;
2280*662fbcecSHannes Reinecke 			goto read_sock_requeue;
2281*662fbcecSHannes Reinecke 		}
2282*662fbcecSHannes Reinecke 		copied += used;
2283*662fbcecSHannes Reinecke 		if (used < rxm->full_len) {
2284*662fbcecSHannes Reinecke 			rxm->offset += used;
2285*662fbcecSHannes Reinecke 			rxm->full_len -= used;
2286*662fbcecSHannes Reinecke 			if (!desc->count)
2287*662fbcecSHannes Reinecke 				goto read_sock_requeue;
2288*662fbcecSHannes Reinecke 		} else {
2289*662fbcecSHannes Reinecke 			consume_skb(skb);
2290*662fbcecSHannes Reinecke 			if (!desc->count)
2291*662fbcecSHannes Reinecke 				skb = NULL;
2292*662fbcecSHannes Reinecke 		}
2293*662fbcecSHannes Reinecke 	} while (skb);
2294*662fbcecSHannes Reinecke 
2295*662fbcecSHannes Reinecke read_sock_end:
2296*662fbcecSHannes Reinecke 	tls_rx_reader_release(sk, ctx);
2297*662fbcecSHannes Reinecke 	return copied ? : err;
2298*662fbcecSHannes Reinecke 
2299*662fbcecSHannes Reinecke read_sock_requeue:
2300*662fbcecSHannes Reinecke 	__skb_queue_head(&ctx->rx_list, skb);
2301*662fbcecSHannes Reinecke 	goto read_sock_end;
2302*662fbcecSHannes Reinecke }
2303*662fbcecSHannes Reinecke 
23047b50ecfcSCong Wang bool tls_sw_sock_is_readable(struct sock *sk)
2305c46234ebSDave Watson {
2306c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2307f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2308d3b18ad3SJohn Fastabend 	bool ingress_empty = true;
2309d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
2310c46234ebSDave Watson 
2311d3b18ad3SJohn Fastabend 	rcu_read_lock();
2312d3b18ad3SJohn Fastabend 	psock = sk_psock(sk);
2313d3b18ad3SJohn Fastabend 	if (psock)
2314d3b18ad3SJohn Fastabend 		ingress_empty = list_empty(&psock->ingress_msg);
2315d3b18ad3SJohn Fastabend 	rcu_read_unlock();
2316c46234ebSDave Watson 
2317b92a13d4SJakub Kicinski 	return !ingress_empty || tls_strp_msg_ready(ctx) ||
231813aecb17SJakub Kicinski 		!skb_queue_empty(&ctx->rx_list);
2319c46234ebSDave Watson }
2320c46234ebSDave Watson 
/* Strparser callback: parse the TLS record header at the current stream
 * offset and return the total record length (header + payload), 0 if more
 * data is needed to see a full header, or a negative error.  Any error
 * also aborts the TLS connection.
 */
int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (strp->stm.offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	/* Remember the content-type byte for later record handling */
	strp->mark = header[0];

	/* Bytes 3-4 of the header: payload length, big endian */
	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	/* No explicit per-record IV for TLS 1.3 or ChaCha20-Poly1305 */
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Let an offload device resynchronize on this record boundary */
	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
2380c46234ebSDave Watson 
238184c61fe1SJakub Kicinski void tls_rx_msg_ready(struct tls_strparser *strp)
2382c46234ebSDave Watson {
238384c61fe1SJakub Kicinski 	struct tls_sw_context_rx *ctx;
2384c46234ebSDave Watson 
238584c61fe1SJakub Kicinski 	ctx = container_of(strp, struct tls_sw_context_rx, strp);
2386ad13acceSVakul Garg 	ctx->saved_data_ready(strp->sk);
2387c46234ebSDave Watson }
2388c46234ebSDave Watson 
2389c46234ebSDave Watson static void tls_data_ready(struct sock *sk)
2390c46234ebSDave Watson {
2391c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2392f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2393d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
239474836ec8SJakub Kicinski 	gfp_t alloc_save;
2395c46234ebSDave Watson 
239640e0b090SPeilin Ye 	trace_sk_data_ready(sk);
239740e0b090SPeilin Ye 
239874836ec8SJakub Kicinski 	alloc_save = sk->sk_allocation;
239974836ec8SJakub Kicinski 	sk->sk_allocation = GFP_ATOMIC;
240084c61fe1SJakub Kicinski 	tls_strp_data_ready(&ctx->strp);
240174836ec8SJakub Kicinski 	sk->sk_allocation = alloc_save;
2402d3b18ad3SJohn Fastabend 
2403d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
240462b4011fSXiyu Yang 	if (psock) {
240562b4011fSXiyu Yang 		if (!list_empty(&psock->ingress_msg))
2406d3b18ad3SJohn Fastabend 			ctx->saved_data_ready(sk);
2407d3b18ad3SJohn Fastabend 		sk_psock_put(sk, psock);
2408d3b18ad3SJohn Fastabend 	}
2409c46234ebSDave Watson }
2410c46234ebSDave Watson 
2411f87e62d4SJohn Fastabend void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2412f87e62d4SJohn Fastabend {
2413f87e62d4SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2414f87e62d4SJohn Fastabend 
2415f87e62d4SJohn Fastabend 	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2416f87e62d4SJohn Fastabend 	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2417f87e62d4SJohn Fastabend 	cancel_delayed_work_sync(&ctx->tx_work.work);
2418f87e62d4SJohn Fastabend }
2419f87e62d4SJohn Fastabend 
/* Tear down the software TX path: wait for in-flight async encryptions,
 * push out what is already encrypted, then free unsent records, the send
 * AEAD and any open (unencrypted) record.
 */
void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	int pending;

	/* Wait for any pending async encryptions to complete */
	spin_lock_bh(&ctx->encrypt_compl_lock);
	ctx->async_notify = true;
	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	/* Transmit already-encrypted records before freeing anything */
	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}
2460313ab004SJohn Fastabend 
/* Free the software TX context hanging off the TLS context. */
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	kfree(tls_sw_ctx_tx(tls_ctx));
}
2467f66de3eeSBoris Pismenny 
/* Tear down the software RX path: free key material, drop any queued
 * decrypted records, stop the strparser and restore the original
 * sk_data_ready callback if it was swapped in.
 */
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		/* Decrypted but undelivered records are simply dropped */
		__skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		tls_strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}
2491313ab004SJohn Fastabend 
2492313ab004SJohn Fastabend void tls_sw_strparser_done(struct tls_context *tls_ctx)
2493313ab004SJohn Fastabend {
2494313ab004SJohn Fastabend 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2495313ab004SJohn Fastabend 
249684c61fe1SJakub Kicinski 	tls_strp_done(&ctx->strp);
2497313ab004SJohn Fastabend }
2498313ab004SJohn Fastabend 
/* Free the software RX context hanging off @tls_ctx.
 * Callers must have already released the context's resources.
 */
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	kfree(tls_sw_ctx_rx(tls_ctx));
}
250539f56e1aSBoris Pismenny 
/* Tear down the RX path completely: release all RX resources, then
 * free the software RX context itself.
 */
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(ctx);
}
25133c4d7559SDave Watson 
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	/* TLS may already have been torn down on this socket */
	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	/* context is being closed; do not touch it */
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	/* clear the scheduled flag; if it was already clear someone else
	 * ran (or will run) the Tx for us
	 */
	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	if (mutex_trylock(&tls_ctx->tx_lock)) {
		lock_sock(sk);
		tls_tx_records(sk, -1);
		release_sock(sk);
		mutex_unlock(&tls_ctx->tx_lock);
	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		/* Someone is holding the tx_lock, they will likely run Tx
		 * and cancel the work on their way out of the lock section.
		 * Schedule a long delay just in case.
		 */
		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
	}
}
2547a42055e8SVakul Garg 
254858790314SJakub Kicinski static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
254958790314SJakub Kicinski {
255058790314SJakub Kicinski 	struct tls_rec *rec;
255158790314SJakub Kicinski 
2552ffe2a225SPietro Borrello 	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
255358790314SJakub Kicinski 	if (!rec)
255458790314SJakub Kicinski 		return false;
255558790314SJakub Kicinski 
255658790314SJakub Kicinski 	return READ_ONCE(rec->tx_ready);
255758790314SJakub Kicinski }
255858790314SJakub Kicinski 
25597463d3a2SBoris Pismenny void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
25607463d3a2SBoris Pismenny {
25617463d3a2SBoris Pismenny 	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
25627463d3a2SBoris Pismenny 
25637463d3a2SBoris Pismenny 	/* Schedule the transmission if tx list is ready */
256458790314SJakub Kicinski 	if (tls_is_tx_ready(tx_ctx) &&
256502b1fa07SJakub Kicinski 	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
25667463d3a2SBoris Pismenny 		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
25677463d3a2SBoris Pismenny }
25687463d3a2SBoris Pismenny 
/* Install the TLS data_ready callback on @sk, saving the original so
 * tls_sw_release_resources_rx() can restore it on teardown.
 */
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	/* swap under sk_callback_lock so concurrent readers of
	 * sk_data_ready never observe a torn update
	 */
	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}
2578318892acSJakub Kicinski 
257988527790SJakub Kicinski void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
258088527790SJakub Kicinski {
258188527790SJakub Kicinski 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
258288527790SJakub Kicinski 
258388527790SJakub Kicinski 	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
258488527790SJakub Kicinski 		tls_ctx->prot_info.version != TLS_1_3_VERSION;
258588527790SJakub Kicinski }
258688527790SJakub Kicinski 
/* tls_set_sw_offload - set up software TLS for one direction of @sk
 * @sk: socket being configured
 * @ctx: TLS context to populate (also reachable via tls_get_ctx(sk))
 * @tx: non-zero to configure the transmit path, zero for receive
 *
 * Allocates (or reuses) the per-direction software context, translates
 * the user-supplied crypto_info for the chosen cipher into per-protocol
 * sizes, allocates IV and record-sequence buffers, and instantiates and
 * keys the AEAD transform. For RX, also initializes the strparser.
 *
 * Returns 0 on success or a negative errno; on failure all state
 * allocated here is unwound via the goto-cleanup chain at the bottom.
 */
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt, *cipher_name;
	size_t keysize;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* Allocate the per-direction software context on first use;
	 * reuse the existing one otherwise (e.g. offload fallback).
	 */
	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	/* Direction-specific init; also picks which crypto_info,
	 * cipher_context and AEAD slot the rest of the function uses.
	 */
	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
		init_waitqueue_head(&sw_ctx_rx->wq);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);
		skb_queue_head_init(&sw_ctx_rx->async_hold);
		aead = &sw_ctx_rx->aead_recv;
	}

	/* Per-cipher parameters: every case fills the same set of local
	 * sizes/pointers from the uAPI crypto_info layout for that cipher.
	 */
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;

		gcm_128_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = gcm_128_info->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq = gcm_128_info->rec_seq;
		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		key = gcm_128_info->key;
		salt = gcm_128_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;

		gcm_256_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		iv = gcm_256_info->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
		rec_seq = gcm_256_info->rec_seq;
		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;

		ccm_128_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		iv = ccm_128_info->iv;
		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
		rec_seq = ccm_128_info->rec_seq;
		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
		key = ccm_128_info->key;
		salt = ccm_128_info->salt;
		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
		cipher_name = "ccm(aes)";
		break;
	}
	case TLS_CIPHER_CHACHA20_POLY1305: {
		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;

		chacha20_poly1305_info = (void *)crypto_info;
		/* ChaCha20-Poly1305 carries no explicit per-record nonce
		 * on the wire, hence nonce_size 0.
		 */
		nonce_size = 0;
		tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
		iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
		iv = chacha20_poly1305_info->iv;
		rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
		rec_seq = chacha20_poly1305_info->rec_seq;
		keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
		key = chacha20_poly1305_info->key;
		salt = chacha20_poly1305_info->salt;
		salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
		cipher_name = "rfc7539(chacha20,poly1305)";
		break;
	}
	case TLS_CIPHER_SM4_GCM: {
		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;

		sm4_gcm_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
		tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
		iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
		iv = sm4_gcm_info->iv;
		rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
		rec_seq = sm4_gcm_info->rec_seq;
		keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
		key = sm4_gcm_info->key;
		salt = sm4_gcm_info->salt;
		salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
		cipher_name = "gcm(sm4)";
		break;
	}
	case TLS_CIPHER_SM4_CCM: {
		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;

		sm4_ccm_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
		tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
		iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
		iv = sm4_ccm_info->iv;
		rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
		rec_seq = sm4_ccm_info->rec_seq;
		keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
		key = sm4_ccm_info->key;
		salt = sm4_ccm_info->salt;
		salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
		cipher_name = "ccm(sm4)";
		break;
	}
	case TLS_CIPHER_ARIA_GCM_128: {
		struct tls12_crypto_info_aria_gcm_128 *aria_gcm_128_info;

		aria_gcm_128_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_ARIA_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
		iv = aria_gcm_128_info->iv;
		rec_seq_size = TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE;
		rec_seq = aria_gcm_128_info->rec_seq;
		keysize = TLS_CIPHER_ARIA_GCM_128_KEY_SIZE;
		key = aria_gcm_128_info->key;
		salt = aria_gcm_128_info->salt;
		salt_size = TLS_CIPHER_ARIA_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aria)";
		break;
	}
	case TLS_CIPHER_ARIA_GCM_256: {
		struct tls12_crypto_info_aria_gcm_256 *gcm_256_info;

		gcm_256_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_ARIA_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
		iv = gcm_256_info->iv;
		rec_seq_size = TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE;
		rec_seq = gcm_256_info->rec_seq;
		keysize = TLS_CIPHER_ARIA_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_ARIA_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aria)";
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* TLS 1.3 has no explicit nonce in the record header and hides
	 * the content type in a 1-byte tail inside the ciphertext.
	 */
	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
	    prot->aad_size > TLS_MAX_AAD_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	/* cctx->iv holds the full nonce buffer: salt followed by IV */
	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = rec_seq_size;
	memcpy(cctx->iv, salt, salt_size);
	memcpy(cctx->iv + salt_size, iv, iv_size);
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	/* only allocate the AEAD transform once per direction */
	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, keysize);

	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		tls_update_rx_zc_capable(ctx);
		/* async decrypt is only used for pre-1.3 with an async
		 * capable AEAD implementation
		 */
		sw_ctx_rx->async_capable =
			crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);

		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
		if (rc)
			goto free_aead;
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}
2890