xref: /openbmc/linux/net/tls/tls_sw.c (revision 74836ec8)
13c4d7559SDave Watson /*
23c4d7559SDave Watson  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
33c4d7559SDave Watson  * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
43c4d7559SDave Watson  * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
53c4d7559SDave Watson  * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
63c4d7559SDave Watson  * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7d3b18ad3SJohn Fastabend  * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
83c4d7559SDave Watson  *
93c4d7559SDave Watson  * This software is available to you under a choice of one of two
103c4d7559SDave Watson  * licenses.  You may choose to be licensed under the terms of the GNU
113c4d7559SDave Watson  * General Public License (GPL) Version 2, available from the file
123c4d7559SDave Watson  * COPYING in the main directory of this source tree, or the
133c4d7559SDave Watson  * OpenIB.org BSD license below:
143c4d7559SDave Watson  *
153c4d7559SDave Watson  *     Redistribution and use in source and binary forms, with or
163c4d7559SDave Watson  *     without modification, are permitted provided that the following
173c4d7559SDave Watson  *     conditions are met:
183c4d7559SDave Watson  *
193c4d7559SDave Watson  *      - Redistributions of source code must retain the above
203c4d7559SDave Watson  *        copyright notice, this list of conditions and the following
213c4d7559SDave Watson  *        disclaimer.
223c4d7559SDave Watson  *
233c4d7559SDave Watson  *      - Redistributions in binary form must reproduce the above
243c4d7559SDave Watson  *        copyright notice, this list of conditions and the following
253c4d7559SDave Watson  *        disclaimer in the documentation and/or other materials
263c4d7559SDave Watson  *        provided with the distribution.
273c4d7559SDave Watson  *
283c4d7559SDave Watson  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
293c4d7559SDave Watson  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
303c4d7559SDave Watson  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
313c4d7559SDave Watson  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
323c4d7559SDave Watson  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
333c4d7559SDave Watson  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
343c4d7559SDave Watson  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
353c4d7559SDave Watson  * SOFTWARE.
363c4d7559SDave Watson  */
373c4d7559SDave Watson 
38da353facSDaniel Jordan #include <linux/bug.h>
39c46234ebSDave Watson #include <linux/sched/signal.h>
403c4d7559SDave Watson #include <linux/module.h>
418d338c76SHerbert Xu #include <linux/kernel.h>
42974271e5SJim Ma #include <linux/splice.h>
433c4d7559SDave Watson #include <crypto/aead.h>
443c4d7559SDave Watson 
45c46234ebSDave Watson #include <net/strparser.h>
463c4d7559SDave Watson #include <net/tls.h>
4740e0b090SPeilin Ye #include <trace/events/sock.h>
483c4d7559SDave Watson 
4958790314SJakub Kicinski #include "tls.h"
5058790314SJakub Kicinski 
/* Per-call parameters for one record decryption.  The struct_group()
 * span lets the input flags be copied/cleared as a single unit,
 * separate from the skb pointer that is filled in by the decrypt path.
 */
514175eac3SJakub Kicinski struct tls_decrypt_arg {
526bd116c8SJakub Kicinski 	struct_group(inargs,
	/* zc: caller requested zero-copy decrypt into user pages */
534175eac3SJakub Kicinski 	bool zc;
	/* async: completion may happen later from the crypto callback */
544175eac3SJakub Kicinski 	bool async;
	/* tail: last payload byte (TLS 1.3 inner content type) saved for
	 * the zc case — see tls_padding_length()
	 */
55ce61327cSJakub Kicinski 	u8 tail;
566bd116c8SJakub Kicinski 	);
576bd116c8SJakub Kicinski 
586bd116c8SJakub Kicinski 	struct sk_buff *skb;
594175eac3SJakub Kicinski };
604175eac3SJakub Kicinski 
/* Scratch state for one decrypt operation.  Allocated in the same
 * buffer as the aead_request, immediately behind it (see the pointer
 * arithmetic in tls_decrypt_done()), so the async callback can recover
 * the socket without any extra allocation.
 */
61b89fec54SJakub Kicinski struct tls_decrypt_ctx {
628d338c76SHerbert Xu 	struct sock *sk;
63b89fec54SJakub Kicinski 	u8 iv[MAX_IV_SIZE];
64b89fec54SJakub Kicinski 	u8 aad[TLS_MAX_AAD_SIZE];
65b89fec54SJakub Kicinski 	u8 tail;
	/* flexible array of in/out scatterlist entries, sized by caller */
66b89fec54SJakub Kicinski 	struct scatterlist sg[];
67b89fec54SJakub Kicinski };
68b89fec54SJakub Kicinski 
69da353facSDaniel Jordan noinline void tls_err_abort(struct sock *sk, int err)
70da353facSDaniel Jordan {
71da353facSDaniel Jordan 	WARN_ON_ONCE(err >= 0);
72da353facSDaniel Jordan 	/* sk->sk_err should contain a positive error code. */
73da353facSDaniel Jordan 	sk->sk_err = -err;
74da353facSDaniel Jordan 	sk_error_report(sk);
75da353facSDaniel Jordan }
76da353facSDaniel Jordan 
/* Count the scatterlist entries needed to map @len bytes of @skb
 * starting at @offset: one entry for the linear area, one per paged
 * fragment touched, recursing into the frag list.  Returns the count,
 * or -EMSGSIZE once fraglist nesting exceeds 24 levels (guards against
 * unbounded recursion on a hostile skb chain).
 */
770927f71dSDoron Roberts-Kedes static int __skb_nsg(struct sk_buff *skb, int offset, int len,
780927f71dSDoron Roberts-Kedes                      unsigned int recursion_level)
790927f71dSDoron Roberts-Kedes {
800927f71dSDoron Roberts-Kedes         int start = skb_headlen(skb);
810927f71dSDoron Roberts-Kedes         int i, chunk = start - offset;
820927f71dSDoron Roberts-Kedes         struct sk_buff *frag_iter;
830927f71dSDoron Roberts-Kedes         int elt = 0;
840927f71dSDoron Roberts-Kedes 
850927f71dSDoron Roberts-Kedes         if (unlikely(recursion_level >= 24))
860927f71dSDoron Roberts-Kedes                 return -EMSGSIZE;
870927f71dSDoron Roberts-Kedes 
        /* linear (header) area first */
880927f71dSDoron Roberts-Kedes         if (chunk > 0) {
890927f71dSDoron Roberts-Kedes                 if (chunk > len)
900927f71dSDoron Roberts-Kedes                         chunk = len;
910927f71dSDoron Roberts-Kedes                 elt++;
920927f71dSDoron Roberts-Kedes                 len -= chunk;
930927f71dSDoron Roberts-Kedes                 if (len == 0)
940927f71dSDoron Roberts-Kedes                         return elt;
950927f71dSDoron Roberts-Kedes                 offset += chunk;
960927f71dSDoron Roberts-Kedes         }
970927f71dSDoron Roberts-Kedes 
        /* then each paged fragment that overlaps [offset, offset+len) */
980927f71dSDoron Roberts-Kedes         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
990927f71dSDoron Roberts-Kedes                 int end;
1000927f71dSDoron Roberts-Kedes 
1010927f71dSDoron Roberts-Kedes                 WARN_ON(start > offset + len);
1020927f71dSDoron Roberts-Kedes 
1030927f71dSDoron Roberts-Kedes                 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1040927f71dSDoron Roberts-Kedes                 chunk = end - offset;
1050927f71dSDoron Roberts-Kedes                 if (chunk > 0) {
1060927f71dSDoron Roberts-Kedes                         if (chunk > len)
1070927f71dSDoron Roberts-Kedes                                 chunk = len;
1080927f71dSDoron Roberts-Kedes                         elt++;
1090927f71dSDoron Roberts-Kedes                         len -= chunk;
1100927f71dSDoron Roberts-Kedes                         if (len == 0)
1110927f71dSDoron Roberts-Kedes                                 return elt;
1120927f71dSDoron Roberts-Kedes                         offset += chunk;
1130927f71dSDoron Roberts-Kedes                 }
1140927f71dSDoron Roberts-Kedes                 start = end;
1150927f71dSDoron Roberts-Kedes         }
1160927f71dSDoron Roberts-Kedes 
        /* finally any chained skbs, recursing one level deeper */
1170927f71dSDoron Roberts-Kedes         if (unlikely(skb_has_frag_list(skb))) {
1180927f71dSDoron Roberts-Kedes                 skb_walk_frags(skb, frag_iter) {
1190927f71dSDoron Roberts-Kedes                         int end, ret;
1200927f71dSDoron Roberts-Kedes 
1210927f71dSDoron Roberts-Kedes                         WARN_ON(start > offset + len);
1220927f71dSDoron Roberts-Kedes 
1230927f71dSDoron Roberts-Kedes                         end = start + frag_iter->len;
1240927f71dSDoron Roberts-Kedes                         chunk = end - offset;
1250927f71dSDoron Roberts-Kedes                         if (chunk > 0) {
1260927f71dSDoron Roberts-Kedes                                 if (chunk > len)
1270927f71dSDoron Roberts-Kedes                                         chunk = len;
1280927f71dSDoron Roberts-Kedes                                 ret = __skb_nsg(frag_iter, offset - start, chunk,
1290927f71dSDoron Roberts-Kedes                                                 recursion_level + 1);
1300927f71dSDoron Roberts-Kedes                                 if (unlikely(ret < 0))
1310927f71dSDoron Roberts-Kedes                                         return ret;
1320927f71dSDoron Roberts-Kedes                                 elt += ret;
1330927f71dSDoron Roberts-Kedes                                 len -= chunk;
1340927f71dSDoron Roberts-Kedes                                 if (len == 0)
1350927f71dSDoron Roberts-Kedes                                         return elt;
1360927f71dSDoron Roberts-Kedes                                 offset += chunk;
1370927f71dSDoron Roberts-Kedes                         }
1380927f71dSDoron Roberts-Kedes                         start = end;
1390927f71dSDoron Roberts-Kedes                 }
1400927f71dSDoron Roberts-Kedes         }
        /* callers must never ask for more bytes than the skb holds */
1410927f71dSDoron Roberts-Kedes         BUG_ON(len);
1420927f71dSDoron Roberts-Kedes         return elt;
1430927f71dSDoron Roberts-Kedes }
1440927f71dSDoron Roberts-Kedes 
1450927f71dSDoron Roberts-Kedes /* Return the number of scatterlist elements required to completely map the
1460927f71dSDoron Roberts-Kedes  * skb, or -EMSGSIZE if the recursion depth is exceeded.
1470927f71dSDoron Roberts-Kedes  */
/* Convenience wrapper: start the recursive walk at depth 0. */
1480927f71dSDoron Roberts-Kedes static int skb_nsg(struct sk_buff *skb, int offset, int len)
1490927f71dSDoron Roberts-Kedes {
1500927f71dSDoron Roberts-Kedes         return __skb_nsg(skb, offset, len, 0);
1510927f71dSDoron Roberts-Kedes }
1520927f71dSDoron Roberts-Kedes 
/* TLS 1.3 records carry an inner content-type byte followed by optional
 * zero padding before the auth tag.  Scan backwards from the byte just
 * before the tag until a non-zero byte is found; record it as the real
 * content type and return the number of padding bytes stripped.
 * For TLS 1.2 there is no inner type/padding and 0 is returned.
 * Returns -EBADMSG if the scan runs into the record header (record was
 * all padding), or a skb_copy_bits() error.
 */
153ce61327cSJakub Kicinski static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
154ce61327cSJakub Kicinski 			      struct tls_decrypt_arg *darg)
155130b392cSDave Watson {
156130b392cSDave Watson 	struct strp_msg *rxm = strp_msg(skb);
157c3f6bb74SJakub Kicinski 	struct tls_msg *tlm = tls_msg(skb);
158130b392cSDave Watson 	int sub = 0;
159130b392cSDave Watson 
160130b392cSDave Watson 	/* Determine zero-padding length */
161b53f4976SJakub Kicinski 	if (prot->version == TLS_1_3_VERSION) {
1625deee41bSJakub Kicinski 		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		/* zero-copy decrypt stashed the tail byte in darg->tail;
		 * otherwise read each candidate byte from the skb below
		 */
163ce61327cSJakub Kicinski 		char content_type = darg->zc ? darg->tail : 0;
164130b392cSDave Watson 		int err;
165130b392cSDave Watson 
166130b392cSDave Watson 		while (content_type == 0) {
1675deee41bSJakub Kicinski 			if (offset < prot->prepend_size)
168130b392cSDave Watson 				return -EBADMSG;
1695deee41bSJakub Kicinski 			err = skb_copy_bits(skb, rxm->offset + offset,
170130b392cSDave Watson 					    &content_type, 1);
171b53f4976SJakub Kicinski 			if (err)
172b53f4976SJakub Kicinski 				return err;
173130b392cSDave Watson 			if (content_type)
174130b392cSDave Watson 				break;
175130b392cSDave Watson 			sub++;
1765deee41bSJakub Kicinski 			offset--;
177130b392cSDave Watson 		}
178c3f6bb74SJakub Kicinski 		tlm->control = content_type;
179130b392cSDave Watson 	}
180130b392cSDave Watson 	return sub;
181130b392cSDave Watson }
182130b392cSDave Watson 
/* Crypto-layer completion callback for an async record decryption.
 * Recovers the socket from the tls_decrypt_ctx that was allocated
 * directly behind the aead_request, propagates any crypto error to the
 * socket, releases out-of-place destination pages, frees the request,
 * and completes the waiter once the last pending decrypt finishes.
 */
1838580e55aSHerbert Xu static void tls_decrypt_done(void *data, int err)
18494524d8fSVakul Garg {
1858580e55aSHerbert Xu 	struct aead_request *aead_req = data;
1868d338c76SHerbert Xu 	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
18794524d8fSVakul Garg 	struct scatterlist *sgout = aead_req->dst;
188692d7b5dSVakul Garg 	struct scatterlist *sgin = aead_req->src;
1897a3dd8c8SJohn Fastabend 	struct tls_sw_context_rx *ctx;
1908d338c76SHerbert Xu 	struct tls_decrypt_ctx *dctx;
1917a3dd8c8SJohn Fastabend 	struct tls_context *tls_ctx;
19294524d8fSVakul Garg 	struct scatterlist *sg;
19394524d8fSVakul Garg 	unsigned int pages;
1946ececdc5SJakub Kicinski 	struct sock *sk;
1958d338c76SHerbert Xu 	int aead_size;
1967a3dd8c8SJohn Fastabend 
	/* dctx lives right after the (variable-size) aead_request,
	 * rounded up to dctx's alignment — must mirror the allocation
	 * layout used by the decrypt submission path
	 */
1978d338c76SHerbert Xu 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
1988d338c76SHerbert Xu 	aead_size = ALIGN(aead_size, __alignof__(*dctx));
1998d338c76SHerbert Xu 	dctx = (void *)((u8 *)aead_req + aead_size);
2008d338c76SHerbert Xu 
2018d338c76SHerbert Xu 	sk = dctx->sk;
2026ececdc5SJakub Kicinski 	tls_ctx = tls_get_ctx(sk);
2037a3dd8c8SJohn Fastabend 	ctx = tls_sw_ctx_rx(tls_ctx);
20494524d8fSVakul Garg 
20594524d8fSVakul Garg 	/* Propagate if there was an err */
20694524d8fSVakul Garg 	if (err) {
2075c5ec668SJakub Kicinski 		if (err == -EBADMSG)
2086ececdc5SJakub Kicinski 			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
20994524d8fSVakul Garg 		ctx->async_wait.err = err;
2106ececdc5SJakub Kicinski 		tls_err_abort(sk, err);
21194524d8fSVakul Garg 	}
21294524d8fSVakul Garg 
213692d7b5dSVakul Garg 	/* Free the destination pages if skb was not decrypted inplace */
214692d7b5dSVakul Garg 	if (sgout != sgin) {
21594524d8fSVakul Garg 		/* Skip the first S/G entry as it points to AAD */
21694524d8fSVakul Garg 		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
21794524d8fSVakul Garg 			if (!sg)
21894524d8fSVakul Garg 				break;
21994524d8fSVakul Garg 			put_page(sg_page(sg));
22094524d8fSVakul Garg 		}
221692d7b5dSVakul Garg 	}
22294524d8fSVakul Garg 
22394524d8fSVakul Garg 	kfree(aead_req);
22494524d8fSVakul Garg 
	/* decrement under decrypt_compl_lock so the completion cannot be
	 * missed by a waiter sampling decrypt_pending under the same lock
	 */
2250cada332SVinay Kumar Yadav 	spin_lock_bh(&ctx->decrypt_compl_lock);
22637943f04SJakub Kicinski 	if (!atomic_dec_return(&ctx->decrypt_pending))
22794524d8fSVakul Garg 		complete(&ctx->async_wait.completion);
2280cada332SVinay Kumar Yadav 	spin_unlock_bh(&ctx->decrypt_compl_lock);
22994524d8fSVakul Garg }
23094524d8fSVakul Garg 
/* Submit one AEAD decrypt over @data_len payload bytes (the sgin list
 * additionally covers the auth tag).  With darg->async set, -EINPROGRESS
 * means the request completes later via tls_decrypt_done() and 0 is
 * returned; otherwise the call waits synchronously.  On return,
 * darg->async cleared means the result code is final.
 */
231c46234ebSDave Watson static int tls_do_decryption(struct sock *sk,
232c46234ebSDave Watson 			     struct scatterlist *sgin,
233c46234ebSDave Watson 			     struct scatterlist *sgout,
234c46234ebSDave Watson 			     char *iv_recv,
235c46234ebSDave Watson 			     size_t data_len,
23694524d8fSVakul Garg 			     struct aead_request *aead_req,
2373547a1f9SJakub Kicinski 			     struct tls_decrypt_arg *darg)
238c46234ebSDave Watson {
239c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2404509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
241f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
242c46234ebSDave Watson 	int ret;
243c46234ebSDave Watson 
2440b243d00SVakul Garg 	aead_request_set_tfm(aead_req, ctx->aead_recv);
2454509de14SVakul Garg 	aead_request_set_ad(aead_req, prot->aad_size);
	/* ciphertext length = payload + authentication tag */
246c46234ebSDave Watson 	aead_request_set_crypt(aead_req, sgin, sgout,
2474509de14SVakul Garg 			       data_len + prot->tag_size,
248c46234ebSDave Watson 			       (u8 *)iv_recv);
249c46234ebSDave Watson 
2503547a1f9SJakub Kicinski 	if (darg->async) {
25194524d8fSVakul Garg 		aead_request_set_callback(aead_req,
25294524d8fSVakul Garg 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
2538d338c76SHerbert Xu 					  tls_decrypt_done, aead_req);
		/* balanced by the atomic_dec_return() in tls_decrypt_done() */
25494524d8fSVakul Garg 		atomic_inc(&ctx->decrypt_pending);
25594524d8fSVakul Garg 	} else {
25694524d8fSVakul Garg 		aead_request_set_callback(aead_req,
25794524d8fSVakul Garg 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
25894524d8fSVakul Garg 					  crypto_req_done, &ctx->async_wait);
25994524d8fSVakul Garg 	}
26094524d8fSVakul Garg 
26194524d8fSVakul Garg 	ret = crypto_aead_decrypt(aead_req);
26294524d8fSVakul Garg 	if (ret == -EINPROGRESS) {
2633547a1f9SJakub Kicinski 		if (darg->async)
2643547a1f9SJakub Kicinski 			return 0;
26594524d8fSVakul Garg 
26694524d8fSVakul Garg 		ret = crypto_wait_req(ret, &ctx->async_wait);
26794524d8fSVakul Garg 	}
	/* if we got here the operation finished synchronously */
2683547a1f9SJakub Kicinski 	darg->async = false;
2693547a1f9SJakub Kicinski 
270c46234ebSDave Watson 	return ret;
271c46234ebSDave Watson }
272c46234ebSDave Watson 
/* Shrink both halves of the open TX record so the plaintext holds
 * @target_size bytes; the encrypted copy keeps extra room for the
 * per-record crypto overhead (header, IV, tag).
 */
273d829e9c4SDaniel Borkmann static void tls_trim_both_msgs(struct sock *sk, int target_size)
2743c4d7559SDave Watson {
2753c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2764509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
277f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
278a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
2793c4d7559SDave Watson 
280d829e9c4SDaniel Borkmann 	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
2813c4d7559SDave Watson 	if (target_size > 0)
2824509de14SVakul Garg 		target_size += prot->overhead_size;
283d829e9c4SDaniel Borkmann 	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
2843c4d7559SDave Watson }
2853c4d7559SDave Watson 
/* Grow the open record's encrypted sg list to hold @len bytes.
 * Returns 0 or a negative errno from sk_msg_alloc().
 */
286d829e9c4SDaniel Borkmann static int tls_alloc_encrypted_msg(struct sock *sk, int len)
2873c4d7559SDave Watson {
2883c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
289f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
290a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
291d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
2923c4d7559SDave Watson 
293d829e9c4SDaniel Borkmann 	return sk_msg_alloc(sk, msg_en, len, 0);
2943c4d7559SDave Watson }
2953c4d7559SDave Watson 
/* Extend the plaintext msg up to @required bytes by taking page
 * references from the already-allocated encrypted msg — no new page
 * allocation; both msgs end up referencing the same pages at matching
 * offsets so in-place encryption works.
 */
296d829e9c4SDaniel Borkmann static int tls_clone_plaintext_msg(struct sock *sk, int required)
2973c4d7559SDave Watson {
2983c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2994509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
300f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
301a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
302d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl = &rec->msg_plaintext;
303d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
3044e6d4720SVakul Garg 	int skip, len;
3053c4d7559SDave Watson 
306d829e9c4SDaniel Borkmann 	/* We add page references worth len bytes from encrypted sg
307d829e9c4SDaniel Borkmann 	 * at the end of plaintext sg. It is guaranteed that msg_en
3084e6d4720SVakul Garg 	 * has enough required room (ensured by caller).
3094e6d4720SVakul Garg 	 */
310d829e9c4SDaniel Borkmann 	len = required - msg_pl->sg.size;
31152ea992cSVakul Garg 
312d829e9c4SDaniel Borkmann 	/* Skip initial bytes in msg_en's data to be able to use
313d829e9c4SDaniel Borkmann 	 * same offset of both plain and encrypted data.
3144e6d4720SVakul Garg 	 */
3154509de14SVakul Garg 	skip = prot->prepend_size + msg_pl->sg.size;
3164e6d4720SVakul Garg 
317d829e9c4SDaniel Borkmann 	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
3183c4d7559SDave Watson }
3193c4d7559SDave Watson 
/* Allocate and initialize a fresh TX record.  The aead_request is
 * co-allocated after the struct (crypto_aead_reqsize() extra bytes).
 * Entry 0 of both AEAD scatterlists points at the shared AAD scratch
 * buffer; entry 1 is unmarked so the payload chain can be linked in
 * later.  Returns NULL on allocation failure.
 */
320d3b18ad3SJohn Fastabend static struct tls_rec *tls_get_rec(struct sock *sk)
321d3b18ad3SJohn Fastabend {
322d3b18ad3SJohn Fastabend 	struct tls_context *tls_ctx = tls_get_ctx(sk);
3234509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
324d3b18ad3SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
325d3b18ad3SJohn Fastabend 	struct sk_msg *msg_pl, *msg_en;
326d3b18ad3SJohn Fastabend 	struct tls_rec *rec;
327d3b18ad3SJohn Fastabend 	int mem_size;
328d3b18ad3SJohn Fastabend 
329d3b18ad3SJohn Fastabend 	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
330d3b18ad3SJohn Fastabend 
331d3b18ad3SJohn Fastabend 	rec = kzalloc(mem_size, sk->sk_allocation);
332d3b18ad3SJohn Fastabend 	if (!rec)
333d3b18ad3SJohn Fastabend 		return NULL;
334d3b18ad3SJohn Fastabend 
335d3b18ad3SJohn Fastabend 	msg_pl = &rec->msg_plaintext;
336d3b18ad3SJohn Fastabend 	msg_en = &rec->msg_encrypted;
337d3b18ad3SJohn Fastabend 
338d3b18ad3SJohn Fastabend 	sk_msg_init(msg_pl);
339d3b18ad3SJohn Fastabend 	sk_msg_init(msg_en);
340d3b18ad3SJohn Fastabend 
341d3b18ad3SJohn Fastabend 	sg_init_table(rec->sg_aead_in, 2);
3424509de14SVakul Garg 	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	/* clear the end marker so the payload sg list can be chained on */
343d3b18ad3SJohn Fastabend 	sg_unmark_end(&rec->sg_aead_in[1]);
344d3b18ad3SJohn Fastabend 
345d3b18ad3SJohn Fastabend 	sg_init_table(rec->sg_aead_out, 2);
3464509de14SVakul Garg 	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
347d3b18ad3SJohn Fastabend 	sg_unmark_end(&rec->sg_aead_out[1]);
348d3b18ad3SJohn Fastabend 
	/* back-pointer used by the async encrypt completion callback */
3498d338c76SHerbert Xu 	rec->sk = sk;
3508d338c76SHerbert Xu 
351d3b18ad3SJohn Fastabend 	return rec;
352d3b18ad3SJohn Fastabend }
353d3b18ad3SJohn Fastabend 
/* Release both sg halves of a record and the record allocation itself. */
354d3b18ad3SJohn Fastabend static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
355d3b18ad3SJohn Fastabend {
356d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &rec->msg_encrypted);
357d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &rec->msg_plaintext);
358d3b18ad3SJohn Fastabend 	kfree(rec);
359d3b18ad3SJohn Fastabend }
360d3b18ad3SJohn Fastabend 
/* Drop the currently open (not yet encrypted/transmitted) record, if any. */
361c774973eSVakul Garg static void tls_free_open_rec(struct sock *sk)
3623c4d7559SDave Watson {
3633c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
364f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
365a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
3663c4d7559SDave Watson 
367d3b18ad3SJohn Fastabend 	if (rec) {
368d3b18ad3SJohn Fastabend 		tls_free_rec(sk, rec);
369d3b18ad3SJohn Fastabend 		ctx->open_rec = NULL;
370d3b18ad3SJohn Fastabend 	}
3713c4d7559SDave Watson }
3723c4d7559SDave Watson 
/* Transmit encrypted records to TCP.  First finish any partially sent
 * record, then push every record at the head of tx_list whose
 * encryption has completed (tx_ready), stopping at the first record
 * still in flight so wire order matches record order.  @flags == -1
 * means "use the sendmsg flags saved when the record was queued".
 * Any error other than -EAGAIN aborts the socket.
 */
373a42055e8SVakul Garg int tls_tx_records(struct sock *sk, int flags)
374a42055e8SVakul Garg {
375a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
376a42055e8SVakul Garg 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
377a42055e8SVakul Garg 	struct tls_rec *rec, *tmp;
378d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en;
379a42055e8SVakul Garg 	int tx_flags, rc = 0;
380a42055e8SVakul Garg 
381a42055e8SVakul Garg 	if (tls_is_partially_sent_record(tls_ctx)) {
3829932a29aSVakul Garg 		rec = list_first_entry(&ctx->tx_list,
383a42055e8SVakul Garg 				       struct tls_rec, list);
384a42055e8SVakul Garg 
385a42055e8SVakul Garg 		if (flags == -1)
386a42055e8SVakul Garg 			tx_flags = rec->tx_flags;
387a42055e8SVakul Garg 		else
388a42055e8SVakul Garg 			tx_flags = flags;
389a42055e8SVakul Garg 
390a42055e8SVakul Garg 		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
391a42055e8SVakul Garg 		if (rc)
392a42055e8SVakul Garg 			goto tx_err;
393a42055e8SVakul Garg 
394a42055e8SVakul Garg 		/* Full record has been transmitted.
3959932a29aSVakul Garg 		 * Remove the head of tx_list
396a42055e8SVakul Garg 		 */
397a42055e8SVakul Garg 		list_del(&rec->list);
		/* encrypted pages were handed to TCP; only plaintext is ours */
398d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_plaintext);
399a42055e8SVakul Garg 		kfree(rec);
400a42055e8SVakul Garg 	}
401a42055e8SVakul Garg 
4029932a29aSVakul Garg 	/* Tx all ready records */
4039932a29aSVakul Garg 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		/* pairs with smp_store_mb() in the encrypt completion path */
4049932a29aSVakul Garg 		if (READ_ONCE(rec->tx_ready)) {
405a42055e8SVakul Garg 			if (flags == -1)
406a42055e8SVakul Garg 				tx_flags = rec->tx_flags;
407a42055e8SVakul Garg 			else
408a42055e8SVakul Garg 				tx_flags = flags;
409a42055e8SVakul Garg 
410d829e9c4SDaniel Borkmann 			msg_en = &rec->msg_encrypted;
411a42055e8SVakul Garg 			rc = tls_push_sg(sk, tls_ctx,
412d829e9c4SDaniel Borkmann 					 &msg_en->sg.data[msg_en->sg.curr],
413a42055e8SVakul Garg 					 0, tx_flags);
414a42055e8SVakul Garg 			if (rc)
415a42055e8SVakul Garg 				goto tx_err;
416a42055e8SVakul Garg 
417a42055e8SVakul Garg 			list_del(&rec->list);
418d829e9c4SDaniel Borkmann 			sk_msg_free(sk, &rec->msg_plaintext);
419a42055e8SVakul Garg 			kfree(rec);
420a42055e8SVakul Garg 		} else {
			/* head still being encrypted — keep ordering, stop */
421a42055e8SVakul Garg 			break;
422a42055e8SVakul Garg 		}
423a42055e8SVakul Garg 	}
424a42055e8SVakul Garg 
425a42055e8SVakul Garg tx_err:
426a42055e8SVakul Garg 	if (rc < 0 && rc != -EAGAIN)
427da353facSDaniel Jordan 		tls_err_abort(sk, -EBADMSG);
428a42055e8SVakul Garg 
429a42055e8SVakul Garg 	return rc;
430a42055e8SVakul Garg }
431a42055e8SVakul Garg 
/* Crypto-layer completion callback for an async record encryption.
 * Restores the first payload sg entry (shifted past the TLS header by
 * tls_do_encryption()), marks the record ready for transmission, and —
 * if it sits at the head of tx_list — schedules the TX work so records
 * go out in order.  On error the record is left on tx_list unmarked and
 * the socket is aborted.
 */
4328580e55aSHerbert Xu static void tls_encrypt_done(void *data, int err)
433a42055e8SVakul Garg {
4348d338c76SHerbert Xu 	struct tls_sw_context_tx *ctx;
4358d338c76SHerbert Xu 	struct tls_context *tls_ctx;
4368d338c76SHerbert Xu 	struct tls_prot_info *prot;
437d3777ceaSHerbert Xu 	struct tls_rec *rec = data;
438d829e9c4SDaniel Borkmann 	struct scatterlist *sge;
439d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en;
440a42055e8SVakul Garg 	bool ready = false;
4418d338c76SHerbert Xu 	struct sock *sk;
442a42055e8SVakul Garg 	int pending;
443a42055e8SVakul Garg 
444d829e9c4SDaniel Borkmann 	msg_en = &rec->msg_encrypted;
445a42055e8SVakul Garg 
4468d338c76SHerbert Xu 	sk = rec->sk;
4478d338c76SHerbert Xu 	tls_ctx = tls_get_ctx(sk);
4488d338c76SHerbert Xu 	prot = &tls_ctx->prot_info;
4498d338c76SHerbert Xu 	ctx = tls_sw_ctx_tx(tls_ctx);
4508d338c76SHerbert Xu 
	/* undo the prepend-size shift applied in tls_do_encryption() so
	 * the entry once again covers the TLS record header
	 */
451d829e9c4SDaniel Borkmann 	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
4524509de14SVakul Garg 	sge->offset -= prot->prepend_size;
4534509de14SVakul Garg 	sge->length += prot->prepend_size;
454a42055e8SVakul Garg 
45580ece6a0SVakul Garg 	/* Check if error is previously set on socket */
456a42055e8SVakul Garg 	if (err || sk->sk_err) {
		/* clearing rec skips the tx_ready marking below */
457a42055e8SVakul Garg 		rec = NULL;
458a42055e8SVakul Garg 
459a42055e8SVakul Garg 		/* If err is already set on socket, return the same code */
460a42055e8SVakul Garg 		if (sk->sk_err) {
4611d9d6fd2SDaniel Jordan 			ctx->async_wait.err = -sk->sk_err;
462a42055e8SVakul Garg 		} else {
463a42055e8SVakul Garg 			ctx->async_wait.err = err;
464a42055e8SVakul Garg 			tls_err_abort(sk, err);
465a42055e8SVakul Garg 		}
466a42055e8SVakul Garg 	}
467a42055e8SVakul Garg 
4689932a29aSVakul Garg 	if (rec) {
4699932a29aSVakul Garg 		struct tls_rec *first_rec;
4709932a29aSVakul Garg 
4719932a29aSVakul Garg 		/* Mark the record as ready for transmission */
4729932a29aSVakul Garg 		smp_store_mb(rec->tx_ready, true);
4739932a29aSVakul Garg 
4749932a29aSVakul Garg 		/* If received record is at head of tx_list, schedule tx */
4759932a29aSVakul Garg 		first_rec = list_first_entry(&ctx->tx_list,
4769932a29aSVakul Garg 					     struct tls_rec, list);
4779932a29aSVakul Garg 		if (rec == first_rec)
4789932a29aSVakul Garg 			ready = true;
4799932a29aSVakul Garg 	}
480a42055e8SVakul Garg 
	/* decrement under encrypt_compl_lock so a waiter that set
	 * async_notify cannot miss the completion
	 */
4810cada332SVinay Kumar Yadav 	spin_lock_bh(&ctx->encrypt_compl_lock);
482a42055e8SVakul Garg 	pending = atomic_dec_return(&ctx->encrypt_pending);
483a42055e8SVakul Garg 
4840cada332SVinay Kumar Yadav 	if (!pending && ctx->async_notify)
485a42055e8SVakul Garg 		complete(&ctx->async_wait.completion);
4860cada332SVinay Kumar Yadav 	spin_unlock_bh(&ctx->encrypt_compl_lock);
487a42055e8SVakul Garg 
488a42055e8SVakul Garg 	if (!ready)
489a42055e8SVakul Garg 		return;
490a42055e8SVakul Garg 
491a42055e8SVakul Garg 	/* Schedule the transmission */
492a42055e8SVakul Garg 	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
493d829e9c4SDaniel Borkmann 		schedule_delayed_work(&ctx->tx_work.work, 1);
494a42055e8SVakul Garg }
495a42055e8SVakul Garg 
/* Build the per-record nonce, shift the first payload sg entry past the
 * TLS header, and submit the AEAD encrypt for the open record.  The
 * record is queued on tx_list *before* submission so the async callback
 * can find it; on synchronous failure it is unlinked again.  On success
 * or -EINPROGRESS the open record is detached from the context and the
 * TX sequence number advanced.  Returns 0, -EINPROGRESS, or a negative
 * errno.
 */
496a42055e8SVakul Garg static int tls_do_encryption(struct sock *sk,
497a42055e8SVakul Garg 			     struct tls_context *tls_ctx,
498a447da7dSDaniel Borkmann 			     struct tls_sw_context_tx *ctx,
499a447da7dSDaniel Borkmann 			     struct aead_request *aead_req,
500d829e9c4SDaniel Borkmann 			     size_t data_len, u32 start)
5013c4d7559SDave Watson {
5024509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
503a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
504d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
505d829e9c4SDaniel Borkmann 	struct scatterlist *sge = sk_msg_elem(msg_en, start);
506f295b3aeSVakul Garg 	int rc, iv_offset = 0;
5073c4d7559SDave Watson 
508f295b3aeSVakul Garg 	/* For CCM based ciphers, first byte of IV is a constant */
509128cfb88STianjia Zhang 	switch (prot->cipher_type) {
510128cfb88STianjia Zhang 	case TLS_CIPHER_AES_CCM_128:
511f295b3aeSVakul Garg 		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
512f295b3aeSVakul Garg 		iv_offset = 1;
513128cfb88STianjia Zhang 		break;
514128cfb88STianjia Zhang 	case TLS_CIPHER_SM4_CCM:
515128cfb88STianjia Zhang 		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
516128cfb88STianjia Zhang 		iv_offset = 1;
517128cfb88STianjia Zhang 		break;
518f295b3aeSVakul Garg 	}
519f295b3aeSVakul Garg 
	/* nonce = salt||IV xor'ed (or concatenated, per protocol version)
	 * with the record sequence number
	 */
520f295b3aeSVakul Garg 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
521f295b3aeSVakul Garg 	       prot->iv_size + prot->salt_size);
522f295b3aeSVakul Garg 
52358790314SJakub Kicinski 	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
52458790314SJakub Kicinski 			    tls_ctx->tx.rec_seq);
52532eb67b9SDave Watson 
	/* skip the already-written TLS header; tls_encrypt_done() undoes
	 * this shift on completion
	 */
5264509de14SVakul Garg 	sge->offset += prot->prepend_size;
5274509de14SVakul Garg 	sge->length -= prot->prepend_size;
5283c4d7559SDave Watson 
529d829e9c4SDaniel Borkmann 	msg_en->sg.curr = start;
5304e6d4720SVakul Garg 
5313c4d7559SDave Watson 	aead_request_set_tfm(aead_req, ctx->aead_send);
5324509de14SVakul Garg 	aead_request_set_ad(aead_req, prot->aad_size);
533d829e9c4SDaniel Borkmann 	aead_request_set_crypt(aead_req, rec->sg_aead_in,
534d829e9c4SDaniel Borkmann 			       rec->sg_aead_out,
53532eb67b9SDave Watson 			       data_len, rec->iv_data);
536a54667f6SVakul Garg 
537a54667f6SVakul Garg 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
538d3777ceaSHerbert Xu 				  tls_encrypt_done, rec);
539a54667f6SVakul Garg 
5409932a29aSVakul Garg 	/* Add the record in tx_list */
5419932a29aSVakul Garg 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
542a42055e8SVakul Garg 	atomic_inc(&ctx->encrypt_pending);
5433c4d7559SDave Watson 
544a42055e8SVakul Garg 	rc = crypto_aead_encrypt(aead_req);
	/* NOTE(review): "!rc ||" is redundant — rc != -EINPROGRESS already
	 * covers rc == 0; condition means "completed synchronously"
	 */
545a42055e8SVakul Garg 	if (!rc || rc != -EINPROGRESS) {
546a42055e8SVakul Garg 		atomic_dec(&ctx->encrypt_pending);
		/* restore the header shift since no callback will run */
5474509de14SVakul Garg 		sge->offset -= prot->prepend_size;
5484509de14SVakul Garg 		sge->length += prot->prepend_size;
549a42055e8SVakul Garg 	}
5503c4d7559SDave Watson 
5519932a29aSVakul Garg 	if (!rc) {
5529932a29aSVakul Garg 		WRITE_ONCE(rec->tx_ready, true);
5539932a29aSVakul Garg 	} else if (rc != -EINPROGRESS) {
5549932a29aSVakul Garg 		list_del(&rec->list);
555a42055e8SVakul Garg 		return rc;
5569932a29aSVakul Garg 	}
557a42055e8SVakul Garg 
558a42055e8SVakul Garg 	/* Unhook the record from context if encryption is not failure */
559a42055e8SVakul Garg 	ctx->open_rec = NULL;
560fb0f886fSJakub Kicinski 	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
5613c4d7559SDave Watson 	return rc;
5623c4d7559SDave Watson }
5633c4d7559SDave Watson 
564d3b18ad3SJohn Fastabend static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
565d3b18ad3SJohn Fastabend 				 struct tls_rec **to, struct sk_msg *msg_opl,
566d3b18ad3SJohn Fastabend 				 struct sk_msg *msg_oen, u32 split_point,
567d3b18ad3SJohn Fastabend 				 u32 tx_overhead_size, u32 *orig_end)
568d3b18ad3SJohn Fastabend {
569d3b18ad3SJohn Fastabend 	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
570d3b18ad3SJohn Fastabend 	struct scatterlist *sge, *osge, *nsge;
571d3b18ad3SJohn Fastabend 	u32 orig_size = msg_opl->sg.size;
572d3b18ad3SJohn Fastabend 	struct scatterlist tmp = { };
573d3b18ad3SJohn Fastabend 	struct sk_msg *msg_npl;
574d3b18ad3SJohn Fastabend 	struct tls_rec *new;
575d3b18ad3SJohn Fastabend 	int ret;
576d3b18ad3SJohn Fastabend 
577d3b18ad3SJohn Fastabend 	new = tls_get_rec(sk);
578d3b18ad3SJohn Fastabend 	if (!new)
579d3b18ad3SJohn Fastabend 		return -ENOMEM;
580d3b18ad3SJohn Fastabend 	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
581d3b18ad3SJohn Fastabend 			   tx_overhead_size, 0);
582d3b18ad3SJohn Fastabend 	if (ret < 0) {
583d3b18ad3SJohn Fastabend 		tls_free_rec(sk, new);
584d3b18ad3SJohn Fastabend 		return ret;
585d3b18ad3SJohn Fastabend 	}
586d3b18ad3SJohn Fastabend 
587d3b18ad3SJohn Fastabend 	*orig_end = msg_opl->sg.end;
588d3b18ad3SJohn Fastabend 	i = msg_opl->sg.start;
589d3b18ad3SJohn Fastabend 	sge = sk_msg_elem(msg_opl, i);
590d3b18ad3SJohn Fastabend 	while (apply && sge->length) {
591d3b18ad3SJohn Fastabend 		if (sge->length > apply) {
592d3b18ad3SJohn Fastabend 			u32 len = sge->length - apply;
593d3b18ad3SJohn Fastabend 
594d3b18ad3SJohn Fastabend 			get_page(sg_page(sge));
595d3b18ad3SJohn Fastabend 			sg_set_page(&tmp, sg_page(sge), len,
596d3b18ad3SJohn Fastabend 				    sge->offset + apply);
597d3b18ad3SJohn Fastabend 			sge->length = apply;
598d3b18ad3SJohn Fastabend 			bytes += apply;
599d3b18ad3SJohn Fastabend 			apply = 0;
600d3b18ad3SJohn Fastabend 		} else {
601d3b18ad3SJohn Fastabend 			apply -= sge->length;
602d3b18ad3SJohn Fastabend 			bytes += sge->length;
603d3b18ad3SJohn Fastabend 		}
604d3b18ad3SJohn Fastabend 
605d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(i);
606d3b18ad3SJohn Fastabend 		if (i == msg_opl->sg.end)
607d3b18ad3SJohn Fastabend 			break;
608d3b18ad3SJohn Fastabend 		sge = sk_msg_elem(msg_opl, i);
609d3b18ad3SJohn Fastabend 	}
610d3b18ad3SJohn Fastabend 
611d3b18ad3SJohn Fastabend 	msg_opl->sg.end = i;
612d3b18ad3SJohn Fastabend 	msg_opl->sg.curr = i;
613d3b18ad3SJohn Fastabend 	msg_opl->sg.copybreak = 0;
614d3b18ad3SJohn Fastabend 	msg_opl->apply_bytes = 0;
615d3b18ad3SJohn Fastabend 	msg_opl->sg.size = bytes;
616d3b18ad3SJohn Fastabend 
617d3b18ad3SJohn Fastabend 	msg_npl = &new->msg_plaintext;
618d3b18ad3SJohn Fastabend 	msg_npl->apply_bytes = apply;
619d3b18ad3SJohn Fastabend 	msg_npl->sg.size = orig_size - bytes;
620d3b18ad3SJohn Fastabend 
621d3b18ad3SJohn Fastabend 	j = msg_npl->sg.start;
622d3b18ad3SJohn Fastabend 	nsge = sk_msg_elem(msg_npl, j);
623d3b18ad3SJohn Fastabend 	if (tmp.length) {
624d3b18ad3SJohn Fastabend 		memcpy(nsge, &tmp, sizeof(*nsge));
625d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(j);
626d3b18ad3SJohn Fastabend 		nsge = sk_msg_elem(msg_npl, j);
627d3b18ad3SJohn Fastabend 	}
628d3b18ad3SJohn Fastabend 
629d3b18ad3SJohn Fastabend 	osge = sk_msg_elem(msg_opl, i);
630d3b18ad3SJohn Fastabend 	while (osge->length) {
631d3b18ad3SJohn Fastabend 		memcpy(nsge, osge, sizeof(*nsge));
632d3b18ad3SJohn Fastabend 		sg_unmark_end(nsge);
633d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(i);
634d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(j);
635d3b18ad3SJohn Fastabend 		if (i == *orig_end)
636d3b18ad3SJohn Fastabend 			break;
637d3b18ad3SJohn Fastabend 		osge = sk_msg_elem(msg_opl, i);
638d3b18ad3SJohn Fastabend 		nsge = sk_msg_elem(msg_npl, j);
639d3b18ad3SJohn Fastabend 	}
640d3b18ad3SJohn Fastabend 
641d3b18ad3SJohn Fastabend 	msg_npl->sg.end = j;
642d3b18ad3SJohn Fastabend 	msg_npl->sg.curr = j;
643d3b18ad3SJohn Fastabend 	msg_npl->sg.copybreak = 0;
644d3b18ad3SJohn Fastabend 
645d3b18ad3SJohn Fastabend 	*to = new;
646d3b18ad3SJohn Fastabend 	return 0;
647d3b18ad3SJohn Fastabend }
648d3b18ad3SJohn Fastabend 
649d3b18ad3SJohn Fastabend static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
650d3b18ad3SJohn Fastabend 				  struct tls_rec *from, u32 orig_end)
651d3b18ad3SJohn Fastabend {
652d3b18ad3SJohn Fastabend 	struct sk_msg *msg_npl = &from->msg_plaintext;
653d3b18ad3SJohn Fastabend 	struct sk_msg *msg_opl = &to->msg_plaintext;
654d3b18ad3SJohn Fastabend 	struct scatterlist *osge, *nsge;
655d3b18ad3SJohn Fastabend 	u32 i, j;
656d3b18ad3SJohn Fastabend 
657d3b18ad3SJohn Fastabend 	i = msg_opl->sg.end;
658d3b18ad3SJohn Fastabend 	sk_msg_iter_var_prev(i);
659d3b18ad3SJohn Fastabend 	j = msg_npl->sg.start;
660d3b18ad3SJohn Fastabend 
661d3b18ad3SJohn Fastabend 	osge = sk_msg_elem(msg_opl, i);
662d3b18ad3SJohn Fastabend 	nsge = sk_msg_elem(msg_npl, j);
663d3b18ad3SJohn Fastabend 
664d3b18ad3SJohn Fastabend 	if (sg_page(osge) == sg_page(nsge) &&
665d3b18ad3SJohn Fastabend 	    osge->offset + osge->length == nsge->offset) {
666d3b18ad3SJohn Fastabend 		osge->length += nsge->length;
667d3b18ad3SJohn Fastabend 		put_page(sg_page(nsge));
668d3b18ad3SJohn Fastabend 	}
669d3b18ad3SJohn Fastabend 
670d3b18ad3SJohn Fastabend 	msg_opl->sg.end = orig_end;
671d3b18ad3SJohn Fastabend 	msg_opl->sg.curr = orig_end;
672d3b18ad3SJohn Fastabend 	msg_opl->sg.copybreak = 0;
673d3b18ad3SJohn Fastabend 	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
674d3b18ad3SJohn Fastabend 	msg_opl->sg.size += msg_npl->sg.size;
675d3b18ad3SJohn Fastabend 
676d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &to->msg_encrypted);
677d3b18ad3SJohn Fastabend 	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
678d3b18ad3SJohn Fastabend 
679d3b18ad3SJohn Fastabend 	kfree(from);
680d3b18ad3SJohn Fastabend }
681d3b18ad3SJohn Fastabend 
/* Encrypt the currently open record and queue it for transmission.
 *
 * If a BPF verdict applied to only part of the queued plaintext
 * (msg_pl->apply_bytes), or the encrypted buffer cannot hold plaintext plus
 * protocol overhead, the open record is first split and only the covered /
 * fitting portion is pushed; the remainder becomes the new open record.
 *
 * Returns 0 on success or a negative errno.  -EINPROGRESS indicates the
 * AEAD operation completes asynchronously (callers count it via num_async
 * rather than treating it as failure).
 */
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	/* Nothing queued -- trivially done. */
	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	/* Split when apply_bytes covers only part of the plaintext, or when
	 * the encrypted buffer is too small for the (partial) record plus
	 * overhead -- in the latter case force a split at the buffer size.
	 */
	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	/* i = index of the last plaintext sg element. */
	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	/* Plaintext ring has wrapped (end < start): chain the tail of the
	 * array back to its head so the AEAD walks a contiguous list.
	 */
	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	/* Chain the rec's fixed two-entry sg headers in front of the
	 * plaintext (AEAD source) and encrypted data (AEAD destination).
	 */
	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	/* Build the record prepend (header) directly in the first element
	 * of the encrypted buffer.
	 */
	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			/* Hard encryption failure: kill the connection, and
			 * if we had split, glue the remainder back so it is
			 * freed/accounted as one open record.
			 */
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		/* NOTE(review): async_capable is forced on in this failure
		 * path -- presumably so the async completion machinery is
		 * engaged from now on; confirm intent.
		 */
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		/* First fragment pushed; the split-off remainder becomes
		 * the new open record.
		 */
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}
7993c4d7559SDave Watson 
/* Run the open record's plaintext through the socket's BPF msg verdict
 * program (if a psock with policy is attached) and act on the result:
 *   __SK_PASS     - encrypt and queue the record on this socket,
 *   __SK_REDIRECT - hand the plaintext to another socket via
 *                   tcp_bpf_sendmsg_redir() (socket lock is dropped
 *                   around the call),
 *   __SK_DROP     - discard the covered bytes and fail with -EACCES.
 * Without a psock/policy the record is pushed directly.  *copied is
 * adjusted downward for any bytes freed or dropped.  Loops via more_data
 * while an open record with remaining apply_bytes exists.
 */
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy, redir_ingress;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		/* No BPF policy to consult: push the record as-is. */
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			/* tls_push_record() aborted the connection; free the
			 * record and report the socket error instead.
			 */
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		/* The verdict program may change msg->sg.size; remember the
		 * difference so dropped bytes are accounted correctly below.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	/* Program asked to cork more bytes than we have: ask the caller for
	 * more data, unless the record is full or the sg ring is exhausted.
	 */
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	/* Send at most the bytes the verdict applies to. */
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		/* Snapshot the msg for the target socket, shrink ours by
		 * 'send' bytes, and return their memory charge before
		 * handing them over.
		 */
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		/* Socket lock is dropped across the redirect call. */
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		/* Dropped bytes (plus any the program consumed) were never
		 * sent -- subtract them from the caller's copied count.
		 */
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		/* Once the record the verdict applied to is fully consumed,
		 * forget the verdict so the next record is re-evaluated.
		 */
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
 out_err:
	sk_psock_put(sk, psock);
	return err;
}
911d3b18ad3SJohn Fastabend 
912d3b18ad3SJohn Fastabend static int tls_sw_push_pending_record(struct sock *sk, int flags)
913d3b18ad3SJohn Fastabend {
914d3b18ad3SJohn Fastabend 	struct tls_context *tls_ctx = tls_get_ctx(sk);
915d3b18ad3SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
916d3b18ad3SJohn Fastabend 	struct tls_rec *rec = ctx->open_rec;
917d3b18ad3SJohn Fastabend 	struct sk_msg *msg_pl;
918d3b18ad3SJohn Fastabend 	size_t copied;
919d3b18ad3SJohn Fastabend 
920a42055e8SVakul Garg 	if (!rec)
921d3b18ad3SJohn Fastabend 		return 0;
922a42055e8SVakul Garg 
923d829e9c4SDaniel Borkmann 	msg_pl = &rec->msg_plaintext;
924d3b18ad3SJohn Fastabend 	copied = msg_pl->sg.size;
925d3b18ad3SJohn Fastabend 	if (!copied)
926d3b18ad3SJohn Fastabend 		return 0;
927a42055e8SVakul Garg 
928d3b18ad3SJohn Fastabend 	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
929d3b18ad3SJohn Fastabend 				   &copied, flags);
930a42055e8SVakul Garg }
931a42055e8SVakul Garg 
/* sendmsg() implementation for a TLS_SW TX socket.
 *
 * Slices user data into TLS records of at most TLS_MAX_PAYLOAD_SIZE and
 * pushes each completed record through the BPF verdict / encryption path.
 * When possible (real user pages, record being pushed now, synchronous
 * crypto) the data is zero-copy mapped into the plaintext sg instead of
 * copied.  A record may stay open across calls when MSG_MORE is set.
 *
 * Returns the number of bytes accepted, or a negative errno if nothing
 * was accepted.
 */
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);

	/* A control message may select a non-data TLS record type. */
	if (unlikely(msg->msg_controllen)) {
		ret = tls_process_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		/* Reuse the open record or start a new one. */
		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		/* Cap this iteration at the remaining record capacity. */
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		/* Zero-copy fast path: only when the record will be pushed
		 * now (full or EOR), the iter holds user pages (not kvec),
		 * and encryption completes synchronously.
		 */
		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			/* Record could not be pushed yet: undo the zero-copy
			 * mapping and rewind the iterator before retrying in
			 * copy mode.
			 */
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		/* Copy path: size the plaintext msg (backed by the encrypted
		 * buffer -- see tls_clone_plaintext_msg) for the new bytes.
		 */
		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		/* Memory freed up -- retry the allocation if it was short. */
		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	/* Normalize the error through the stream helpers (signals etc.). */
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied > 0 ? copied : ret;
}
11573c4d7559SDave Watson 
/* Common worker for sendpage() on a TLS_SW socket.
 *
 * Appends the given page range to the open record's plaintext sg via
 * sk_msg_page_add() (the page itself is referenced, no data copy) and
 * pushes the record through the BPF verdict / encryption path once it is
 * full, this is the last page (!MSG_SENDPAGE_NOTLAST), or the sg ring is
 * exhausted.  Returns the number of bytes queued, or a negative errno if
 * nothing was queued.  Caller holds the socket lock.
 */
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	ssize_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & MSG_SENDPAGE_NOTLAST);
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		/* Reuse the open record or start a new one. */
		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		/* Cap this iteration at the remaining record capacity. */
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		/* Reference the caller's page directly -- no data copy. */
		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		if (ctx->open_rec)
			goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	/* Normalize the error through the stream helpers (signals etc.). */
	ret = sk_stream_error(sk, flags, ret);
	return copied > 0 ? copied : ret;
}
12733c4d7559SDave Watson 
1274d4ffb02dSWillem de Bruijn int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1275d4ffb02dSWillem de Bruijn 			   int offset, size_t size, int flags)
1276d4ffb02dSWillem de Bruijn {
1277d4ffb02dSWillem de Bruijn 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1278d4ffb02dSWillem de Bruijn 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
1279d4ffb02dSWillem de Bruijn 		      MSG_NO_SHARED_FRAGS))
12804a5cdc60SValentin Vidic 		return -EOPNOTSUPP;
1281d4ffb02dSWillem de Bruijn 
1282d4ffb02dSWillem de Bruijn 	return tls_sw_do_sendpage(sk, page, offset, size, flags);
1283d4ffb02dSWillem de Bruijn }
1284d4ffb02dSWillem de Bruijn 
12850608c69cSJohn Fastabend int tls_sw_sendpage(struct sock *sk, struct page *page,
12860608c69cSJohn Fastabend 		    int offset, size_t size, int flags)
12870608c69cSJohn Fastabend {
128879ffe608SJakub Kicinski 	struct tls_context *tls_ctx = tls_get_ctx(sk);
12890608c69cSJohn Fastabend 	int ret;
12900608c69cSJohn Fastabend 
12910608c69cSJohn Fastabend 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
12920608c69cSJohn Fastabend 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
12934a5cdc60SValentin Vidic 		return -EOPNOTSUPP;
12940608c69cSJohn Fastabend 
1295f3221361SJakub Kicinski 	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
1296f3221361SJakub Kicinski 	if (ret)
1297f3221361SJakub Kicinski 		return ret;
12980608c69cSJohn Fastabend 	lock_sock(sk);
12990608c69cSJohn Fastabend 	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
13000608c69cSJohn Fastabend 	release_sock(sk);
130179ffe608SJakub Kicinski 	mutex_unlock(&tls_ctx->tx_lock);
13020608c69cSJohn Fastabend 	return ret;
13030608c69cSJohn Fastabend }
13040608c69cSJohn Fastabend 
/* Wait until the strparser has a complete TLS record ready.
 *
 * Returns 1 when a record is ready (and loaded via tls_strp_msg_load()),
 * 0 when the caller should look elsewhere (psock queue non-empty, socket
 * shut down for reads, or SOCK_DONE), and a negative error on socket
 * error, expired/zero timeout (-EAGAIN) or pending signal.
 *
 * @released tracks whether the socket lock was dropped at any point
 * since the record data was last looked at; it is forwarded to
 * tls_strp_msg_load() and forced to true once we sleep in
 * sk_wait_event() (which releases the socket lock while waiting).
 */
static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeo;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		/* BPF redirected data takes precedence over waiting */
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		/* Data may be sitting unparsed in sk_receive_queue;
		 * kick the strparser before deciding to sleep.
		 */
		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		/* sk_wait_event() drops the socket lock while sleeping */
		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      tls_strp_msg_ready(ctx) ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	tls_strp_msg_load(&ctx->strp, released);

	return 1;
}
1357c46234ebSDave Watson 
1358d4bd88e6SJakub Kicinski static int tls_setup_from_iter(struct iov_iter *from,
1359d829e9c4SDaniel Borkmann 			       int length, int *pages_used,
1360d829e9c4SDaniel Borkmann 			       struct scatterlist *to,
1361d829e9c4SDaniel Borkmann 			       int to_max_pages)
1362d829e9c4SDaniel Borkmann {
1363d829e9c4SDaniel Borkmann 	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1364d829e9c4SDaniel Borkmann 	struct page *pages[MAX_SKB_FRAGS];
1365d4bd88e6SJakub Kicinski 	unsigned int size = 0;
1366d829e9c4SDaniel Borkmann 	ssize_t copied, use;
1367d829e9c4SDaniel Borkmann 	size_t offset;
1368d829e9c4SDaniel Borkmann 
1369d829e9c4SDaniel Borkmann 	while (length > 0) {
1370d829e9c4SDaniel Borkmann 		i = 0;
1371d829e9c4SDaniel Borkmann 		maxpages = to_max_pages - num_elem;
1372d829e9c4SDaniel Borkmann 		if (maxpages == 0) {
1373d829e9c4SDaniel Borkmann 			rc = -EFAULT;
1374d829e9c4SDaniel Borkmann 			goto out;
1375d829e9c4SDaniel Borkmann 		}
13761ef255e2SAl Viro 		copied = iov_iter_get_pages2(from, pages,
1377d829e9c4SDaniel Borkmann 					    length,
1378d829e9c4SDaniel Borkmann 					    maxpages, &offset);
1379d829e9c4SDaniel Borkmann 		if (copied <= 0) {
1380d829e9c4SDaniel Borkmann 			rc = -EFAULT;
1381d829e9c4SDaniel Borkmann 			goto out;
1382d829e9c4SDaniel Borkmann 		}
1383d829e9c4SDaniel Borkmann 
1384d829e9c4SDaniel Borkmann 		length -= copied;
1385d829e9c4SDaniel Borkmann 		size += copied;
1386d829e9c4SDaniel Borkmann 		while (copied) {
1387d829e9c4SDaniel Borkmann 			use = min_t(int, copied, PAGE_SIZE - offset);
1388d829e9c4SDaniel Borkmann 
1389d829e9c4SDaniel Borkmann 			sg_set_page(&to[num_elem],
1390d829e9c4SDaniel Borkmann 				    pages[i], use, offset);
1391d829e9c4SDaniel Borkmann 			sg_unmark_end(&to[num_elem]);
1392d829e9c4SDaniel Borkmann 			/* We do not uncharge memory from this API */
1393d829e9c4SDaniel Borkmann 
1394d829e9c4SDaniel Borkmann 			offset = 0;
1395d829e9c4SDaniel Borkmann 			copied -= use;
1396d829e9c4SDaniel Borkmann 
1397d829e9c4SDaniel Borkmann 			i++;
1398d829e9c4SDaniel Borkmann 			num_elem++;
1399d829e9c4SDaniel Borkmann 		}
1400d829e9c4SDaniel Borkmann 	}
1401d829e9c4SDaniel Borkmann 	/* Mark the end in the last sg entry if newly added */
1402d829e9c4SDaniel Borkmann 	if (num_elem > *pages_used)
1403d829e9c4SDaniel Borkmann 		sg_mark_end(&to[num_elem - 1]);
1404d829e9c4SDaniel Borkmann out:
1405d829e9c4SDaniel Borkmann 	if (rc)
1406d4bd88e6SJakub Kicinski 		iov_iter_revert(from, size);
1407d829e9c4SDaniel Borkmann 	*pages_used = num_elem;
1408d829e9c4SDaniel Borkmann 
1409d829e9c4SDaniel Borkmann 	return rc;
1410d829e9c4SDaniel Borkmann }
1411d829e9c4SDaniel Borkmann 
1412fd31f399SJakub Kicinski static struct sk_buff *
1413fd31f399SJakub Kicinski tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
1414fd31f399SJakub Kicinski 		     unsigned int full_len)
1415fd31f399SJakub Kicinski {
1416fd31f399SJakub Kicinski 	struct strp_msg *clr_rxm;
1417fd31f399SJakub Kicinski 	struct sk_buff *clr_skb;
1418fd31f399SJakub Kicinski 	int err;
1419fd31f399SJakub Kicinski 
1420fd31f399SJakub Kicinski 	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
1421fd31f399SJakub Kicinski 				       &err, sk->sk_allocation);
1422fd31f399SJakub Kicinski 	if (!clr_skb)
1423fd31f399SJakub Kicinski 		return NULL;
1424fd31f399SJakub Kicinski 
1425fd31f399SJakub Kicinski 	skb_copy_header(clr_skb, skb);
1426fd31f399SJakub Kicinski 	clr_skb->len = full_len;
1427fd31f399SJakub Kicinski 	clr_skb->data_len = full_len;
1428fd31f399SJakub Kicinski 
1429fd31f399SJakub Kicinski 	clr_rxm = strp_msg(clr_skb);
1430fd31f399SJakub Kicinski 	clr_rxm->offset = 0;
1431fd31f399SJakub Kicinski 
1432fd31f399SJakub Kicinski 	return clr_skb;
1433fd31f399SJakub Kicinski }
1434fd31f399SJakub Kicinski 
14358a958732SJakub Kicinski /* Decrypt handlers
14368a958732SJakub Kicinski  *
1437dd47ed36SJakub Kicinski  * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
14398a958732SJakub Kicinski  *       |          Input            |         Output
14408a958732SJakub Kicinski  * -------------------------------------------------------------------
14418a958732SJakub Kicinski  *    zc | Zero-copy decrypt allowed | Zero-copy performed
14428a958732SJakub Kicinski  * async | Async decrypt allowed     | Async crypto used / in progress
14436bd116c8SJakub Kicinski  *   skb |            *              | Output skb
1444b93f5700SJakub Kicinski  *
1445b93f5700SJakub Kicinski  * If ZC decryption was performed darg.skb will point to the input skb.
14468a958732SJakub Kicinski  */
14478a958732SJakub Kicinski 
/* This function decrypts the input skb into either out_iov or in out_sg
 * or in skb buffers itself. The input parameter 'darg->zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'darg->zc' is updated.
 *
 * On success darg->skb owns the decrypted output (either a freshly
 * allocated cleartext skb, or the input strp msg in the ZC case).
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	/* Count input sg entries: the ciphertext without the TLS header */
	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		/* Zero-copy: decrypt straight into the caller's buffers */
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		/* No usable destination - decrypt into a new skb */
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 *   aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV: TLS 1.3 (and ChaCha20) derive the nonce purely from
	 * the implicit IV + sequence number; older AES-GCM carries an
	 * explicit IV in the record, right after the TLS header.
	 */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin: AAD first, then the ciphertext payload */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		/* TLS 1.3 tail byte lands in dctx, not the user buffer */
		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err)
		goto exit_free_pages;

	/* Output ownership passes to darg->skb from here on */
	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err)
			__skb_queue_tail(&ctx->async_hold, darg->skb);
		return err;
	}

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}
16190b243d00SVakul Garg 
/* SW decrypt handler (see the darg contract documented above).
 * Decrypts the current strp msg, retrying without zero-copy when an
 * opportunistic TLS 1.3 ZC decrypt turns out not to be a DATA record,
 * then trims any TLS 1.3 padding off the result.
 */
static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		/* A zero tail byte means the record had no content type -
		 * a padding violation worth its own counter.
		 */
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		/* Free the output skb unless it is the input strp msg
		 * (ZC case), which the strparser still owns.
		 */
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}
1659dd47ed36SJakub Kicinski 
/* Device (inline offload) decrypt handler (see the darg contract above).
 * Returns 0 when the record was not handled by the device (caller falls
 * back to SW decrypt), 1 when it was, and a negative error otherwise.
 */
static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	/* Device already finished the decrypt - nothing async in flight */
	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}
1709dd47ed36SJakub Kicinski 
1710dd47ed36SJakub Kicinski static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
1711dd47ed36SJakub Kicinski 			     struct tls_decrypt_arg *darg)
1712dd47ed36SJakub Kicinski {
1713dd47ed36SJakub Kicinski 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1714dd47ed36SJakub Kicinski 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1715dd47ed36SJakub Kicinski 	struct strp_msg *rxm;
1716dd47ed36SJakub Kicinski 	int err;
1717dd47ed36SJakub Kicinski 
1718d4e5db64SJakub Kicinski 	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
1719dd47ed36SJakub Kicinski 	if (!err)
1720dd47ed36SJakub Kicinski 		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
1721dd47ed36SJakub Kicinski 	if (err < 0)
1722dd47ed36SJakub Kicinski 		return err;
1723dd47ed36SJakub Kicinski 
1724dd47ed36SJakub Kicinski 	rxm = strp_msg(darg->skb);
17254509de14SVakul Garg 	rxm->offset += prot->prepend_size;
17264509de14SVakul Garg 	rxm->full_len -= prot->overhead_size;
17273547a1f9SJakub Kicinski 	tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1728dafb67f3SBoris Pismenny 
17293764ae5bSJakub Kicinski 	return 0;
1730dafb67f3SBoris Pismenny }
1731dafb67f3SBoris Pismenny 
1732541cc48bSJakub Kicinski int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
1733c46234ebSDave Watson {
17344175eac3SJakub Kicinski 	struct tls_decrypt_arg darg = { .zc = true, };
1735c46234ebSDave Watson 
1736541cc48bSJakub Kicinski 	return tls_decrypt_sg(sk, NULL, sgout, &darg);
1737c46234ebSDave Watson }
1738c46234ebSDave Watson 
173906554f4fSJakub Kicinski static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
174006554f4fSJakub Kicinski 				   u8 *control)
174106554f4fSJakub Kicinski {
174206554f4fSJakub Kicinski 	int err;
174306554f4fSJakub Kicinski 
174406554f4fSJakub Kicinski 	if (!*control) {
174506554f4fSJakub Kicinski 		*control = tlm->control;
174606554f4fSJakub Kicinski 		if (!*control)
174706554f4fSJakub Kicinski 			return -EBADMSG;
174806554f4fSJakub Kicinski 
174906554f4fSJakub Kicinski 		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
175006554f4fSJakub Kicinski 			       sizeof(*control), control);
175106554f4fSJakub Kicinski 		if (*control != TLS_RECORD_TYPE_DATA) {
175206554f4fSJakub Kicinski 			if (err || msg->msg_flags & MSG_CTRUNC)
175306554f4fSJakub Kicinski 				return -EIO;
175406554f4fSJakub Kicinski 		}
175506554f4fSJakub Kicinski 	} else if (*control != tlm->control) {
175606554f4fSJakub Kicinski 		return 0;
175706554f4fSJakub Kicinski 	}
175806554f4fSJakub Kicinski 
175906554f4fSJakub Kicinski 	return 1;
176006554f4fSJakub Kicinski }
176106554f4fSJakub Kicinski 
/* Tell the strparser the current record is fully consumed */
static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}
1766abb47dc9SJakub Kicinski 
/* This function traverses the rx_list in the tls receive context to copy
 * the already-decrypted records into the buffer provided by the caller
 * (when zero copy is not in use). Further, the records are removed from
 * the rx_list if it is not a peek case and the record has been consumed
 * completely.
 *
 * Returns the number of bytes copied, or a negative error if nothing
 * was copied before the error occurred.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	/* Skip over bytes a previous peek already delivered; they all lie
	 * in the leading records of the list.
	 */
	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto out;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto out;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto out;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from record if it is non-peek case*/
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			/* Fully consumed - drop the record from the list */
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}
	err = 0;

out:
	/* Partial progress wins over a late error */
	return copied ? : err;
}
1850692d7b5dSVakul Garg 
/* Decide whether to flush the socket backlog mid-read so softirq can keep
 * feeding the strparser. Returns true when sk_flush_backlog() was called
 * (which releases and re-acquires the socket lock), false otherwise.
 * The flush is skipped when the already-decrypted data covers the rest of
 * the request, or when less than 128K was copied since the last flush and
 * no more than one full record is queued in the TCP receive queue.
 */
185184c61fe1SJakub Kicinski static bool
1852c46b0183SJakub Kicinski tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
1853c46b0183SJakub Kicinski 		       size_t len_left, size_t decrypted, ssize_t done,
1854c46b0183SJakub Kicinski 		       size_t *flushed_at)
1855c46b0183SJakub Kicinski {
1856c46b0183SJakub Kicinski 	size_t max_rec;
1857c46b0183SJakub Kicinski 
1858c46b0183SJakub Kicinski 	if (len_left <= decrypted)
185984c61fe1SJakub Kicinski 		return false;
1860c46b0183SJakub Kicinski 
1861c46b0183SJakub Kicinski 	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
1862c46b0183SJakub Kicinski 	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
186384c61fe1SJakub Kicinski 		return false;
1864c46b0183SJakub Kicinski 
1865c46b0183SJakub Kicinski 	*flushed_at = done;
186684c61fe1SJakub Kicinski 	return sk_flush_backlog(sk);
1867c46b0183SJakub Kicinski }
1868c46b0183SJakub Kicinski 
/* Take the socket lock and become the exclusive TLS RX reader.
 * If another reader is already present, mark the lock contended and sleep
 * on ctx->wq until it goes away, bounded by the socket receive timeout.
 * Returns 0 with the socket locked and reader_present set, or a negative
 * error (-EAGAIN on timeout, sock_intr_errno() on signal) with the socket
 * lock released.
 */
186970f03fc2SJakub Kicinski static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
18704cbc325eSJakub Kicinski 			      bool nonblock)
18714cbc325eSJakub Kicinski {
18724cbc325eSJakub Kicinski 	long timeo;
1873dde06aaaSJakub Kicinski 	int err;
18744cbc325eSJakub Kicinski 
18754cbc325eSJakub Kicinski 	lock_sock(sk);
18764cbc325eSJakub Kicinski 
18774cbc325eSJakub Kicinski 	timeo = sock_rcvtimeo(sk, nonblock);
18784cbc325eSJakub Kicinski 
18794cbc325eSJakub Kicinski 	while (unlikely(ctx->reader_present)) {
18804cbc325eSJakub Kicinski 		DEFINE_WAIT_FUNC(wait, woken_wake_function);
18814cbc325eSJakub Kicinski 
18824cbc325eSJakub Kicinski 		ctx->reader_contended = 1;
18834cbc325eSJakub Kicinski 
18844cbc325eSJakub Kicinski 		add_wait_queue(&ctx->wq, &wait);
18854cbc325eSJakub Kicinski 		sk_wait_event(sk, &timeo,
18864cbc325eSJakub Kicinski 			      !READ_ONCE(ctx->reader_present), &wait);
18874cbc325eSJakub Kicinski 		remove_wait_queue(&ctx->wq, &wait);
18884cbc325eSJakub Kicinski 
1889dde06aaaSJakub Kicinski 		if (timeo <= 0) {
1890dde06aaaSJakub Kicinski 			err = -EAGAIN;
1891dde06aaaSJakub Kicinski 			goto err_unlock;
1892dde06aaaSJakub Kicinski 		}
1893dde06aaaSJakub Kicinski 		if (signal_pending(current)) {
1894dde06aaaSJakub Kicinski 			err = sock_intr_errno(timeo);
1895dde06aaaSJakub Kicinski 			goto err_unlock;
1896dde06aaaSJakub Kicinski 		}
18974cbc325eSJakub Kicinski 	}
18984cbc325eSJakub Kicinski 
18994cbc325eSJakub Kicinski 	WRITE_ONCE(ctx->reader_present, 1);
19004cbc325eSJakub Kicinski 
190170f03fc2SJakub Kicinski 	return 0;
1902dde06aaaSJakub Kicinski 
1903dde06aaaSJakub Kicinski err_unlock:
1904dde06aaaSJakub Kicinski 	release_sock(sk);
1905dde06aaaSJakub Kicinski 	return err;
19064cbc325eSJakub Kicinski }
19074cbc325eSJakub Kicinski 
/* Drop exclusive RX reader ownership and release the socket lock.
 * If contention was observed, wake one waiter; only clear the contended
 * flag when nobody is left sleeping on the queue.
 */
19074cbc325eSJakub Kicinski static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
19084cbc325eSJakub Kicinski {
19094cbc325eSJakub Kicinski 	if (unlikely(ctx->reader_contended)) {
19104cbc325eSJakub Kicinski 		if (wq_has_sleeper(&ctx->wq))
19114cbc325eSJakub Kicinski 			wake_up(&ctx->wq);
19124cbc325eSJakub Kicinski 		else
19134cbc325eSJakub Kicinski 			ctx->reader_contended = 0;
19144cbc325eSJakub Kicinski 
19154cbc325eSJakub Kicinski 		WARN_ON_ONCE(!ctx->reader_present);
19164cbc325eSJakub Kicinski 	}
19174cbc325eSJakub Kicinski 
19184cbc325eSJakub Kicinski 	WRITE_ONCE(ctx->reader_present, 0);
19194cbc325eSJakub Kicinski 	release_sock(sk);
19204cbc325eSJakub Kicinski }
19224cbc325eSJakub Kicinski 
/* Software-TLS recvmsg(): drain already-decrypted records from rx_list,
 * then pull, decrypt and copy new records until @len bytes or the
 * SO_RCVLOWAT target is met.  Decryption may go zero-copy straight into
 * the user iov (TLS 1.2 data records only), or run asynchronously, in
 * which case partially-handled skbs are parked on rx_list and drained
 * after all pending crypto completes.  Control (non-data) records end
 * the read early so the record type boundary is preserved for the
 * caller.  Returns bytes copied, or a negative error if nothing was
 * copied.
 */
1923c46234ebSDave Watson int tls_sw_recvmsg(struct sock *sk,
1924c46234ebSDave Watson 		   struct msghdr *msg,
1925c46234ebSDave Watson 		   size_t len,
1926c46234ebSDave Watson 		   int flags,
1927c46234ebSDave Watson 		   int *addr_len)
1928c46234ebSDave Watson {
1929c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1930f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
19314509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1932cbbdee99SJakub Kicinski 	ssize_t decrypted = 0, async_copy_bytes = 0;
1933d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
1934692d7b5dSVakul Garg 	unsigned char control = 0;
1935c46b0183SJakub Kicinski 	size_t flushed_at = 0;
1936c46234ebSDave Watson 	struct strp_msg *rxm;
19372b794c40SVakul Garg 	struct tls_msg *tlm;
1938c46234ebSDave Watson 	ssize_t copied = 0;
19397da18bccSJakub Kicinski 	bool async = false;
194070f03fc2SJakub Kicinski 	int target, err;
194100e23707SDavid Howells 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1942692d7b5dSVakul Garg 	bool is_peek = flags & MSG_PEEK;
194384c61fe1SJakub Kicinski 	bool released = true;
1944e91de6afSJohn Fastabend 	bool bpf_strp_enabled;
1945ba13609dSJakub Kicinski 	bool zc_capable;
1946c46234ebSDave Watson 
1947c46234ebSDave Watson 	if (unlikely(flags & MSG_ERRQUEUE))
1948c46234ebSDave Watson 		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1949c46234ebSDave Watson 
1950d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
195170f03fc2SJakub Kicinski 	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
195270f03fc2SJakub Kicinski 	if (err < 0)
195370f03fc2SJakub Kicinski 		return err;
1954e91de6afSJohn Fastabend 	bpf_strp_enabled = sk_psock_strp_enabled(psock);
1955c46234ebSDave Watson 
1956f314bfeeSJakub Kicinski 	/* If crypto failed the connection is broken */
1957f314bfeeSJakub Kicinski 	err = ctx->async_wait.err;
1958f314bfeeSJakub Kicinski 	if (err)
1959f314bfeeSJakub Kicinski 		goto end;
1960f314bfeeSJakub Kicinski 
1961692d7b5dSVakul Garg 	/* Process pending decrypted records. It must be non-zero-copy */
1962cbbdee99SJakub Kicinski 	err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
19634dcdd971SJakub Kicinski 	if (err < 0)
1964692d7b5dSVakul Garg 		goto end;
1965692d7b5dSVakul Garg 
1966d5123eddSJakub Kicinski 	copied = err;
196746a16959SJakub Kicinski 	if (len <= copied)
1968bfc06e1aSJakub Kicinski 		goto end;
196946a16959SJakub Kicinski 
197046a16959SJakub Kicinski 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
197146a16959SJakub Kicinski 	len = len - copied;
1972692d7b5dSVakul Garg 
	/* Zero-copy decrypt into the user buffer is only safe for plain
	 * iovec reads that actually consume the data.
	 */
1973ba13609dSJakub Kicinski 	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
197488527790SJakub Kicinski 		ctx->zc_capable;
1975bfc06e1aSJakub Kicinski 	decrypted = 0;
1976b92a13d4SJakub Kicinski 	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
19776bd116c8SJakub Kicinski 		struct tls_decrypt_arg darg;
19789bdf75ccSJakub Kicinski 		int to_decrypt, chunk;
1979c46234ebSDave Watson 
198070f03fc2SJakub Kicinski 		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
198170f03fc2SJakub Kicinski 				      released);
198235560b7fSJakub Kicinski 		if (err <= 0) {
			/* No TLS record available: fall back to any BPF
			 * redirected data queued on the psock before
			 * giving up.
			 */
1983d3b18ad3SJohn Fastabend 			if (psock) {
19840775639cSJakub Kicinski 				chunk = sk_msg_recvmsg(sk, psock, msg, len,
19852bc793e3SCong Wang 						       flags);
1986008141deSJakub Kicinski 				if (chunk > 0) {
1987008141deSJakub Kicinski 					decrypted += chunk;
1988008141deSJakub Kicinski 					len -= chunk;
1989008141deSJakub Kicinski 					continue;
1990008141deSJakub Kicinski 				}
1991d3b18ad3SJohn Fastabend 			}
1992c46234ebSDave Watson 			goto recv_end;
1993d3b18ad3SJohn Fastabend 		}
1994c46234ebSDave Watson 
19956bd116c8SJakub Kicinski 		memset(&darg.inargs, 0, sizeof(darg.inargs));
19966bd116c8SJakub Kicinski 
199784c61fe1SJakub Kicinski 		rxm = strp_msg(tls_strp_msg(ctx));
199884c61fe1SJakub Kicinski 		tlm = tls_msg(tls_strp_msg(ctx));
199994524d8fSVakul Garg 
20004509de14SVakul Garg 		to_decrypt = rxm->full_len - prot->overhead_size;
2001fedf201eSDave Watson 
2002ba13609dSJakub Kicinski 		if (zc_capable && to_decrypt <= len &&
2003ba13609dSJakub Kicinski 		    tlm->control == TLS_RECORD_TYPE_DATA)
20044175eac3SJakub Kicinski 			darg.zc = true;
2005fedf201eSDave Watson 
2006c0ab4732SVakul Garg 		/* Do not use async mode if record is non-data */
2007c3f6bb74SJakub Kicinski 		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
20084175eac3SJakub Kicinski 			darg.async = ctx->async_capable;
2009c0ab4732SVakul Garg 		else
20104175eac3SJakub Kicinski 			darg.async = false;
2011c0ab4732SVakul Garg 
2012dd47ed36SJakub Kicinski 		err = tls_rx_one_record(sk, msg, &darg);
20133547a1f9SJakub Kicinski 		if (err < 0) {
2014da353facSDaniel Jordan 			tls_err_abort(sk, -EBADMSG);
2015fedf201eSDave Watson 			goto recv_end;
2016fedf201eSDave Watson 		}
2017fedf201eSDave Watson 
20183547a1f9SJakub Kicinski 		async |= darg.async;
20192b794c40SVakul Garg 
20202b794c40SVakul Garg 		/* If the type of records being processed is not known yet,
20212b794c40SVakul Garg 		 * set it to record type just dequeued. If it is already known,
20222b794c40SVakul Garg 		 * but does not match the record type just dequeued, go to end.
20232b794c40SVakul Garg 		 * We always get record type here since for tls1.2, record type
20242b794c40SVakul Garg 		 * is known just after record is dequeued from stream parser.
20252b794c40SVakul Garg 		 * For tls1.3, we disable async.
20262b794c40SVakul Garg 		 */
2027b93f5700SJakub Kicinski 		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
2028abb47dc9SJakub Kicinski 		if (err <= 0) {
2029b93f5700SJakub Kicinski 			DEBUG_NET_WARN_ON_ONCE(darg.zc);
2030abb47dc9SJakub Kicinski 			tls_rx_rec_done(ctx);
2031abb47dc9SJakub Kicinski put_on_rx_list_err:
2032b93f5700SJakub Kicinski 			__skb_queue_tail(&ctx->rx_list, darg.skb);
20332b794c40SVakul Garg 			goto recv_end;
2034abb47dc9SJakub Kicinski 		}
2035fedf201eSDave Watson 
2036c46b0183SJakub Kicinski 		/* periodically flush backlog, and feed strparser */
203784c61fe1SJakub Kicinski 		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
203884c61fe1SJakub Kicinski 						  decrypted + copied,
203984c61fe1SJakub Kicinski 						  &flushed_at);
2040c46b0183SJakub Kicinski 
2041abb47dc9SJakub Kicinski 		/* TLS 1.3 may have updated the length by more than overhead */
2042b93f5700SJakub Kicinski 		rxm = strp_msg(darg.skb);
2043abb47dc9SJakub Kicinski 		chunk = rxm->full_len;
2044abb47dc9SJakub Kicinski 		tls_rx_rec_done(ctx);
2045b1a2c178SJakub Kicinski 
2046cbbdee99SJakub Kicinski 		if (!darg.zc) {
2047cbbdee99SJakub Kicinski 			bool partially_consumed = chunk > len;
2048b93f5700SJakub Kicinski 			struct sk_buff *skb = darg.skb;
2049b93f5700SJakub Kicinski 
2050e20691faSJakub Kicinski 			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
2051cbbdee99SJakub Kicinski 
20529bdf75ccSJakub Kicinski 			if (async) {
2053cbbdee99SJakub Kicinski 				/* TLS 1.2-only, to_decrypt must be text len */
20549bdf75ccSJakub Kicinski 				chunk = min_t(int, to_decrypt, len);
2055cbbdee99SJakub Kicinski 				async_copy_bytes += chunk;
2056008141deSJakub Kicinski put_on_rx_list:
2057f940b6efSJakub Kicinski 				decrypted += chunk;
2058f940b6efSJakub Kicinski 				len -= chunk;
2059008141deSJakub Kicinski 				__skb_queue_tail(&ctx->rx_list, skb);
2060f940b6efSJakub Kicinski 				continue;
20619bdf75ccSJakub Kicinski 			}
2062c0ab4732SVakul Garg 
2063e91de6afSJohn Fastabend 			if (bpf_strp_enabled) {
206484c61fe1SJakub Kicinski 				released = true;
2065e91de6afSJohn Fastabend 				err = sk_psock_tls_strp_read(psock, skb);
2066e91de6afSJohn Fastabend 				if (err != __SK_PASS) {
2067e91de6afSJohn Fastabend 					rxm->offset = rxm->offset + rxm->full_len;
2068e91de6afSJohn Fastabend 					rxm->full_len = 0;
2069e91de6afSJohn Fastabend 					if (err == __SK_DROP)
2070e91de6afSJohn Fastabend 						consume_skb(skb);
2071e91de6afSJohn Fastabend 					continue;
2072e91de6afSJohn Fastabend 				}
2073e91de6afSJohn Fastabend 			}
2074e91de6afSJohn Fastabend 
2075f940b6efSJakub Kicinski 			if (partially_consumed)
2076692d7b5dSVakul Garg 				chunk = len;
207794524d8fSVakul Garg 
2078692d7b5dSVakul Garg 			err = skb_copy_datagram_msg(skb, rxm->offset,
2079692d7b5dSVakul Garg 						    msg, chunk);
2080abb47dc9SJakub Kicinski 			if (err < 0)
2081abb47dc9SJakub Kicinski 				goto put_on_rx_list_err;
2082692d7b5dSVakul Garg 
2083f940b6efSJakub Kicinski 			if (is_peek)
2084008141deSJakub Kicinski 				goto put_on_rx_list;
2085f940b6efSJakub Kicinski 
2086f940b6efSJakub Kicinski 			if (partially_consumed) {
2087f940b6efSJakub Kicinski 				rxm->offset += chunk;
2088f940b6efSJakub Kicinski 				rxm->full_len -= chunk;
2089008141deSJakub Kicinski 				goto put_on_rx_list;
2090692d7b5dSVakul Garg 			}
2091b93f5700SJakub Kicinski 
2092b93f5700SJakub Kicinski 			consume_skb(skb);
2093692d7b5dSVakul Garg 		}
2094c46234ebSDave Watson 
2095692d7b5dSVakul Garg 		decrypted += chunk;
2096692d7b5dSVakul Garg 		len -= chunk;
2097692d7b5dSVakul Garg 
2098f940b6efSJakub Kicinski 		/* Return full control message to userspace before trying
2099f940b6efSJakub Kicinski 		 * to parse another message type
2100c46234ebSDave Watson 		 */
2101c46234ebSDave Watson 		msg->msg_flags |= MSG_EOR;
21023fe16edfSVadim Fedorenko 		if (control != TLS_RECORD_TYPE_DATA)
2103f940b6efSJakub Kicinski 			break;
210404b25a54SJakub Kicinski 	}
2105c46234ebSDave Watson 
2106c46234ebSDave Watson recv_end:
21077da18bccSJakub Kicinski 	if (async) {
2108f314bfeeSJakub Kicinski 		int ret, pending;
21097da18bccSJakub Kicinski 
211094524d8fSVakul Garg 		/* Wait for all previously submitted records to be decrypted */
21110cada332SVinay Kumar Yadav 		spin_lock_bh(&ctx->decrypt_compl_lock);
211237943f04SJakub Kicinski 		reinit_completion(&ctx->async_wait.completion);
21130cada332SVinay Kumar Yadav 		pending = atomic_read(&ctx->decrypt_pending);
21140cada332SVinay Kumar Yadav 		spin_unlock_bh(&ctx->decrypt_compl_lock);
2115c618db2aSJakub Kicinski 		ret = 0;
2116c618db2aSJakub Kicinski 		if (pending)
2117f314bfeeSJakub Kicinski 			ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2118c618db2aSJakub Kicinski 		__skb_queue_purge(&ctx->async_hold);
2119c618db2aSJakub Kicinski 
2120f314bfeeSJakub Kicinski 		if (ret) {
2121f314bfeeSJakub Kicinski 			if (err >= 0 || err == -EINPROGRESS)
2122f314bfeeSJakub Kicinski 				err = ret;
2123692d7b5dSVakul Garg 			decrypted = 0;
2124692d7b5dSVakul Garg 			goto end;
212594524d8fSVakul Garg 		}
21260cada332SVinay Kumar Yadav 
2127692d7b5dSVakul Garg 		/* Drain records from the rx_list & copy if required */
2128692d7b5dSVakul Garg 		if (is_peek || is_kvec)
212906554f4fSJakub Kicinski 			err = process_rx_list(ctx, msg, &control, copied,
2130cbbdee99SJakub Kicinski 					      decrypted, is_peek);
2131692d7b5dSVakul Garg 		else
213206554f4fSJakub Kicinski 			err = process_rx_list(ctx, msg, &control, 0,
2133cbbdee99SJakub Kicinski 					      async_copy_bytes, is_peek);
21344d42cd6bSJakub Kicinski 		decrypted += max(err, 0);
2135692d7b5dSVakul Garg 	}
2136692d7b5dSVakul Garg 
2137692d7b5dSVakul Garg 	copied += decrypted;
2138692d7b5dSVakul Garg 
2139692d7b5dSVakul Garg end:
21404cbc325eSJakub Kicinski 	tls_rx_reader_unlock(sk, ctx);
2141d3b18ad3SJohn Fastabend 	if (psock)
2142d3b18ad3SJohn Fastabend 		sk_psock_put(sk, psock);
2143c46234ebSDave Watson 	return copied ? : err;
2144c46234ebSDave Watson }
2145c46234ebSDave Watson 
/* splice_read() for software TLS: produce at most one record per call.
 * Prefer an already-decrypted record from rx_list; otherwise wait for,
 * and synchronously decrypt, the next record. Control records are
 * rejected with -EINVAL (splice has no way to convey the record type).
 * A record that was not fully spliced is re-queued at the head of
 * rx_list for the next call. Returns bytes spliced, or a negative error
 * if nothing was spliced.
 */
2146c46234ebSDave Watson ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
2147c46234ebSDave Watson 			   struct pipe_inode_info *pipe,
2148c46234ebSDave Watson 			   size_t len, unsigned int flags)
2149c46234ebSDave Watson {
2150c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
2151f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2152c46234ebSDave Watson 	struct strp_msg *rxm = NULL;
2153c46234ebSDave Watson 	struct sock *sk = sock->sk;
2154c3f6bb74SJakub Kicinski 	struct tls_msg *tlm;
2155c46234ebSDave Watson 	struct sk_buff *skb;
2156c46234ebSDave Watson 	ssize_t copied = 0;
2157c46234ebSDave Watson 	int chunk;
215870f03fc2SJakub Kicinski 	int err;
2159c46234ebSDave Watson 
216070f03fc2SJakub Kicinski 	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
216170f03fc2SJakub Kicinski 	if (err < 0)
216270f03fc2SJakub Kicinski 		return err;
2163c46234ebSDave Watson 
2164abb47dc9SJakub Kicinski 	if (!skb_queue_empty(&ctx->rx_list)) {
2165e062fe99SJakub Kicinski 		skb = __skb_dequeue(&ctx->rx_list);
2166e062fe99SJakub Kicinski 	} else {
21676bd116c8SJakub Kicinski 		struct tls_decrypt_arg darg;
21684175eac3SJakub Kicinski 
216935560b7fSJakub Kicinski 		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
217070f03fc2SJakub Kicinski 				      true);
217135560b7fSJakub Kicinski 		if (err <= 0)
2172c46234ebSDave Watson 			goto splice_read_end;
2173c46234ebSDave Watson 
21746bd116c8SJakub Kicinski 		memset(&darg.inargs, 0, sizeof(darg.inargs));
21756bd116c8SJakub Kicinski 
2176541cc48bSJakub Kicinski 		err = tls_rx_one_record(sk, NULL, &darg);
2177520493f6SJakub Kicinski 		if (err < 0) {
2178520493f6SJakub Kicinski 			tls_err_abort(sk, -EBADMSG);
2179520493f6SJakub Kicinski 			goto splice_read_end;
2180520493f6SJakub Kicinski 		}
2181abb47dc9SJakub Kicinski 
2182abb47dc9SJakub Kicinski 		tls_rx_rec_done(ctx);
21836bd116c8SJakub Kicinski 		skb = darg.skb;
2184e062fe99SJakub Kicinski 	}
2185fedf201eSDave Watson 
2186c3f6bb74SJakub Kicinski 	rxm = strp_msg(skb);
2187c3f6bb74SJakub Kicinski 	tlm = tls_msg(skb);
2188c3f6bb74SJakub Kicinski 
2189c46234ebSDave Watson 	/* splice does not support reading control messages */
2190c3f6bb74SJakub Kicinski 	if (tlm->control != TLS_RECORD_TYPE_DATA) {
21914a5cdc60SValentin Vidic 		err = -EINVAL;
2192abb47dc9SJakub Kicinski 		goto splice_requeue;
2193c46234ebSDave Watson 	}
2194c46234ebSDave Watson 
2195c46234ebSDave Watson 	chunk = min_t(unsigned int, rxm->full_len, len);
2196c46234ebSDave Watson 	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2197c46234ebSDave Watson 	if (copied < 0)
2198abb47dc9SJakub Kicinski 		goto splice_requeue;
2199c46234ebSDave Watson 
2200e062fe99SJakub Kicinski 	if (chunk < rxm->full_len) {
2201e062fe99SJakub Kicinski 		rxm->offset += len;
2202e062fe99SJakub Kicinski 		rxm->full_len -= len;
2203abb47dc9SJakub Kicinski 		goto splice_requeue;
2204e062fe99SJakub Kicinski 	}
2205c46234ebSDave Watson 
2206abb47dc9SJakub Kicinski 	consume_skb(skb);
2207abb47dc9SJakub Kicinski 
2208c46234ebSDave Watson splice_read_end:
22094cbc325eSJakub Kicinski 	tls_rx_reader_unlock(sk, ctx);
2210c46234ebSDave Watson 	return copied ? : err;
2211abb47dc9SJakub Kicinski 
2212abb47dc9SJakub Kicinski splice_requeue:
2213abb47dc9SJakub Kicinski 	__skb_queue_head(&ctx->rx_list, skb);
2214abb47dc9SJakub Kicinski 	goto splice_read_end;
2215c46234ebSDave Watson }
2216c46234ebSDave Watson 
/* Poll helper: readable when any of the following holds data - the psock
 * ingress queue (BPF redirect), a complete record in the strparser, or
 * an already-decrypted record on rx_list.
 */
22177b50ecfcSCong Wang bool tls_sw_sock_is_readable(struct sock *sk)
2218c46234ebSDave Watson {
2219c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2220f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2221d3b18ad3SJohn Fastabend 	bool ingress_empty = true;
2222d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
2223c46234ebSDave Watson 
2224d3b18ad3SJohn Fastabend 	rcu_read_lock();
2225d3b18ad3SJohn Fastabend 	psock = sk_psock(sk);
2226d3b18ad3SJohn Fastabend 	if (psock)
2227d3b18ad3SJohn Fastabend 		ingress_empty = list_empty(&psock->ingress_msg);
2228d3b18ad3SJohn Fastabend 	rcu_read_unlock();
2229c46234ebSDave Watson 
2230b92a13d4SJakub Kicinski 	return !ingress_empty || tls_strp_msg_ready(ctx) ||
223113aecb17SJakub Kicinski 		!skb_queue_empty(&ctx->rx_list);
2232c46234ebSDave Watson }
2233c46234ebSDave Watson 
/* strparser callback: parse the 5-byte TLS record header at the current
 * stream offset and return the full record length (header + payload),
 * 0 if more data is needed, or a negative error (which also aborts the
 * connection via tls_err_abort()). Also validates the legacy version
 * bytes and feeds the record boundary to device RX resync.
 */
223484c61fe1SJakub Kicinski int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
2235c46234ebSDave Watson {
2236c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
22374509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
22383463e51dSKees Cook 	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2239c46234ebSDave Watson 	size_t cipher_overhead;
2240c46234ebSDave Watson 	size_t data_len = 0;
2241c46234ebSDave Watson 	int ret;
2242c46234ebSDave Watson 
2243c46234ebSDave Watson 	/* Verify that we have a full TLS header, or wait for more data */
224484c61fe1SJakub Kicinski 	if (strp->stm.offset + prot->prepend_size > skb->len)
2245c46234ebSDave Watson 		return 0;
2246c46234ebSDave Watson 
22473463e51dSKees Cook 	/* Sanity-check size of on-stack buffer. */
22484509de14SVakul Garg 	if (WARN_ON(prot->prepend_size > sizeof(header))) {
22493463e51dSKees Cook 		ret = -EINVAL;
22503463e51dSKees Cook 		goto read_failure;
22513463e51dSKees Cook 	}
22523463e51dSKees Cook 
2253c46234ebSDave Watson 	/* Linearize header to local buffer */
225484c61fe1SJakub Kicinski 	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
2255c46234ebSDave Watson 	if (ret < 0)
2256c46234ebSDave Watson 		goto read_failure;
2257c46234ebSDave Watson 
	/* Record the ContentType byte for the parsed message */
225884c61fe1SJakub Kicinski 	strp->mark = header[0];
2259c46234ebSDave Watson 
	/* Payload length is big-endian bytes 3-4 of the record header */
2260c46234ebSDave Watson 	data_len = ((header[4] & 0xFF) | (header[3] << 8));
2261c46234ebSDave Watson 
22624509de14SVakul Garg 	cipher_overhead = prot->tag_size;
2263a6acbe62SVadim Fedorenko 	if (prot->version != TLS_1_3_VERSION &&
2264a6acbe62SVadim Fedorenko 	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
22654509de14SVakul Garg 		cipher_overhead += prot->iv_size;
2266c46234ebSDave Watson 
2267130b392cSDave Watson 	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
22684509de14SVakul Garg 	    prot->tail_size) {
2269c46234ebSDave Watson 		ret = -EMSGSIZE;
2270c46234ebSDave Watson 		goto read_failure;
2271c46234ebSDave Watson 	}
2272c46234ebSDave Watson 	if (data_len < cipher_overhead) {
2273c46234ebSDave Watson 		ret = -EBADMSG;
2274c46234ebSDave Watson 		goto read_failure;
2275c46234ebSDave Watson 	}
2276c46234ebSDave Watson 
2277130b392cSDave Watson 	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2278130b392cSDave Watson 	if (header[1] != TLS_1_2_VERSION_MINOR ||
2279130b392cSDave Watson 	    header[2] != TLS_1_2_VERSION_MAJOR) {
2280c46234ebSDave Watson 		ret = -EINVAL;
2281c46234ebSDave Watson 		goto read_failure;
2282c46234ebSDave Watson 	}
2283be2fbc15SJakub Kicinski 
2284f953d33bSJakub Kicinski 	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
228584c61fe1SJakub Kicinski 				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
2286c46234ebSDave Watson 	return data_len + TLS_HEADER_SIZE;
2287c46234ebSDave Watson 
2288c46234ebSDave Watson read_failure:
2289c46234ebSDave Watson 	tls_err_abort(strp->sk, ret);
2290c46234ebSDave Watson 
2291c46234ebSDave Watson 	return ret;
2292c46234ebSDave Watson }
2293c46234ebSDave Watson 
/* strparser callback: a full record is ready - notify the original
 * (pre-TLS) data_ready handler saved at strparser arm time.
 */
229484c61fe1SJakub Kicinski void tls_rx_msg_ready(struct tls_strparser *strp)
2295c46234ebSDave Watson {
229684c61fe1SJakub Kicinski 	struct tls_sw_context_rx *ctx;
2297c46234ebSDave Watson 
229884c61fe1SJakub Kicinski 	ctx = container_of(strp, struct tls_sw_context_rx, strp);
2299ad13acceSVakul Garg 	ctx->saved_data_ready(strp->sk);
2300c46234ebSDave Watson }
2301c46234ebSDave Watson 
/* sk_data_ready replacement installed by tls_sw_strparser_arm().
 * Feeds newly arrived TCP data to the TLS strparser (forcing GFP_ATOMIC
 * since this can run from softirq context), then wakes any psock
 * consumer that has queued ingress messages.
 */
2302c46234ebSDave Watson static void tls_data_ready(struct sock *sk)
2303c46234ebSDave Watson {
2304c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2305f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2306d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
2307*74836ec8SJakub Kicinski 	gfp_t alloc_save;
2308c46234ebSDave Watson 
230940e0b090SPeilin Ye 	trace_sk_data_ready(sk);
231040e0b090SPeilin Ye 
2311*74836ec8SJakub Kicinski 	alloc_save = sk->sk_allocation;
2312*74836ec8SJakub Kicinski 	sk->sk_allocation = GFP_ATOMIC;
231384c61fe1SJakub Kicinski 	tls_strp_data_ready(&ctx->strp);
2314*74836ec8SJakub Kicinski 	sk->sk_allocation = alloc_save;
2315d3b18ad3SJohn Fastabend 
2316d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
231762b4011fSXiyu Yang 	if (psock) {
231862b4011fSXiyu Yang 		if (!list_empty(&psock->ingress_msg))
2319d3b18ad3SJohn Fastabend 			ctx->saved_data_ready(sk);
2320d3b18ad3SJohn Fastabend 		sk_psock_put(sk, psock);
2321d3b18ad3SJohn Fastabend 	}
2322c46234ebSDave Watson }
2323c46234ebSDave Watson 
/* Mark the TX path as closing and cancel the delayed TX worker.
 * BIT_TX_SCHEDULED is set as well so no-one re-schedules the work
 * after the synchronous cancel below.
 */
2324f87e62d4SJohn Fastabend void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2325f87e62d4SJohn Fastabend {
2326f87e62d4SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2327f87e62d4SJohn Fastabend 
2328f87e62d4SJohn Fastabend 	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2329f87e62d4SJohn Fastabend 	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2330f87e62d4SJohn Fastabend 	cancel_delayed_work_sync(&ctx->tx_work.work);
2331f87e62d4SJohn Fastabend }
2332f87e62d4SJohn Fastabend 
/* Tear down the TX side: wait for in-flight async encryptions, push out
 * any fully-encrypted records, then free the partially sent record (if
 * any) and all remaining entries on tx_list, the AEAD transform, and the
 * open (not yet encrypted) record.
 */
2333313ab004SJohn Fastabend void tls_sw_release_resources_tx(struct sock *sk)
23343c4d7559SDave Watson {
23353c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2336f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2337a42055e8SVakul Garg 	struct tls_rec *rec, *tmp;
233838f7e1c0SRohit Maheshwari 	int pending;
2339a42055e8SVakul Garg 
2340a42055e8SVakul Garg 	/* Wait for any pending async encryptions to complete */
234138f7e1c0SRohit Maheshwari 	spin_lock_bh(&ctx->encrypt_compl_lock);
234238f7e1c0SRohit Maheshwari 	ctx->async_notify = true;
234338f7e1c0SRohit Maheshwari 	pending = atomic_read(&ctx->encrypt_pending);
234438f7e1c0SRohit Maheshwari 	spin_unlock_bh(&ctx->encrypt_compl_lock);
234538f7e1c0SRohit Maheshwari 
234638f7e1c0SRohit Maheshwari 	if (pending)
2347a42055e8SVakul Garg 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2348a42055e8SVakul Garg 
2349a42055e8SVakul Garg 	tls_tx_records(sk, -1);
2350a42055e8SVakul Garg 
23519932a29aSVakul Garg 	/* Free up un-sent records in tx_list. First, free
2352a42055e8SVakul Garg 	 * the partially sent record if any at head of tx_list.
2353a42055e8SVakul Garg 	 */
2354c5daa6ccSJakub Kicinski 	if (tls_ctx->partially_sent_record) {
2355c5daa6ccSJakub Kicinski 		tls_free_partial_record(sk, tls_ctx);
23569932a29aSVakul Garg 		rec = list_first_entry(&ctx->tx_list,
2357a42055e8SVakul Garg 				       struct tls_rec, list);
2358a42055e8SVakul Garg 		list_del(&rec->list);
2359d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_plaintext);
2360a42055e8SVakul Garg 		kfree(rec);
2361a42055e8SVakul Garg 	}
2362a42055e8SVakul Garg 
23639932a29aSVakul Garg 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2364a42055e8SVakul Garg 		list_del(&rec->list);
2365d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_encrypted);
2366d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_plaintext);
2367a42055e8SVakul Garg 		kfree(rec);
2368a42055e8SVakul Garg 	}
23693c4d7559SDave Watson 
23703c4d7559SDave Watson 	crypto_free_aead(ctx->aead_send);
2371c774973eSVakul Garg 	tls_free_open_rec(sk);
2372313ab004SJohn Fastabend }
2373313ab004SJohn Fastabend 
/* Free the software TX context itself; resources inside it must already
 * have been released by tls_sw_release_resources_tx().
 */
2374313ab004SJohn Fastabend void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2375313ab004SJohn Fastabend {
2376313ab004SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2377f66de3eeSBoris Pismenny 
2378f66de3eeSBoris Pismenny 	kfree(ctx);
2379f66de3eeSBoris Pismenny }
2380f66de3eeSBoris Pismenny 
/* Tear down the RX side: free key material, purge decrypted records,
 * free the AEAD, stop the strparser, and restore the socket's original
 * data_ready callback if it was ever swapped in.
 */
238139f56e1aSBoris Pismenny void tls_sw_release_resources_rx(struct sock *sk)
2382f66de3eeSBoris Pismenny {
2383f66de3eeSBoris Pismenny 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2384f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2385f66de3eeSBoris Pismenny 
238612c76861SJakub Kicinski 	kfree(tls_ctx->rx.rec_seq);
238712c76861SJakub Kicinski 	kfree(tls_ctx->rx.iv);
238812c76861SJakub Kicinski 
2389c46234ebSDave Watson 	if (ctx->aead_recv) {
2390a30295c4SJakub Kicinski 		__skb_queue_purge(&ctx->rx_list);
2391c46234ebSDave Watson 		crypto_free_aead(ctx->aead_recv);
239284c61fe1SJakub Kicinski 		tls_strp_stop(&ctx->strp);
2393313ab004SJohn Fastabend 		/* If tls_sw_strparser_arm() was not called (cleanup paths)
239484c61fe1SJakub Kicinski 		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
2395313ab004SJohn Fastabend 		 * never swapped.
2396313ab004SJohn Fastabend 		 */
2397313ab004SJohn Fastabend 		if (ctx->saved_data_ready) {
2398c46234ebSDave Watson 			write_lock_bh(&sk->sk_callback_lock);
2399c46234ebSDave Watson 			sk->sk_data_ready = ctx->saved_data_ready;
2400c46234ebSDave Watson 			write_unlock_bh(&sk->sk_callback_lock);
2401c46234ebSDave Watson 		}
240239f56e1aSBoris Pismenny 	}
2403313ab004SJohn Fastabend }
2404313ab004SJohn Fastabend 
/* Finalize the strparser after RX resources have been released. */
2405313ab004SJohn Fastabend void tls_sw_strparser_done(struct tls_context *tls_ctx)
2406313ab004SJohn Fastabend {
2407313ab004SJohn Fastabend 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2408313ab004SJohn Fastabend 
240984c61fe1SJakub Kicinski 	tls_strp_done(&ctx->strp);
2410313ab004SJohn Fastabend }
2411313ab004SJohn Fastabend 
/* Free the software RX context itself; resources inside it must already
 * have been released by tls_sw_release_resources_rx().
 */
2412313ab004SJohn Fastabend void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2413313ab004SJohn Fastabend {
2414313ab004SJohn Fastabend 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2415313ab004SJohn Fastabend 
2416313ab004SJohn Fastabend 	kfree(ctx);
2417313ab004SJohn Fastabend }
241839f56e1aSBoris Pismenny 
/* Convenience wrapper: release all RX resources and free the RX context. */
241939f56e1aSBoris Pismenny void tls_sw_free_resources_rx(struct sock *sk)
242039f56e1aSBoris Pismenny {
242139f56e1aSBoris Pismenny 	struct tls_context *tls_ctx = tls_get_ctx(sk);
242239f56e1aSBoris Pismenny 
242339f56e1aSBoris Pismenny 	tls_sw_release_resources_rx(sk);
2424313ab004SJohn Fastabend 	tls_sw_free_ctx_rx(tls_ctx);
24253c4d7559SDave Watson }
24263c4d7559SDave Watson 
24279932a29aSVakul Garg /* The work handler to transmit the encrypted records in tx_list */
2428a42055e8SVakul Garg static void tx_work_handler(struct work_struct *work)
2429a42055e8SVakul Garg {
2430a42055e8SVakul Garg 	struct delayed_work *delayed_work = to_delayed_work(work);
2431a42055e8SVakul Garg 	struct tx_work *tx_work = container_of(delayed_work,
2432a42055e8SVakul Garg 					       struct tx_work, work);
2433a42055e8SVakul Garg 	struct sock *sk = tx_work->sk;
2434a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2435f87e62d4SJohn Fastabend 	struct tls_sw_context_tx *ctx;
2436f87e62d4SJohn Fastabend 
	/* Bail out if the TLS context is gone or TX is being torn down */
2437f87e62d4SJohn Fastabend 	if (unlikely(!tls_ctx))
2438f87e62d4SJohn Fastabend 		return;
2439f87e62d4SJohn Fastabend 
2440f87e62d4SJohn Fastabend 	ctx = tls_sw_ctx_tx(tls_ctx);
2441f87e62d4SJohn Fastabend 	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2442f87e62d4SJohn Fastabend 		return;
2443a42055e8SVakul Garg 
2444a42055e8SVakul Garg 	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2445a42055e8SVakul Garg 		return;
2446f3221361SJakub Kicinski 
2447f3221361SJakub Kicinski 	if (mutex_trylock(&tls_ctx->tx_lock)) {
2448a42055e8SVakul Garg 		lock_sock(sk);
2449a42055e8SVakul Garg 		tls_tx_records(sk, -1);
2450a42055e8SVakul Garg 		release_sock(sk);
245179ffe608SJakub Kicinski 		mutex_unlock(&tls_ctx->tx_lock);
2452f3221361SJakub Kicinski 	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
2453f3221361SJakub Kicinski 		/* Someone is holding the tx_lock, they will likely run Tx
2454f3221361SJakub Kicinski 		 * and cancel the work on their way out of the lock section.
2455f3221361SJakub Kicinski 		 * Schedule a long delay just in case.
2456f3221361SJakub Kicinski 		 */
2457f3221361SJakub Kicinski 		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
2458f3221361SJakub Kicinski 	}
2459a42055e8SVakul Garg }
2460a42055e8SVakul Garg 
/* Return true when the first record on tx_list is fully encrypted and
 * ready to be transmitted; false if the list is empty or the head
 * record is still being encrypted.
 */
246258790314SJakub Kicinski static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
246358790314SJakub Kicinski {
246458790314SJakub Kicinski 	struct tls_rec *rec;
246558790314SJakub Kicinski 
2466ffe2a225SPietro Borrello 	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
246758790314SJakub Kicinski 	if (!rec)
246858790314SJakub Kicinski 		return false;
246958790314SJakub Kicinski 
247058790314SJakub Kicinski 	return READ_ONCE(rec->tx_ready);
247158790314SJakub Kicinski }
247158790314SJakub Kicinski 
/* write_space hook: kick the delayed TX worker when a ready record is
 * queued and the work is not already scheduled.
 */
24727463d3a2SBoris Pismenny void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
24737463d3a2SBoris Pismenny {
24747463d3a2SBoris Pismenny 	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
24757463d3a2SBoris Pismenny 
24767463d3a2SBoris Pismenny 	/* Schedule the transmission if tx list is ready */
247758790314SJakub Kicinski 	if (tls_is_tx_ready(tx_ctx) &&
247802b1fa07SJakub Kicinski 	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
24797463d3a2SBoris Pismenny 		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
24807463d3a2SBoris Pismenny }
24817463d3a2SBoris Pismenny 
2482318892acSJakub Kicinski void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2483318892acSJakub Kicinski {
2484318892acSJakub Kicinski 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2485318892acSJakub Kicinski 
2486318892acSJakub Kicinski 	write_lock_bh(&sk->sk_callback_lock);
2487318892acSJakub Kicinski 	rx_ctx->saved_data_ready = sk->sk_data_ready;
2488318892acSJakub Kicinski 	sk->sk_data_ready = tls_data_ready;
2489318892acSJakub Kicinski 	write_unlock_bh(&sk->sk_callback_lock);
2490318892acSJakub Kicinski }
2491318892acSJakub Kicinski 
249288527790SJakub Kicinski void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
249388527790SJakub Kicinski {
249488527790SJakub Kicinski 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
249588527790SJakub Kicinski 
249688527790SJakub Kicinski 	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
249788527790SJakub Kicinski 		tls_ctx->prot_info.version != TLS_1_3_VERSION;
249888527790SJakub Kicinski }
249988527790SJakub Kicinski 
/* Initialize the software kTLS state for one direction of a socket.
 *
 * @sk:  the TLS socket
 * @ctx: the socket's TLS context (crypto parameters already copied in
 *       from the setsockopt payload)
 * @tx:  non-zero to set up the transmit path, zero for receive
 *
 * Allocates (or reuses) the per-direction SW context, extracts the
 * cipher geometry and key material for the negotiated cipher, builds
 * the salt||IV buffer and record-sequence copy, and allocates/keys the
 * AEAD transform.  For Rx it also initializes the stream parser.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here for this direction is freed again via the goto chain.
 */
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt, *cipher_name;
	size_t keysize;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* Allocate the direction-specific context unless one already
	 * exists (it may have been set up earlier, e.g. by the device
	 * offload path).
	 */
	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	/* Per-direction init: pick the crypto_info / cipher_context /
	 * aead slot to fill in, and set up the direction's bookkeeping
	 * (Tx worker and record list, or Rx queues and wait queue).
	 */
	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
		init_waitqueue_head(&sw_ctx_rx->wq);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);
		skb_queue_head_init(&sw_ctx_rx->async_hold);
		aead = &sw_ctx_rx->aead_recv;
	}

	/* Extract the cipher geometry and key material from the
	 * cipher-specific setsockopt payload.
	 */
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;

		gcm_128_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = gcm_128_info->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq = gcm_128_info->rec_seq;
		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		key = gcm_128_info->key;
		salt = gcm_128_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;

		gcm_256_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		iv = gcm_256_info->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
		rec_seq = gcm_256_info->rec_seq;
		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;

		ccm_128_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		iv = ccm_128_info->iv;
		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
		rec_seq = ccm_128_info->rec_seq;
		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
		key = ccm_128_info->key;
		salt = ccm_128_info->salt;
		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
		cipher_name = "ccm(aes)";
		break;
	}
	case TLS_CIPHER_CHACHA20_POLY1305: {
		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;

		chacha20_poly1305_info = (void *)crypto_info;
		/* ChaCha20-Poly1305 sends no explicit per-record nonce */
		nonce_size = 0;
		tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
		iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
		iv = chacha20_poly1305_info->iv;
		rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
		rec_seq = chacha20_poly1305_info->rec_seq;
		keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
		key = chacha20_poly1305_info->key;
		salt = chacha20_poly1305_info->salt;
		salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
		cipher_name = "rfc7539(chacha20,poly1305)";
		break;
	}
	case TLS_CIPHER_SM4_GCM: {
		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;

		sm4_gcm_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
		tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
		iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
		iv = sm4_gcm_info->iv;
		rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
		rec_seq = sm4_gcm_info->rec_seq;
		keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
		key = sm4_gcm_info->key;
		salt = sm4_gcm_info->salt;
		salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
		cipher_name = "gcm(sm4)";
		break;
	}
	case TLS_CIPHER_SM4_CCM: {
		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;

		sm4_ccm_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
		tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
		iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
		iv = sm4_ccm_info->iv;
		rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
		rec_seq = sm4_ccm_info->rec_seq;
		keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
		key = sm4_ccm_info->key;
		salt = sm4_ccm_info->salt;
		salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
		cipher_name = "ccm(sm4)";
		break;
	}
	case TLS_CIPHER_ARIA_GCM_128: {
		struct tls12_crypto_info_aria_gcm_128 *aria_gcm_128_info;

		aria_gcm_128_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_ARIA_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
		iv = aria_gcm_128_info->iv;
		rec_seq_size = TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE;
		rec_seq = aria_gcm_128_info->rec_seq;
		keysize = TLS_CIPHER_ARIA_GCM_128_KEY_SIZE;
		key = aria_gcm_128_info->key;
		salt = aria_gcm_128_info->salt;
		salt_size = TLS_CIPHER_ARIA_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aria)";
		break;
	}
	case TLS_CIPHER_ARIA_GCM_256: {
		struct tls12_crypto_info_aria_gcm_256 *gcm_256_info;

		gcm_256_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_ARIA_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
		iv = gcm_256_info->iv;
		rec_seq_size = TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE;
		rec_seq = gcm_256_info->rec_seq;
		keysize = TLS_CIPHER_ARIA_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_ARIA_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aria)";
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* TLS 1.3 has no explicit nonce on the wire, a 1-byte content
	 * type trailer inside the ciphertext, and a smaller AAD (just
	 * the record header).
	 */
	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
	    prot->aad_size > TLS_MAX_AAD_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	/* Record the protocol geometry used by the data path */
	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	/* The AEAD nonce buffer holds salt followed by the IV */
	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = rec_seq_size;
	memcpy(cctx->iv, salt, salt_size);
	memcpy(cctx->iv + salt_size, iv, iv_size);
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	/* Allocate the AEAD transform unless it already exists */
	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, keysize);

	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		tls_update_rx_zc_capable(ctx);
		/* Async decrypt is only used for TLS <= 1.2 with an
		 * async-capable AEAD implementation.
		 */
		sw_ctx_rx->async_capable =
			crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);

		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
		if (rc)
			goto free_aead;
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}
2803