xref: /openbmc/linux/net/tls/tls_sw.c (revision 4175eac3)
13c4d7559SDave Watson /*
23c4d7559SDave Watson  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
33c4d7559SDave Watson  * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
43c4d7559SDave Watson  * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
53c4d7559SDave Watson  * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
63c4d7559SDave Watson  * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7d3b18ad3SJohn Fastabend  * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
83c4d7559SDave Watson  *
93c4d7559SDave Watson  * This software is available to you under a choice of one of two
103c4d7559SDave Watson  * licenses.  You may choose to be licensed under the terms of the GNU
113c4d7559SDave Watson  * General Public License (GPL) Version 2, available from the file
123c4d7559SDave Watson  * COPYING in the main directory of this source tree, or the
133c4d7559SDave Watson  * OpenIB.org BSD license below:
143c4d7559SDave Watson  *
153c4d7559SDave Watson  *     Redistribution and use in source and binary forms, with or
163c4d7559SDave Watson  *     without modification, are permitted provided that the following
173c4d7559SDave Watson  *     conditions are met:
183c4d7559SDave Watson  *
193c4d7559SDave Watson  *      - Redistributions of source code must retain the above
203c4d7559SDave Watson  *        copyright notice, this list of conditions and the following
213c4d7559SDave Watson  *        disclaimer.
223c4d7559SDave Watson  *
233c4d7559SDave Watson  *      - Redistributions in binary form must reproduce the above
243c4d7559SDave Watson  *        copyright notice, this list of conditions and the following
253c4d7559SDave Watson  *        disclaimer in the documentation and/or other materials
263c4d7559SDave Watson  *        provided with the distribution.
273c4d7559SDave Watson  *
283c4d7559SDave Watson  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
293c4d7559SDave Watson  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
303c4d7559SDave Watson  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
313c4d7559SDave Watson  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
323c4d7559SDave Watson  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
333c4d7559SDave Watson  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
343c4d7559SDave Watson  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
353c4d7559SDave Watson  * SOFTWARE.
363c4d7559SDave Watson  */
373c4d7559SDave Watson 
38da353facSDaniel Jordan #include <linux/bug.h>
39c46234ebSDave Watson #include <linux/sched/signal.h>
403c4d7559SDave Watson #include <linux/module.h>
41974271e5SJim Ma #include <linux/splice.h>
423c4d7559SDave Watson #include <crypto/aead.h>
433c4d7559SDave Watson 
44c46234ebSDave Watson #include <net/strparser.h>
453c4d7559SDave Watson #include <net/tls.h>
463c4d7559SDave Watson 
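/* Per-call decryption options: @zc requests zero-copy decryption into the
 * caller's buffers, @async allows the AEAD request to complete
 * asynchronously.
 */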
47*4175eac3SJakub Kicinski struct tls_decrypt_arg {
48*4175eac3SJakub Kicinski 	bool zc;
49*4175eac3SJakub Kicinski 	bool async;
50*4175eac3SJakub Kicinski };
51*4175eac3SJakub Kicinski 
52da353facSDaniel Jordan noinline void tls_err_abort(struct sock *sk, int err)
53da353facSDaniel Jordan {
54da353facSDaniel Jordan 	WARN_ON_ONCE(err >= 0);
55da353facSDaniel Jordan 	/* sk->sk_err should contain a positive error code. */
56da353facSDaniel Jordan 	sk->sk_err = -err;
57da353facSDaniel Jordan 	sk_error_report(sk);
58da353facSDaniel Jordan }
59da353facSDaniel Jordan 
600927f71dSDoron Roberts-Kedes static int __skb_nsg(struct sk_buff *skb, int offset, int len,
610927f71dSDoron Roberts-Kedes                      unsigned int recursion_level)
620927f71dSDoron Roberts-Kedes {
630927f71dSDoron Roberts-Kedes         int start = skb_headlen(skb);
640927f71dSDoron Roberts-Kedes         int i, chunk = start - offset;
650927f71dSDoron Roberts-Kedes         struct sk_buff *frag_iter;
660927f71dSDoron Roberts-Kedes         int elt = 0;
670927f71dSDoron Roberts-Kedes 
680927f71dSDoron Roberts-Kedes         if (unlikely(recursion_level >= 24))
690927f71dSDoron Roberts-Kedes                 return -EMSGSIZE;
700927f71dSDoron Roberts-Kedes 
710927f71dSDoron Roberts-Kedes         if (chunk > 0) {
720927f71dSDoron Roberts-Kedes                 if (chunk > len)
730927f71dSDoron Roberts-Kedes                         chunk = len;
740927f71dSDoron Roberts-Kedes                 elt++;
750927f71dSDoron Roberts-Kedes                 len -= chunk;
760927f71dSDoron Roberts-Kedes                 if (len == 0)
770927f71dSDoron Roberts-Kedes                         return elt;
780927f71dSDoron Roberts-Kedes                 offset += chunk;
790927f71dSDoron Roberts-Kedes         }
800927f71dSDoron Roberts-Kedes 
810927f71dSDoron Roberts-Kedes         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
820927f71dSDoron Roberts-Kedes                 int end;
830927f71dSDoron Roberts-Kedes 
840927f71dSDoron Roberts-Kedes                 WARN_ON(start > offset + len);
850927f71dSDoron Roberts-Kedes 
860927f71dSDoron Roberts-Kedes                 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
870927f71dSDoron Roberts-Kedes                 chunk = end - offset;
880927f71dSDoron Roberts-Kedes                 if (chunk > 0) {
890927f71dSDoron Roberts-Kedes                         if (chunk > len)
900927f71dSDoron Roberts-Kedes                                 chunk = len;
910927f71dSDoron Roberts-Kedes                         elt++;
920927f71dSDoron Roberts-Kedes                         len -= chunk;
930927f71dSDoron Roberts-Kedes                         if (len == 0)
940927f71dSDoron Roberts-Kedes                                 return elt;
950927f71dSDoron Roberts-Kedes                         offset += chunk;
960927f71dSDoron Roberts-Kedes                 }
970927f71dSDoron Roberts-Kedes                 start = end;
980927f71dSDoron Roberts-Kedes         }
990927f71dSDoron Roberts-Kedes 
1000927f71dSDoron Roberts-Kedes         if (unlikely(skb_has_frag_list(skb))) {
1010927f71dSDoron Roberts-Kedes                 skb_walk_frags(skb, frag_iter) {
1020927f71dSDoron Roberts-Kedes                         int end, ret;
1030927f71dSDoron Roberts-Kedes 
1040927f71dSDoron Roberts-Kedes                         WARN_ON(start > offset + len);
1050927f71dSDoron Roberts-Kedes 
1060927f71dSDoron Roberts-Kedes                         end = start + frag_iter->len;
1070927f71dSDoron Roberts-Kedes                         chunk = end - offset;
1080927f71dSDoron Roberts-Kedes                         if (chunk > 0) {
1090927f71dSDoron Roberts-Kedes                                 if (chunk > len)
1100927f71dSDoron Roberts-Kedes                                         chunk = len;
1110927f71dSDoron Roberts-Kedes                                 ret = __skb_nsg(frag_iter, offset - start, chunk,
1120927f71dSDoron Roberts-Kedes                                                 recursion_level + 1);
1130927f71dSDoron Roberts-Kedes                                 if (unlikely(ret < 0))
1140927f71dSDoron Roberts-Kedes                                         return ret;
1150927f71dSDoron Roberts-Kedes                                 elt += ret;
1160927f71dSDoron Roberts-Kedes                                 len -= chunk;
1170927f71dSDoron Roberts-Kedes                                 if (len == 0)
1180927f71dSDoron Roberts-Kedes                                         return elt;
1190927f71dSDoron Roberts-Kedes                                 offset += chunk;
1200927f71dSDoron Roberts-Kedes                         }
1210927f71dSDoron Roberts-Kedes                         start = end;
1220927f71dSDoron Roberts-Kedes                 }
1230927f71dSDoron Roberts-Kedes         }
1240927f71dSDoron Roberts-Kedes         BUG_ON(len);
1250927f71dSDoron Roberts-Kedes         return elt;
1260927f71dSDoron Roberts-Kedes }
1270927f71dSDoron Roberts-Kedes 
1280927f71dSDoron Roberts-Kedes /* Return the number of scatterlist elements required to completely map the
1290927f71dSDoron Roberts-Kedes  * skb, or -EMSGSIZE if the recursion depth is exceeded.
1300927f71dSDoron Roberts-Kedes  */
1310927f71dSDoron Roberts-Kedes static int skb_nsg(struct sk_buff *skb, int offset, int len)
1320927f71dSDoron Roberts-Kedes {
1330927f71dSDoron Roberts-Kedes         return __skb_nsg(skb, offset, len, 0);
1340927f71dSDoron Roberts-Kedes }
1350927f71dSDoron Roberts-Kedes 
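/* For TLS 1.3, scan backwards from the end of the decrypted record for the
 * real content type byte, store it in the tls_msg and return the number of
 * trailing zero-padding bytes (or a negative error). Returns 0 for other
 * protocol versions.
 */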
136c3f6bb74SJakub Kicinski static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb)
137130b392cSDave Watson {
138130b392cSDave Watson 	struct strp_msg *rxm = strp_msg(skb);
139c3f6bb74SJakub Kicinski 	struct tls_msg *tlm = tls_msg(skb);
140130b392cSDave Watson 	int sub = 0;
141130b392cSDave Watson 
142130b392cSDave Watson 	/* Determine zero-padding length */
143b53f4976SJakub Kicinski 	if (prot->version == TLS_1_3_VERSION) {
1445deee41bSJakub Kicinski 		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
145130b392cSDave Watson 		char content_type = 0;
146130b392cSDave Watson 		int err;
147130b392cSDave Watson 
148130b392cSDave Watson 		while (content_type == 0) {
1495deee41bSJakub Kicinski 			if (offset < prot->prepend_size)
150130b392cSDave Watson 				return -EBADMSG;
1515deee41bSJakub Kicinski 			err = skb_copy_bits(skb, rxm->offset + offset,
152130b392cSDave Watson 					    &content_type, 1);
153b53f4976SJakub Kicinski 			if (err)
154b53f4976SJakub Kicinski 				return err;
155130b392cSDave Watson 			if (content_type)
156130b392cSDave Watson 				break;
157130b392cSDave Watson 			sub++;
1585deee41bSJakub Kicinski 			offset--;
159130b392cSDave Watson 		}
160c3f6bb74SJakub Kicinski 		tlm->control = content_type;
161130b392cSDave Watson 	}
162130b392cSDave Watson 	return sub;
163130b392cSDave Watson }
164130b392cSDave Watson 
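/* Completion callback for an asynchronous record decryption. Propagates
 * crypto errors to the socket, strips the record header and TLS 1.3 padding
 * on success, frees out-of-place destination pages and wakes any waiter
 * once the last pending decryption has finished.
 */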
16594524d8fSVakul Garg static void tls_decrypt_done(struct crypto_async_request *req, int err)
16694524d8fSVakul Garg {
16794524d8fSVakul Garg 	struct aead_request *aead_req = (struct aead_request *)req;
16894524d8fSVakul Garg 	struct scatterlist *sgout = aead_req->dst;
169692d7b5dSVakul Garg 	struct scatterlist *sgin = aead_req->src;
1707a3dd8c8SJohn Fastabend 	struct tls_sw_context_rx *ctx;
1717a3dd8c8SJohn Fastabend 	struct tls_context *tls_ctx;
1724509de14SVakul Garg 	struct tls_prot_info *prot;
17394524d8fSVakul Garg 	struct scatterlist *sg;
1747a3dd8c8SJohn Fastabend 	struct sk_buff *skb;
17594524d8fSVakul Garg 	unsigned int pages;
1767a3dd8c8SJohn Fastabend 	int pending;
1777a3dd8c8SJohn Fastabend 
1787a3dd8c8SJohn Fastabend 	skb = (struct sk_buff *)req->data;
1797a3dd8c8SJohn Fastabend 	tls_ctx = tls_get_ctx(skb->sk);
1807a3dd8c8SJohn Fastabend 	ctx = tls_sw_ctx_rx(tls_ctx);
1814509de14SVakul Garg 	prot = &tls_ctx->prot_info;
18294524d8fSVakul Garg 
18394524d8fSVakul Garg 	/* Propagate if there was an err */
18494524d8fSVakul Garg 	if (err) {
1855c5ec668SJakub Kicinski 		if (err == -EBADMSG)
1865c5ec668SJakub Kicinski 			TLS_INC_STATS(sock_net(skb->sk),
1875c5ec668SJakub Kicinski 				      LINUX_MIB_TLSDECRYPTERROR);
18894524d8fSVakul Garg 		ctx->async_wait.err = err;
1897a3dd8c8SJohn Fastabend 		tls_err_abort(skb->sk, err);
190692d7b5dSVakul Garg 	} else {
191692d7b5dSVakul Garg 		struct strp_msg *rxm = strp_msg(skb);
192b53f4976SJakub Kicinski 		int pad;
193b53f4976SJakub Kicinski 
194c3f6bb74SJakub Kicinski 		pad = padding_length(prot, skb);
195b53f4976SJakub Kicinski 		if (pad < 0) {
196b53f4976SJakub Kicinski 			ctx->async_wait.err = pad;
197b53f4976SJakub Kicinski 			tls_err_abort(skb->sk, pad);
198b53f4976SJakub Kicinski 		} else {
199b53f4976SJakub Kicinski 			rxm->full_len -= pad;
2004509de14SVakul Garg 			rxm->offset += prot->prepend_size;
2014509de14SVakul Garg 			rxm->full_len -= prot->overhead_size;
20294524d8fSVakul Garg 		}
203b53f4976SJakub Kicinski 	}
20494524d8fSVakul Garg 
2057a3dd8c8SJohn Fastabend 	/* After using skb->sk to propagate sk through crypto async callback
2067a3dd8c8SJohn Fastabend 	 * we need to NULL it again.
2077a3dd8c8SJohn Fastabend 	 */
2087a3dd8c8SJohn Fastabend 	skb->sk = NULL;
2097a3dd8c8SJohn Fastabend 
21094524d8fSVakul Garg 
211692d7b5dSVakul Garg 	/* Free the destination pages if skb was not decrypted in place */
212692d7b5dSVakul Garg 	if (sgout != sgin) {
21394524d8fSVakul Garg 		/* Skip the first S/G entry as it points to AAD */
21494524d8fSVakul Garg 		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
21594524d8fSVakul Garg 			if (!sg)
21694524d8fSVakul Garg 				break;
21794524d8fSVakul Garg 			put_page(sg_page(sg));
21894524d8fSVakul Garg 		}
219692d7b5dSVakul Garg 	}
22094524d8fSVakul Garg 
22194524d8fSVakul Garg 	kfree(aead_req);
22294524d8fSVakul Garg 
2230cada332SVinay Kumar Yadav 	spin_lock_bh(&ctx->decrypt_compl_lock);
224692d7b5dSVakul Garg 	pending = atomic_dec_return(&ctx->decrypt_pending);
225692d7b5dSVakul Garg 
2260cada332SVinay Kumar Yadav 	if (!pending && ctx->async_notify)
22794524d8fSVakul Garg 		complete(&ctx->async_wait.completion);
2280cada332SVinay Kumar Yadav 	spin_unlock_bh(&ctx->decrypt_compl_lock);
22994524d8fSVakul Garg }
23094524d8fSVakul Garg 
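/* Submit one record to the AEAD for decryption. With @async the request may
 * return -EINPROGRESS and finish later in tls_decrypt_done(), otherwise the
 * call waits until the crypto operation completes.
 */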
231c46234ebSDave Watson static int tls_do_decryption(struct sock *sk,
23294524d8fSVakul Garg 			     struct sk_buff *skb,
233c46234ebSDave Watson 			     struct scatterlist *sgin,
234c46234ebSDave Watson 			     struct scatterlist *sgout,
235c46234ebSDave Watson 			     char *iv_recv,
236c46234ebSDave Watson 			     size_t data_len,
23794524d8fSVakul Garg 			     struct aead_request *aead_req,
23894524d8fSVakul Garg 			     bool async)
239c46234ebSDave Watson {
240c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2414509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
242f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
243c46234ebSDave Watson 	int ret;
244c46234ebSDave Watson 
2450b243d00SVakul Garg 	aead_request_set_tfm(aead_req, ctx->aead_recv);
2464509de14SVakul Garg 	aead_request_set_ad(aead_req, prot->aad_size);
247c46234ebSDave Watson 	aead_request_set_crypt(aead_req, sgin, sgout,
2484509de14SVakul Garg 			       data_len + prot->tag_size,
249c46234ebSDave Watson 			       (u8 *)iv_recv);
250c46234ebSDave Watson 
25194524d8fSVakul Garg 	if (async) {
2527a3dd8c8SJohn Fastabend 		/* Using skb->sk to push sk through to crypto async callback
2537a3dd8c8SJohn Fastabend 		 * handler. This allows propagating errors up to the socket
2547a3dd8c8SJohn Fastabend 		 * if needed. It _must_ be cleared in the async handler
255a88c26f6SVakul Garg 		 * before consume_skb is called. We _know_ skb->sk is NULL
2567a3dd8c8SJohn Fastabend 		 * because it is a clone from strparser.
2577a3dd8c8SJohn Fastabend 		 */
2587a3dd8c8SJohn Fastabend 		skb->sk = sk;
25994524d8fSVakul Garg 		aead_request_set_callback(aead_req,
26094524d8fSVakul Garg 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
26194524d8fSVakul Garg 					  tls_decrypt_done, skb);
26294524d8fSVakul Garg 		atomic_inc(&ctx->decrypt_pending);
26394524d8fSVakul Garg 	} else {
26494524d8fSVakul Garg 		aead_request_set_callback(aead_req,
26594524d8fSVakul Garg 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
26694524d8fSVakul Garg 					  crypto_req_done, &ctx->async_wait);
26794524d8fSVakul Garg 	}
26894524d8fSVakul Garg 
26994524d8fSVakul Garg 	ret = crypto_aead_decrypt(aead_req);
27094524d8fSVakul Garg 	if (ret == -EINPROGRESS) {
27194524d8fSVakul Garg 		if (async)
27294524d8fSVakul Garg 			return ret;
27394524d8fSVakul Garg 
27494524d8fSVakul Garg 		ret = crypto_wait_req(ret, &ctx->async_wait);
27594524d8fSVakul Garg 	}
27694524d8fSVakul Garg 
27794524d8fSVakul Garg 	if (async)
27894524d8fSVakul Garg 		atomic_dec(&ctx->decrypt_pending);
27994524d8fSVakul Garg 
280c46234ebSDave Watson 	return ret;
281c46234ebSDave Watson }
282c46234ebSDave Watson 
283d829e9c4SDaniel Borkmann static void tls_trim_both_msgs(struct sock *sk, int target_size)
2843c4d7559SDave Watson {
2853c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2864509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
287f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
288a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
2893c4d7559SDave Watson 
290d829e9c4SDaniel Borkmann 	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
2913c4d7559SDave Watson 	if (target_size > 0)
2924509de14SVakul Garg 		target_size += prot->overhead_size;
293d829e9c4SDaniel Borkmann 	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
2943c4d7559SDave Watson }
2953c4d7559SDave Watson 
296d829e9c4SDaniel Borkmann static int tls_alloc_encrypted_msg(struct sock *sk, int len)
2973c4d7559SDave Watson {
2983c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
299f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
300a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
301d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
3023c4d7559SDave Watson 
303d829e9c4SDaniel Borkmann 	return sk_msg_alloc(sk, msg_en, len, 0);
3043c4d7559SDave Watson }
3053c4d7559SDave Watson 
306d829e9c4SDaniel Borkmann static int tls_clone_plaintext_msg(struct sock *sk, int required)
3073c4d7559SDave Watson {
3083c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
3094509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
310f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
311a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
312d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl = &rec->msg_plaintext;
313d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
3144e6d4720SVakul Garg 	int skip, len;
3153c4d7559SDave Watson 
316d829e9c4SDaniel Borkmann 	/* We add page references worth len bytes from encrypted sg
317d829e9c4SDaniel Borkmann 	 * at the end of plaintext sg. It is guaranteed that msg_en
3184e6d4720SVakul Garg 	 * has enough room (ensured by the caller).
3194e6d4720SVakul Garg 	 */
320d829e9c4SDaniel Borkmann 	len = required - msg_pl->sg.size;
32152ea992cSVakul Garg 
322d829e9c4SDaniel Borkmann 	/* Skip initial bytes in msg_en's data to be able to use
323d829e9c4SDaniel Borkmann 	 * same offset of both plain and encrypted data.
3244e6d4720SVakul Garg 	 */
3254509de14SVakul Garg 	skip = prot->prepend_size + msg_pl->sg.size;
3264e6d4720SVakul Garg 
327d829e9c4SDaniel Borkmann 	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
3283c4d7559SDave Watson }
3293c4d7559SDave Watson 
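/* Allocate and initialise a new TX record, including space for the AEAD
 * request and the two-entry scatterlists that chain the AAD in front of the
 * plaintext and ciphertext data.
 */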
330d3b18ad3SJohn Fastabend static struct tls_rec *tls_get_rec(struct sock *sk)
331d3b18ad3SJohn Fastabend {
332d3b18ad3SJohn Fastabend 	struct tls_context *tls_ctx = tls_get_ctx(sk);
3334509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
334d3b18ad3SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
335d3b18ad3SJohn Fastabend 	struct sk_msg *msg_pl, *msg_en;
336d3b18ad3SJohn Fastabend 	struct tls_rec *rec;
337d3b18ad3SJohn Fastabend 	int mem_size;
338d3b18ad3SJohn Fastabend 
339d3b18ad3SJohn Fastabend 	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
340d3b18ad3SJohn Fastabend 
341d3b18ad3SJohn Fastabend 	rec = kzalloc(mem_size, sk->sk_allocation);
342d3b18ad3SJohn Fastabend 	if (!rec)
343d3b18ad3SJohn Fastabend 		return NULL;
344d3b18ad3SJohn Fastabend 
345d3b18ad3SJohn Fastabend 	msg_pl = &rec->msg_plaintext;
346d3b18ad3SJohn Fastabend 	msg_en = &rec->msg_encrypted;
347d3b18ad3SJohn Fastabend 
348d3b18ad3SJohn Fastabend 	sk_msg_init(msg_pl);
349d3b18ad3SJohn Fastabend 	sk_msg_init(msg_en);
350d3b18ad3SJohn Fastabend 
351d3b18ad3SJohn Fastabend 	sg_init_table(rec->sg_aead_in, 2);
3524509de14SVakul Garg 	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
353d3b18ad3SJohn Fastabend 	sg_unmark_end(&rec->sg_aead_in[1]);
354d3b18ad3SJohn Fastabend 
355d3b18ad3SJohn Fastabend 	sg_init_table(rec->sg_aead_out, 2);
3564509de14SVakul Garg 	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
357d3b18ad3SJohn Fastabend 	sg_unmark_end(&rec->sg_aead_out[1]);
358d3b18ad3SJohn Fastabend 
359d3b18ad3SJohn Fastabend 	return rec;
360d3b18ad3SJohn Fastabend }
361d3b18ad3SJohn Fastabend 
362d3b18ad3SJohn Fastabend static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
363d3b18ad3SJohn Fastabend {
364d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &rec->msg_encrypted);
365d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &rec->msg_plaintext);
366d3b18ad3SJohn Fastabend 	kfree(rec);
367d3b18ad3SJohn Fastabend }
368d3b18ad3SJohn Fastabend 
369c774973eSVakul Garg static void tls_free_open_rec(struct sock *sk)
3703c4d7559SDave Watson {
3713c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
372f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
373a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
3743c4d7559SDave Watson 
375d3b18ad3SJohn Fastabend 	if (rec) {
376d3b18ad3SJohn Fastabend 		tls_free_rec(sk, rec);
377d3b18ad3SJohn Fastabend 		ctx->open_rec = NULL;
378d3b18ad3SJohn Fastabend 	}
3793c4d7559SDave Watson }
3803c4d7559SDave Watson 
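/* Transmit encrypted records to TCP: finish any partially sent record
 * first, then push, in order, every record on tx_list that is marked
 * tx_ready.
 */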
381a42055e8SVakul Garg int tls_tx_records(struct sock *sk, int flags)
382a42055e8SVakul Garg {
383a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
384a42055e8SVakul Garg 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
385a42055e8SVakul Garg 	struct tls_rec *rec, *tmp;
386d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en;
387a42055e8SVakul Garg 	int tx_flags, rc = 0;
388a42055e8SVakul Garg 
389a42055e8SVakul Garg 	if (tls_is_partially_sent_record(tls_ctx)) {
3909932a29aSVakul Garg 		rec = list_first_entry(&ctx->tx_list,
391a42055e8SVakul Garg 				       struct tls_rec, list);
392a42055e8SVakul Garg 
393a42055e8SVakul Garg 		if (flags == -1)
394a42055e8SVakul Garg 			tx_flags = rec->tx_flags;
395a42055e8SVakul Garg 		else
396a42055e8SVakul Garg 			tx_flags = flags;
397a42055e8SVakul Garg 
398a42055e8SVakul Garg 		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
399a42055e8SVakul Garg 		if (rc)
400a42055e8SVakul Garg 			goto tx_err;
401a42055e8SVakul Garg 
402a42055e8SVakul Garg 		/* Full record has been transmitted.
4039932a29aSVakul Garg 		 * Remove the head of tx_list
404a42055e8SVakul Garg 		 */
405a42055e8SVakul Garg 		list_del(&rec->list);
406d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_plaintext);
407a42055e8SVakul Garg 		kfree(rec);
408a42055e8SVakul Garg 	}
409a42055e8SVakul Garg 
4109932a29aSVakul Garg 	/* Tx all ready records */
4119932a29aSVakul Garg 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
4129932a29aSVakul Garg 		if (READ_ONCE(rec->tx_ready)) {
413a42055e8SVakul Garg 			if (flags == -1)
414a42055e8SVakul Garg 				tx_flags = rec->tx_flags;
415a42055e8SVakul Garg 			else
416a42055e8SVakul Garg 				tx_flags = flags;
417a42055e8SVakul Garg 
418d829e9c4SDaniel Borkmann 			msg_en = &rec->msg_encrypted;
419a42055e8SVakul Garg 			rc = tls_push_sg(sk, tls_ctx,
420d829e9c4SDaniel Borkmann 					 &msg_en->sg.data[msg_en->sg.curr],
421a42055e8SVakul Garg 					 0, tx_flags);
422a42055e8SVakul Garg 			if (rc)
423a42055e8SVakul Garg 				goto tx_err;
424a42055e8SVakul Garg 
425a42055e8SVakul Garg 			list_del(&rec->list);
426d829e9c4SDaniel Borkmann 			sk_msg_free(sk, &rec->msg_plaintext);
427a42055e8SVakul Garg 			kfree(rec);
428a42055e8SVakul Garg 		} else {
429a42055e8SVakul Garg 			break;
430a42055e8SVakul Garg 		}
431a42055e8SVakul Garg 	}
432a42055e8SVakul Garg 
433a42055e8SVakul Garg tx_err:
434a42055e8SVakul Garg 	if (rc < 0 && rc != -EAGAIN)
435da353facSDaniel Jordan 		tls_err_abort(sk, -EBADMSG);
436a42055e8SVakul Garg 
437a42055e8SVakul Garg 	return rc;
438a42055e8SVakul Garg }
439a42055e8SVakul Garg 
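/* Completion callback for an asynchronous record encryption. Restores the
 * scatterlist to cover the record header, marks the record ready for
 * transmission and schedules the TX work if the record is at the head of
 * tx_list.
 */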
440a42055e8SVakul Garg static void tls_encrypt_done(struct crypto_async_request *req, int err)
441a42055e8SVakul Garg {
442a42055e8SVakul Garg 	struct aead_request *aead_req = (struct aead_request *)req;
443a42055e8SVakul Garg 	struct sock *sk = req->data;
444a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
4454509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
446a42055e8SVakul Garg 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
447d829e9c4SDaniel Borkmann 	struct scatterlist *sge;
448d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en;
449a42055e8SVakul Garg 	struct tls_rec *rec;
450a42055e8SVakul Garg 	bool ready = false;
451a42055e8SVakul Garg 	int pending;
452a42055e8SVakul Garg 
453a42055e8SVakul Garg 	rec = container_of(aead_req, struct tls_rec, aead_req);
454d829e9c4SDaniel Borkmann 	msg_en = &rec->msg_encrypted;
455a42055e8SVakul Garg 
456d829e9c4SDaniel Borkmann 	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
4574509de14SVakul Garg 	sge->offset -= prot->prepend_size;
4584509de14SVakul Garg 	sge->length += prot->prepend_size;
459a42055e8SVakul Garg 
46080ece6a0SVakul Garg 	/* Check if error is previously set on socket */
461a42055e8SVakul Garg 	if (err || sk->sk_err) {
462a42055e8SVakul Garg 		rec = NULL;
463a42055e8SVakul Garg 
464a42055e8SVakul Garg 		/* If err is already set on socket, return the same code */
465a42055e8SVakul Garg 		if (sk->sk_err) {
4661d9d6fd2SDaniel Jordan 			ctx->async_wait.err = -sk->sk_err;
467a42055e8SVakul Garg 		} else {
468a42055e8SVakul Garg 			ctx->async_wait.err = err;
469a42055e8SVakul Garg 			tls_err_abort(sk, err);
470a42055e8SVakul Garg 		}
471a42055e8SVakul Garg 	}
472a42055e8SVakul Garg 
4739932a29aSVakul Garg 	if (rec) {
4749932a29aSVakul Garg 		struct tls_rec *first_rec;
4759932a29aSVakul Garg 
4769932a29aSVakul Garg 		/* Mark the record as ready for transmission */
4779932a29aSVakul Garg 		smp_store_mb(rec->tx_ready, true);
4789932a29aSVakul Garg 
4799932a29aSVakul Garg 		/* If received record is at head of tx_list, schedule tx */
4809932a29aSVakul Garg 		first_rec = list_first_entry(&ctx->tx_list,
4819932a29aSVakul Garg 					     struct tls_rec, list);
4829932a29aSVakul Garg 		if (rec == first_rec)
4839932a29aSVakul Garg 			ready = true;
4849932a29aSVakul Garg 	}
485a42055e8SVakul Garg 
4860cada332SVinay Kumar Yadav 	spin_lock_bh(&ctx->encrypt_compl_lock);
487a42055e8SVakul Garg 	pending = atomic_dec_return(&ctx->encrypt_pending);
488a42055e8SVakul Garg 
4890cada332SVinay Kumar Yadav 	if (!pending && ctx->async_notify)
490a42055e8SVakul Garg 		complete(&ctx->async_wait.completion);
4910cada332SVinay Kumar Yadav 	spin_unlock_bh(&ctx->encrypt_compl_lock);
492a42055e8SVakul Garg 
493a42055e8SVakul Garg 	if (!ready)
494a42055e8SVakul Garg 		return;
495a42055e8SVakul Garg 
496a42055e8SVakul Garg 	/* Schedule the transmission */
497a42055e8SVakul Garg 	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
498d829e9c4SDaniel Borkmann 		schedule_delayed_work(&ctx->tx_work.work, 1);
499a42055e8SVakul Garg }
500a42055e8SVakul Garg 
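/* Encrypt the open record: build the per-record nonce, queue the record on
 * tx_list and submit the AEAD request. Returns -EINPROGRESS when the crypto
 * backend completes asynchronously.
 */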
501a42055e8SVakul Garg static int tls_do_encryption(struct sock *sk,
502a42055e8SVakul Garg 			     struct tls_context *tls_ctx,
503a447da7dSDaniel Borkmann 			     struct tls_sw_context_tx *ctx,
504a447da7dSDaniel Borkmann 			     struct aead_request *aead_req,
505d829e9c4SDaniel Borkmann 			     size_t data_len, u32 start)
5063c4d7559SDave Watson {
5074509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
508a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
509d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
510d829e9c4SDaniel Borkmann 	struct scatterlist *sge = sk_msg_elem(msg_en, start);
511f295b3aeSVakul Garg 	int rc, iv_offset = 0;
5123c4d7559SDave Watson 
513f295b3aeSVakul Garg 	/* For CCM based ciphers, first byte of IV is a constant */
514128cfb88STianjia Zhang 	switch (prot->cipher_type) {
515128cfb88STianjia Zhang 	case TLS_CIPHER_AES_CCM_128:
516f295b3aeSVakul Garg 		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
517f295b3aeSVakul Garg 		iv_offset = 1;
518128cfb88STianjia Zhang 		break;
519128cfb88STianjia Zhang 	case TLS_CIPHER_SM4_CCM:
520128cfb88STianjia Zhang 		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
521128cfb88STianjia Zhang 		iv_offset = 1;
522128cfb88STianjia Zhang 		break;
523f295b3aeSVakul Garg 	}
524f295b3aeSVakul Garg 
525f295b3aeSVakul Garg 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
526f295b3aeSVakul Garg 	       prot->iv_size + prot->salt_size);
527f295b3aeSVakul Garg 
52859610606STianjia Zhang 	xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
52932eb67b9SDave Watson 
5304509de14SVakul Garg 	sge->offset += prot->prepend_size;
5314509de14SVakul Garg 	sge->length -= prot->prepend_size;
5323c4d7559SDave Watson 
533d829e9c4SDaniel Borkmann 	msg_en->sg.curr = start;
5344e6d4720SVakul Garg 
5353c4d7559SDave Watson 	aead_request_set_tfm(aead_req, ctx->aead_send);
5364509de14SVakul Garg 	aead_request_set_ad(aead_req, prot->aad_size);
537d829e9c4SDaniel Borkmann 	aead_request_set_crypt(aead_req, rec->sg_aead_in,
538d829e9c4SDaniel Borkmann 			       rec->sg_aead_out,
53932eb67b9SDave Watson 			       data_len, rec->iv_data);
540a54667f6SVakul Garg 
541a54667f6SVakul Garg 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
542a42055e8SVakul Garg 				  tls_encrypt_done, sk);
543a54667f6SVakul Garg 
5449932a29aSVakul Garg 	/* Add the record to tx_list */
5459932a29aSVakul Garg 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
546a42055e8SVakul Garg 	atomic_inc(&ctx->encrypt_pending);
5473c4d7559SDave Watson 
548a42055e8SVakul Garg 	rc = crypto_aead_encrypt(aead_req);
549a42055e8SVakul Garg 	if (!rc || rc != -EINPROGRESS) {
550a42055e8SVakul Garg 		atomic_dec(&ctx->encrypt_pending);
5514509de14SVakul Garg 		sge->offset -= prot->prepend_size;
5524509de14SVakul Garg 		sge->length += prot->prepend_size;
553a42055e8SVakul Garg 	}
5543c4d7559SDave Watson 
5559932a29aSVakul Garg 	if (!rc) {
5569932a29aSVakul Garg 		WRITE_ONCE(rec->tx_ready, true);
5579932a29aSVakul Garg 	} else if (rc != -EINPROGRESS) {
5589932a29aSVakul Garg 		list_del(&rec->list);
559a42055e8SVakul Garg 		return rc;
5609932a29aSVakul Garg 	}
561a42055e8SVakul Garg 
562a42055e8SVakul Garg 	/* Unhook the record from context if encryption did not fail */
563a42055e8SVakul Garg 	ctx->open_rec = NULL;
564fb0f886fSJakub Kicinski 	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
5653c4d7559SDave Watson 	return rc;
5663c4d7559SDave Watson }
5673c4d7559SDave Watson 
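/* Split the open record so that only the plaintext covered by apply_bytes
 * stays in @from and is pushed now; the remainder is moved to a newly
 * allocated record returned through @to to be sent later.
 */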
568d3b18ad3SJohn Fastabend static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
569d3b18ad3SJohn Fastabend 				 struct tls_rec **to, struct sk_msg *msg_opl,
570d3b18ad3SJohn Fastabend 				 struct sk_msg *msg_oen, u32 split_point,
571d3b18ad3SJohn Fastabend 				 u32 tx_overhead_size, u32 *orig_end)
572d3b18ad3SJohn Fastabend {
573d3b18ad3SJohn Fastabend 	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
574d3b18ad3SJohn Fastabend 	struct scatterlist *sge, *osge, *nsge;
575d3b18ad3SJohn Fastabend 	u32 orig_size = msg_opl->sg.size;
576d3b18ad3SJohn Fastabend 	struct scatterlist tmp = { };
577d3b18ad3SJohn Fastabend 	struct sk_msg *msg_npl;
578d3b18ad3SJohn Fastabend 	struct tls_rec *new;
579d3b18ad3SJohn Fastabend 	int ret;
580d3b18ad3SJohn Fastabend 
581d3b18ad3SJohn Fastabend 	new = tls_get_rec(sk);
582d3b18ad3SJohn Fastabend 	if (!new)
583d3b18ad3SJohn Fastabend 		return -ENOMEM;
584d3b18ad3SJohn Fastabend 	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
585d3b18ad3SJohn Fastabend 			   tx_overhead_size, 0);
586d3b18ad3SJohn Fastabend 	if (ret < 0) {
587d3b18ad3SJohn Fastabend 		tls_free_rec(sk, new);
588d3b18ad3SJohn Fastabend 		return ret;
589d3b18ad3SJohn Fastabend 	}
590d3b18ad3SJohn Fastabend 
591d3b18ad3SJohn Fastabend 	*orig_end = msg_opl->sg.end;
592d3b18ad3SJohn Fastabend 	i = msg_opl->sg.start;
593d3b18ad3SJohn Fastabend 	sge = sk_msg_elem(msg_opl, i);
594d3b18ad3SJohn Fastabend 	while (apply && sge->length) {
595d3b18ad3SJohn Fastabend 		if (sge->length > apply) {
596d3b18ad3SJohn Fastabend 			u32 len = sge->length - apply;
597d3b18ad3SJohn Fastabend 
598d3b18ad3SJohn Fastabend 			get_page(sg_page(sge));
599d3b18ad3SJohn Fastabend 			sg_set_page(&tmp, sg_page(sge), len,
600d3b18ad3SJohn Fastabend 				    sge->offset + apply);
601d3b18ad3SJohn Fastabend 			sge->length = apply;
602d3b18ad3SJohn Fastabend 			bytes += apply;
603d3b18ad3SJohn Fastabend 			apply = 0;
604d3b18ad3SJohn Fastabend 		} else {
605d3b18ad3SJohn Fastabend 			apply -= sge->length;
606d3b18ad3SJohn Fastabend 			bytes += sge->length;
607d3b18ad3SJohn Fastabend 		}
608d3b18ad3SJohn Fastabend 
609d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(i);
610d3b18ad3SJohn Fastabend 		if (i == msg_opl->sg.end)
611d3b18ad3SJohn Fastabend 			break;
612d3b18ad3SJohn Fastabend 		sge = sk_msg_elem(msg_opl, i);
613d3b18ad3SJohn Fastabend 	}
614d3b18ad3SJohn Fastabend 
615d3b18ad3SJohn Fastabend 	msg_opl->sg.end = i;
616d3b18ad3SJohn Fastabend 	msg_opl->sg.curr = i;
617d3b18ad3SJohn Fastabend 	msg_opl->sg.copybreak = 0;
618d3b18ad3SJohn Fastabend 	msg_opl->apply_bytes = 0;
619d3b18ad3SJohn Fastabend 	msg_opl->sg.size = bytes;
620d3b18ad3SJohn Fastabend 
621d3b18ad3SJohn Fastabend 	msg_npl = &new->msg_plaintext;
622d3b18ad3SJohn Fastabend 	msg_npl->apply_bytes = apply;
623d3b18ad3SJohn Fastabend 	msg_npl->sg.size = orig_size - bytes;
624d3b18ad3SJohn Fastabend 
625d3b18ad3SJohn Fastabend 	j = msg_npl->sg.start;
626d3b18ad3SJohn Fastabend 	nsge = sk_msg_elem(msg_npl, j);
627d3b18ad3SJohn Fastabend 	if (tmp.length) {
628d3b18ad3SJohn Fastabend 		memcpy(nsge, &tmp, sizeof(*nsge));
629d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(j);
630d3b18ad3SJohn Fastabend 		nsge = sk_msg_elem(msg_npl, j);
631d3b18ad3SJohn Fastabend 	}
632d3b18ad3SJohn Fastabend 
633d3b18ad3SJohn Fastabend 	osge = sk_msg_elem(msg_opl, i);
634d3b18ad3SJohn Fastabend 	while (osge->length) {
635d3b18ad3SJohn Fastabend 		memcpy(nsge, osge, sizeof(*nsge));
636d3b18ad3SJohn Fastabend 		sg_unmark_end(nsge);
637d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(i);
638d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(j);
639d3b18ad3SJohn Fastabend 		if (i == *orig_end)
640d3b18ad3SJohn Fastabend 			break;
641d3b18ad3SJohn Fastabend 		osge = sk_msg_elem(msg_opl, i);
642d3b18ad3SJohn Fastabend 		nsge = sk_msg_elem(msg_npl, j);
643d3b18ad3SJohn Fastabend 	}
644d3b18ad3SJohn Fastabend 
645d3b18ad3SJohn Fastabend 	msg_npl->sg.end = j;
646d3b18ad3SJohn Fastabend 	msg_npl->sg.curr = j;
647d3b18ad3SJohn Fastabend 	msg_npl->sg.copybreak = 0;
648d3b18ad3SJohn Fastabend 
649d3b18ad3SJohn Fastabend 	*to = new;
650d3b18ad3SJohn Fastabend 	return 0;
651d3b18ad3SJohn Fastabend }
652d3b18ad3SJohn Fastabend 
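/* Undo a previous tls_split_open_record(): move the plaintext of @from back
 * into @to and free the temporary record.
 */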
653d3b18ad3SJohn Fastabend static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
654d3b18ad3SJohn Fastabend 				  struct tls_rec *from, u32 orig_end)
655d3b18ad3SJohn Fastabend {
656d3b18ad3SJohn Fastabend 	struct sk_msg *msg_npl = &from->msg_plaintext;
657d3b18ad3SJohn Fastabend 	struct sk_msg *msg_opl = &to->msg_plaintext;
658d3b18ad3SJohn Fastabend 	struct scatterlist *osge, *nsge;
659d3b18ad3SJohn Fastabend 	u32 i, j;
660d3b18ad3SJohn Fastabend 
661d3b18ad3SJohn Fastabend 	i = msg_opl->sg.end;
662d3b18ad3SJohn Fastabend 	sk_msg_iter_var_prev(i);
663d3b18ad3SJohn Fastabend 	j = msg_npl->sg.start;
664d3b18ad3SJohn Fastabend 
665d3b18ad3SJohn Fastabend 	osge = sk_msg_elem(msg_opl, i);
666d3b18ad3SJohn Fastabend 	nsge = sk_msg_elem(msg_npl, j);
667d3b18ad3SJohn Fastabend 
668d3b18ad3SJohn Fastabend 	if (sg_page(osge) == sg_page(nsge) &&
669d3b18ad3SJohn Fastabend 	    osge->offset + osge->length == nsge->offset) {
670d3b18ad3SJohn Fastabend 		osge->length += nsge->length;
671d3b18ad3SJohn Fastabend 		put_page(sg_page(nsge));
672d3b18ad3SJohn Fastabend 	}
673d3b18ad3SJohn Fastabend 
674d3b18ad3SJohn Fastabend 	msg_opl->sg.end = orig_end;
675d3b18ad3SJohn Fastabend 	msg_opl->sg.curr = orig_end;
676d3b18ad3SJohn Fastabend 	msg_opl->sg.copybreak = 0;
677d3b18ad3SJohn Fastabend 	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
678d3b18ad3SJohn Fastabend 	msg_opl->sg.size += msg_npl->sg.size;
679d3b18ad3SJohn Fastabend 
680d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &to->msg_encrypted);
681d3b18ad3SJohn Fastabend 	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
682d3b18ad3SJohn Fastabend 
683d3b18ad3SJohn Fastabend 	kfree(from);
684d3b18ad3SJohn Fastabend }
685d3b18ad3SJohn Fastabend 
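/* Encrypt and transmit the currently open record. Splits the record first
 * if only part of it may be sent (apply_bytes), appends the TLS 1.3 content
 * type, builds the AAD and record header, then hands the record to
 * tls_do_encryption() and pushes completed records via tls_tx_records().
 */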
6863c4d7559SDave Watson static int tls_push_record(struct sock *sk, int flags,
6873c4d7559SDave Watson 			   unsigned char record_type)
6883c4d7559SDave Watson {
6893c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
6904509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
691f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
692d3b18ad3SJohn Fastabend 	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
6933f649ab7SKees Cook 	u32 i, split_point, orig_end;
694d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl, *msg_en;
695a447da7dSDaniel Borkmann 	struct aead_request *req;
696d3b18ad3SJohn Fastabend 	bool split;
6973c4d7559SDave Watson 	int rc;
6983c4d7559SDave Watson 
699a42055e8SVakul Garg 	if (!rec)
700a42055e8SVakul Garg 		return 0;
701a447da7dSDaniel Borkmann 
702d829e9c4SDaniel Borkmann 	msg_pl = &rec->msg_plaintext;
703d829e9c4SDaniel Borkmann 	msg_en = &rec->msg_encrypted;
704d829e9c4SDaniel Borkmann 
705d3b18ad3SJohn Fastabend 	split_point = msg_pl->apply_bytes;
706d3b18ad3SJohn Fastabend 	split = split_point && split_point < msg_pl->sg.size;
707d468e477SJohn Fastabend 	if (unlikely((!split &&
708d468e477SJohn Fastabend 		      msg_pl->sg.size +
709d468e477SJohn Fastabend 		      prot->overhead_size > msg_en->sg.size) ||
710d468e477SJohn Fastabend 		     (split &&
711d468e477SJohn Fastabend 		      split_point +
712d468e477SJohn Fastabend 		      prot->overhead_size > msg_en->sg.size))) {
713d468e477SJohn Fastabend 		split = true;
714d468e477SJohn Fastabend 		split_point = msg_en->sg.size;
715d468e477SJohn Fastabend 	}
716d3b18ad3SJohn Fastabend 	if (split) {
717d3b18ad3SJohn Fastabend 		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
7184509de14SVakul Garg 					   split_point, prot->overhead_size,
719d3b18ad3SJohn Fastabend 					   &orig_end);
720d3b18ad3SJohn Fastabend 		if (rc < 0)
721d3b18ad3SJohn Fastabend 			return rc;
722d468e477SJohn Fastabend 		/* This can happen if above tls_split_open_record allocates
723d468e477SJohn Fastabend 		 * a single large encryption buffer instead of two smaller
724d468e477SJohn Fastabend 		 * ones. In this case adjust pointers and continue without
725d468e477SJohn Fastabend 		 * split.
726d468e477SJohn Fastabend 		 */
727d468e477SJohn Fastabend 		if (!msg_pl->sg.size) {
728d468e477SJohn Fastabend 			tls_merge_open_record(sk, rec, tmp, orig_end);
729d468e477SJohn Fastabend 			msg_pl = &rec->msg_plaintext;
730d468e477SJohn Fastabend 			msg_en = &rec->msg_encrypted;
731d468e477SJohn Fastabend 			split = false;
732d468e477SJohn Fastabend 		}
733d3b18ad3SJohn Fastabend 		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
7344509de14SVakul Garg 			    prot->overhead_size);
735d3b18ad3SJohn Fastabend 	}
736d3b18ad3SJohn Fastabend 
737a42055e8SVakul Garg 	rec->tx_flags = flags;
738a42055e8SVakul Garg 	req = &rec->aead_req;
7393c4d7559SDave Watson 
740d829e9c4SDaniel Borkmann 	i = msg_pl->sg.end;
741d829e9c4SDaniel Borkmann 	sk_msg_iter_var_prev(i);
742130b392cSDave Watson 
743130b392cSDave Watson 	rec->content_type = record_type;
7444509de14SVakul Garg 	if (prot->version == TLS_1_3_VERSION) {
745130b392cSDave Watson 		/* Add content type to end of message.  No padding added */
746130b392cSDave Watson 		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
747130b392cSDave Watson 		sg_mark_end(&rec->sg_content_type);
748130b392cSDave Watson 		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
749130b392cSDave Watson 			 &rec->sg_content_type);
750130b392cSDave Watson 	} else {
751d829e9c4SDaniel Borkmann 		sg_mark_end(sk_msg_elem(msg_pl, i));
752130b392cSDave Watson 	}
753a42055e8SVakul Garg 
7549aaaa568SJohn Fastabend 	if (msg_pl->sg.end < msg_pl->sg.start) {
7559aaaa568SJohn Fastabend 		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
7569aaaa568SJohn Fastabend 			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
7579aaaa568SJohn Fastabend 			 msg_pl->sg.data);
7589aaaa568SJohn Fastabend 	}
7599aaaa568SJohn Fastabend 
760d829e9c4SDaniel Borkmann 	i = msg_pl->sg.start;
7619e5ffed3SJakub Kicinski 	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
762d829e9c4SDaniel Borkmann 
763d829e9c4SDaniel Borkmann 	i = msg_en->sg.end;
764d829e9c4SDaniel Borkmann 	sk_msg_iter_var_prev(i);
765d829e9c4SDaniel Borkmann 	sg_mark_end(sk_msg_elem(msg_en, i));
766d829e9c4SDaniel Borkmann 
767d829e9c4SDaniel Borkmann 	i = msg_en->sg.start;
768d829e9c4SDaniel Borkmann 	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
769d829e9c4SDaniel Borkmann 
7704509de14SVakul Garg 	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
7716942a284SVadim Fedorenko 		     tls_ctx->tx.rec_seq, record_type, prot);
7723c4d7559SDave Watson 
7733c4d7559SDave Watson 	tls_fill_prepend(tls_ctx,
774d829e9c4SDaniel Borkmann 			 page_address(sg_page(&msg_en->sg.data[i])) +
775130b392cSDave Watson 			 msg_en->sg.data[i].offset,
7764509de14SVakul Garg 			 msg_pl->sg.size + prot->tail_size,
7776942a284SVadim Fedorenko 			 record_type);
7783c4d7559SDave Watson 
779d829e9c4SDaniel Borkmann 	tls_ctx->pending_open_record_frags = false;
7803c4d7559SDave Watson 
781130b392cSDave Watson 	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
7824509de14SVakul Garg 			       msg_pl->sg.size + prot->tail_size, i);
7833c4d7559SDave Watson 	if (rc < 0) {
784d3b18ad3SJohn Fastabend 		if (rc != -EINPROGRESS) {
785da353facSDaniel Jordan 			tls_err_abort(sk, -EBADMSG);
786d3b18ad3SJohn Fastabend 			if (split) {
787d3b18ad3SJohn Fastabend 				tls_ctx->pending_open_record_frags = true;
788d3b18ad3SJohn Fastabend 				tls_merge_open_record(sk, rec, tmp, orig_end);
789d3b18ad3SJohn Fastabend 			}
790d3b18ad3SJohn Fastabend 		}
7915b053e12SDave Watson 		ctx->async_capable = 1;
792a42055e8SVakul Garg 		return rc;
793d3b18ad3SJohn Fastabend 	} else if (split) {
794d3b18ad3SJohn Fastabend 		msg_pl = &tmp->msg_plaintext;
795d3b18ad3SJohn Fastabend 		msg_en = &tmp->msg_encrypted;
7964509de14SVakul Garg 		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
797d3b18ad3SJohn Fastabend 		tls_ctx->pending_open_record_frags = true;
798d3b18ad3SJohn Fastabend 		ctx->open_rec = tmp;
7993c4d7559SDave Watson 	}
8003c4d7559SDave Watson 
801a42055e8SVakul Garg 	return tls_tx_records(sk, flags);
8023c4d7559SDave Watson }
8033c4d7559SDave Watson 
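/* Run the attached psock's BPF msg verdict program on the open record and
 * act on the result: push the record on __SK_PASS, redirect the plaintext
 * to another socket on __SK_REDIRECT, or free it on __SK_DROP. Without a
 * psock (or with policy disabled) the record is simply pushed.
 */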
804d3b18ad3SJohn Fastabend static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
805d3b18ad3SJohn Fastabend 			       bool full_record, u8 record_type,
806a7bff11fSVadim Fedorenko 			       ssize_t *copied, int flags)
8073c4d7559SDave Watson {
8083c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
809f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
810d3b18ad3SJohn Fastabend 	struct sk_msg msg_redir = { };
811d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
812d3b18ad3SJohn Fastabend 	struct sock *sk_redir;
813a42055e8SVakul Garg 	struct tls_rec *rec;
8140608c69cSJohn Fastabend 	bool enospc, policy;
815d3b18ad3SJohn Fastabend 	int err = 0, send;
8167246d8edSJohn Fastabend 	u32 delta = 0;
817a42055e8SVakul Garg 
8180608c69cSJohn Fastabend 	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
819d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
820d10523d0SJakub Kicinski 	if (!psock || !policy) {
821d10523d0SJakub Kicinski 		err = tls_push_record(sk, flags, record_type);
822635d9398SVadim Fedorenko 		if (err && sk->sk_err == EBADMSG) {
823d10523d0SJakub Kicinski 			*copied -= sk_msg_free(sk, msg);
824d10523d0SJakub Kicinski 			tls_free_open_rec(sk);
825635d9398SVadim Fedorenko 			err = -sk->sk_err;
826d10523d0SJakub Kicinski 		}
827095f5614SXiyu Yang 		if (psock)
828095f5614SXiyu Yang 			sk_psock_put(sk, psock);
829d10523d0SJakub Kicinski 		return err;
830d10523d0SJakub Kicinski 	}
831d3b18ad3SJohn Fastabend more_data:
832d3b18ad3SJohn Fastabend 	enospc = sk_msg_full(msg);
8337246d8edSJohn Fastabend 	if (psock->eval == __SK_NONE) {
8347246d8edSJohn Fastabend 		delta = msg->sg.size;
835d3b18ad3SJohn Fastabend 		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
8367246d8edSJohn Fastabend 		delta -= msg->sg.size;
8377246d8edSJohn Fastabend 	}
838d3b18ad3SJohn Fastabend 	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
839d3b18ad3SJohn Fastabend 	    !enospc && !full_record) {
840d3b18ad3SJohn Fastabend 		err = -ENOSPC;
841d3b18ad3SJohn Fastabend 		goto out_err;
842d3b18ad3SJohn Fastabend 	}
843d3b18ad3SJohn Fastabend 	msg->cork_bytes = 0;
844d3b18ad3SJohn Fastabend 	send = msg->sg.size;
845d3b18ad3SJohn Fastabend 	if (msg->apply_bytes && msg->apply_bytes < send)
846d3b18ad3SJohn Fastabend 		send = msg->apply_bytes;
847a42055e8SVakul Garg 
848d3b18ad3SJohn Fastabend 	switch (psock->eval) {
849d3b18ad3SJohn Fastabend 	case __SK_PASS:
850d3b18ad3SJohn Fastabend 		err = tls_push_record(sk, flags, record_type);
851635d9398SVadim Fedorenko 		if (err && sk->sk_err == EBADMSG) {
852d3b18ad3SJohn Fastabend 			*copied -= sk_msg_free(sk, msg);
853d3b18ad3SJohn Fastabend 			tls_free_open_rec(sk);
854635d9398SVadim Fedorenko 			err = -sk->sk_err;
855d3b18ad3SJohn Fastabend 			goto out_err;
856d3b18ad3SJohn Fastabend 		}
857d3b18ad3SJohn Fastabend 		break;
858d3b18ad3SJohn Fastabend 	case __SK_REDIRECT:
859d3b18ad3SJohn Fastabend 		sk_redir = psock->sk_redir;
860d3b18ad3SJohn Fastabend 		memcpy(&msg_redir, msg, sizeof(*msg));
861d3b18ad3SJohn Fastabend 		if (msg->apply_bytes < send)
862d3b18ad3SJohn Fastabend 			msg->apply_bytes = 0;
863d3b18ad3SJohn Fastabend 		else
864d3b18ad3SJohn Fastabend 			msg->apply_bytes -= send;
865d3b18ad3SJohn Fastabend 		sk_msg_return_zero(sk, msg, send);
866d3b18ad3SJohn Fastabend 		msg->sg.size -= send;
867d3b18ad3SJohn Fastabend 		release_sock(sk);
868d3b18ad3SJohn Fastabend 		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
869d3b18ad3SJohn Fastabend 		lock_sock(sk);
870d3b18ad3SJohn Fastabend 		if (err < 0) {
871d3b18ad3SJohn Fastabend 			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
872d3b18ad3SJohn Fastabend 			msg->sg.size = 0;
873d3b18ad3SJohn Fastabend 		}
874d3b18ad3SJohn Fastabend 		if (msg->sg.size == 0)
875d3b18ad3SJohn Fastabend 			tls_free_open_rec(sk);
876d3b18ad3SJohn Fastabend 		break;
877d3b18ad3SJohn Fastabend 	case __SK_DROP:
878d3b18ad3SJohn Fastabend 	default:
879d3b18ad3SJohn Fastabend 		sk_msg_free_partial(sk, msg, send);
880d3b18ad3SJohn Fastabend 		if (msg->apply_bytes < send)
881d3b18ad3SJohn Fastabend 			msg->apply_bytes = 0;
882d3b18ad3SJohn Fastabend 		else
883d3b18ad3SJohn Fastabend 			msg->apply_bytes -= send;
884d3b18ad3SJohn Fastabend 		if (msg->sg.size == 0)
885d3b18ad3SJohn Fastabend 			tls_free_open_rec(sk);
8867246d8edSJohn Fastabend 		*copied -= (send + delta);
887d3b18ad3SJohn Fastabend 		err = -EACCES;
888d3b18ad3SJohn Fastabend 	}
889a42055e8SVakul Garg 
890d3b18ad3SJohn Fastabend 	if (likely(!err)) {
891d3b18ad3SJohn Fastabend 		bool reset_eval = !ctx->open_rec;
892d3b18ad3SJohn Fastabend 
893d3b18ad3SJohn Fastabend 		rec = ctx->open_rec;
894d3b18ad3SJohn Fastabend 		if (rec) {
895d3b18ad3SJohn Fastabend 			msg = &rec->msg_plaintext;
896d3b18ad3SJohn Fastabend 			if (!msg->apply_bytes)
897d3b18ad3SJohn Fastabend 				reset_eval = true;
898d3b18ad3SJohn Fastabend 		}
899d3b18ad3SJohn Fastabend 		if (reset_eval) {
900d3b18ad3SJohn Fastabend 			psock->eval = __SK_NONE;
901d3b18ad3SJohn Fastabend 			if (psock->sk_redir) {
902d3b18ad3SJohn Fastabend 				sock_put(psock->sk_redir);
903d3b18ad3SJohn Fastabend 				psock->sk_redir = NULL;
904d3b18ad3SJohn Fastabend 			}
905d3b18ad3SJohn Fastabend 		}
906d3b18ad3SJohn Fastabend 		if (rec)
907d3b18ad3SJohn Fastabend 			goto more_data;
908d3b18ad3SJohn Fastabend 	}
909d3b18ad3SJohn Fastabend  out_err:
910d3b18ad3SJohn Fastabend 	sk_psock_put(sk, psock);
911d3b18ad3SJohn Fastabend 	return err;
912d3b18ad3SJohn Fastabend }
913d3b18ad3SJohn Fastabend 
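/* Push any plaintext queued in the open record as a DATA record. */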
914d3b18ad3SJohn Fastabend static int tls_sw_push_pending_record(struct sock *sk, int flags)
915d3b18ad3SJohn Fastabend {
916d3b18ad3SJohn Fastabend 	struct tls_context *tls_ctx = tls_get_ctx(sk);
917d3b18ad3SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
918d3b18ad3SJohn Fastabend 	struct tls_rec *rec = ctx->open_rec;
919d3b18ad3SJohn Fastabend 	struct sk_msg *msg_pl;
920d3b18ad3SJohn Fastabend 	size_t copied;
921d3b18ad3SJohn Fastabend 
922a42055e8SVakul Garg 	if (!rec)
923d3b18ad3SJohn Fastabend 		return 0;
924a42055e8SVakul Garg 
925d829e9c4SDaniel Borkmann 	msg_pl = &rec->msg_plaintext;
926d3b18ad3SJohn Fastabend 	copied = msg_pl->sg.size;
927d3b18ad3SJohn Fastabend 	if (!copied)
928d3b18ad3SJohn Fastabend 		return 0;
929a42055e8SVakul Garg 
930d3b18ad3SJohn Fastabend 	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
931d3b18ad3SJohn Fastabend 				   &copied, flags);
932a42055e8SVakul Garg }
933a42055e8SVakul Garg 
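/* sendmsg() implementation for TLS software TX: copy (or, on the zero-copy
 * path, map) user data into the open record, push full records through the
 * BPF verdict and encryption path, and transmit records whose asynchronous
 * encryption has already completed.
 */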
934a42055e8SVakul Garg int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
935a42055e8SVakul Garg {
9363c4d7559SDave Watson 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
937a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
9384509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
939a42055e8SVakul Garg 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
9405b053e12SDave Watson 	bool async_capable = ctx->async_capable;
941a42055e8SVakul Garg 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
94200e23707SDavid Howells 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
9433c4d7559SDave Watson 	bool eor = !(msg->msg_flags & MSG_MORE);
944a7bff11fSVadim Fedorenko 	size_t try_to_copy;
945a7bff11fSVadim Fedorenko 	ssize_t copied = 0;
946d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl, *msg_en;
947a42055e8SVakul Garg 	struct tls_rec *rec;
948a42055e8SVakul Garg 	int required_size;
949a42055e8SVakul Garg 	int num_async = 0;
9503c4d7559SDave Watson 	bool full_record;
951a42055e8SVakul Garg 	int record_room;
952a42055e8SVakul Garg 	int num_zc = 0;
9533c4d7559SDave Watson 	int orig_size;
9544128c0cfSVakul Garg 	int ret = 0;
9550cada332SVinay Kumar Yadav 	int pending;
9563c4d7559SDave Watson 
9571c3b63f1SRouven Czerwinski 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
9581c3b63f1SRouven Czerwinski 			       MSG_CMSG_COMPAT))
9594a5cdc60SValentin Vidic 		return -EOPNOTSUPP;
9603c4d7559SDave Watson 
96179ffe608SJakub Kicinski 	mutex_lock(&tls_ctx->tx_lock);
9623c4d7559SDave Watson 	lock_sock(sk);
9633c4d7559SDave Watson 
9643c4d7559SDave Watson 	if (unlikely(msg->msg_controllen)) {
9653c4d7559SDave Watson 		ret = tls_proccess_cmsg(sk, msg, &record_type);
966a42055e8SVakul Garg 		if (ret) {
967a42055e8SVakul Garg 			if (ret == -EINPROGRESS)
968a42055e8SVakul Garg 				num_async++;
969a42055e8SVakul Garg 			else if (ret != -EAGAIN)
9703c4d7559SDave Watson 				goto send_end;
9713c4d7559SDave Watson 		}
972a42055e8SVakul Garg 	}
9733c4d7559SDave Watson 
9743c4d7559SDave Watson 	while (msg_data_left(msg)) {
9753c4d7559SDave Watson 		if (sk->sk_err) {
97630be8f8dSr.hering@avm.de 			ret = -sk->sk_err;
9773c4d7559SDave Watson 			goto send_end;
9783c4d7559SDave Watson 		}
9793c4d7559SDave Watson 
980d3b18ad3SJohn Fastabend 		if (ctx->open_rec)
981d3b18ad3SJohn Fastabend 			rec = ctx->open_rec;
982d3b18ad3SJohn Fastabend 		else
983d3b18ad3SJohn Fastabend 			rec = ctx->open_rec = tls_get_rec(sk);
984a42055e8SVakul Garg 		if (!rec) {
985a42055e8SVakul Garg 			ret = -ENOMEM;
986a42055e8SVakul Garg 			goto send_end;
987a42055e8SVakul Garg 		}
988a42055e8SVakul Garg 
989d829e9c4SDaniel Borkmann 		msg_pl = &rec->msg_plaintext;
990d829e9c4SDaniel Borkmann 		msg_en = &rec->msg_encrypted;
991d829e9c4SDaniel Borkmann 
992d829e9c4SDaniel Borkmann 		orig_size = msg_pl->sg.size;
9933c4d7559SDave Watson 		full_record = false;
9943c4d7559SDave Watson 		try_to_copy = msg_data_left(msg);
995d829e9c4SDaniel Borkmann 		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
9963c4d7559SDave Watson 		if (try_to_copy >= record_room) {
9973c4d7559SDave Watson 			try_to_copy = record_room;
9983c4d7559SDave Watson 			full_record = true;
9993c4d7559SDave Watson 		}
10003c4d7559SDave Watson 
1001d829e9c4SDaniel Borkmann 		required_size = msg_pl->sg.size + try_to_copy +
10024509de14SVakul Garg 				prot->overhead_size;
10033c4d7559SDave Watson 
10043c4d7559SDave Watson 		if (!sk_stream_memory_free(sk))
10053c4d7559SDave Watson 			goto wait_for_sndbuf;
1006a42055e8SVakul Garg 
10073c4d7559SDave Watson alloc_encrypted:
1008d829e9c4SDaniel Borkmann 		ret = tls_alloc_encrypted_msg(sk, required_size);
10093c4d7559SDave Watson 		if (ret) {
10103c4d7559SDave Watson 			if (ret != -ENOSPC)
10113c4d7559SDave Watson 				goto wait_for_memory;
10123c4d7559SDave Watson 
10133c4d7559SDave Watson 			/* Adjust try_to_copy according to the amount that was
10143c4d7559SDave Watson 			 * actually allocated. The difference is due
10153c4d7559SDave Watson 			 * to the max sg elements limit
10163c4d7559SDave Watson 			 */
1017d829e9c4SDaniel Borkmann 			try_to_copy -= required_size - msg_en->sg.size;
10183c4d7559SDave Watson 			full_record = true;
10193c4d7559SDave Watson 		}
1020a42055e8SVakul Garg 
1021a42055e8SVakul Garg 		if (!is_kvec && (full_record || eor) && !async_capable) {
1022d3b18ad3SJohn Fastabend 			u32 first = msg_pl->sg.end;
1023d3b18ad3SJohn Fastabend 
1024d829e9c4SDaniel Borkmann 			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1025d829e9c4SDaniel Borkmann 							msg_pl, try_to_copy);
10263c4d7559SDave Watson 			if (ret)
10273c4d7559SDave Watson 				goto fallback_to_reg_send;
10283c4d7559SDave Watson 
1029a42055e8SVakul Garg 			num_zc++;
10303c4d7559SDave Watson 			copied += try_to_copy;
1031d3b18ad3SJohn Fastabend 
1032d3b18ad3SJohn Fastabend 			sk_msg_sg_copy_set(msg_pl, first);
1033d3b18ad3SJohn Fastabend 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1034d3b18ad3SJohn Fastabend 						  record_type, &copied,
1035d3b18ad3SJohn Fastabend 						  msg->msg_flags);
1036a42055e8SVakul Garg 			if (ret) {
1037a42055e8SVakul Garg 				if (ret == -EINPROGRESS)
1038a42055e8SVakul Garg 					num_async++;
1039d3b18ad3SJohn Fastabend 				else if (ret == -ENOMEM)
1040d3b18ad3SJohn Fastabend 					goto wait_for_memory;
1041c329ef96SJakub Kicinski 				else if (ctx->open_rec && ret == -ENOSPC)
1042d3b18ad3SJohn Fastabend 					goto rollback_iter;
1043a42055e8SVakul Garg 				else if (ret != -EAGAIN)
10443c4d7559SDave Watson 					goto send_end;
1045a42055e8SVakul Garg 			}
10465a3611efSDoron Roberts-Kedes 			continue;
1047d3b18ad3SJohn Fastabend rollback_iter:
1048d3b18ad3SJohn Fastabend 			copied -= try_to_copy;
1049d3b18ad3SJohn Fastabend 			sk_msg_sg_copy_clear(msg_pl, first);
1050d3b18ad3SJohn Fastabend 			iov_iter_revert(&msg->msg_iter,
1051d3b18ad3SJohn Fastabend 					msg_pl->sg.size - orig_size);
10523c4d7559SDave Watson fallback_to_reg_send:
1053d829e9c4SDaniel Borkmann 			sk_msg_trim(sk, msg_pl, orig_size);
10543c4d7559SDave Watson 		}
10553c4d7559SDave Watson 
1056d829e9c4SDaniel Borkmann 		required_size = msg_pl->sg.size + try_to_copy;
10574e6d4720SVakul Garg 
1058d829e9c4SDaniel Borkmann 		ret = tls_clone_plaintext_msg(sk, required_size);
10593c4d7559SDave Watson 		if (ret) {
10603c4d7559SDave Watson 			if (ret != -ENOSPC)
10614e6d4720SVakul Garg 				goto send_end;
10623c4d7559SDave Watson 
10633c4d7559SDave Watson 			/* Adjust try_to_copy according to the amount that was
10643c4d7559SDave Watson 			 * actually allocated. The difference is due
10653c4d7559SDave Watson 			 * to the max sg elements limit
10663c4d7559SDave Watson 			 */
1067d829e9c4SDaniel Borkmann 			try_to_copy -= required_size - msg_pl->sg.size;
10683c4d7559SDave Watson 			full_record = true;
10694509de14SVakul Garg 			sk_msg_trim(sk, msg_en,
10704509de14SVakul Garg 				    msg_pl->sg.size + prot->overhead_size);
10713c4d7559SDave Watson 		}
10723c4d7559SDave Watson 
107365a10e28SVakul Garg 		if (try_to_copy) {
107465a10e28SVakul Garg 			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
107565a10e28SVakul Garg 						       msg_pl, try_to_copy);
1076d829e9c4SDaniel Borkmann 			if (ret < 0)
10773c4d7559SDave Watson 				goto trim_sgl;
107865a10e28SVakul Garg 		}
10793c4d7559SDave Watson 
1080d829e9c4SDaniel Borkmann 		/* Open records defined only if successfully copied, otherwise
1081d829e9c4SDaniel Borkmann 		 * we would trim the sg but not reset the open record frags.
1082d829e9c4SDaniel Borkmann 		 */
1083d829e9c4SDaniel Borkmann 		tls_ctx->pending_open_record_frags = true;
10843c4d7559SDave Watson 		copied += try_to_copy;
10853c4d7559SDave Watson 		if (full_record || eor) {
1086d3b18ad3SJohn Fastabend 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1087d3b18ad3SJohn Fastabend 						  record_type, &copied,
1088d3b18ad3SJohn Fastabend 						  msg->msg_flags);
10893c4d7559SDave Watson 			if (ret) {
1090a42055e8SVakul Garg 				if (ret == -EINPROGRESS)
1091a42055e8SVakul Garg 					num_async++;
1092d3b18ad3SJohn Fastabend 				else if (ret == -ENOMEM)
1093d3b18ad3SJohn Fastabend 					goto wait_for_memory;
1094d3b18ad3SJohn Fastabend 				else if (ret != -EAGAIN) {
1095d3b18ad3SJohn Fastabend 					if (ret == -ENOSPC)
1096d3b18ad3SJohn Fastabend 						ret = 0;
10973c4d7559SDave Watson 					goto send_end;
10983c4d7559SDave Watson 				}
10993c4d7559SDave Watson 			}
1100d3b18ad3SJohn Fastabend 		}
11013c4d7559SDave Watson 
11023c4d7559SDave Watson 		continue;
11033c4d7559SDave Watson 
11043c4d7559SDave Watson wait_for_sndbuf:
11053c4d7559SDave Watson 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
11063c4d7559SDave Watson wait_for_memory:
11073c4d7559SDave Watson 		ret = sk_stream_wait_memory(sk, &timeo);
11083c4d7559SDave Watson 		if (ret) {
11093c4d7559SDave Watson trim_sgl:
1110c329ef96SJakub Kicinski 			if (ctx->open_rec)
1111d829e9c4SDaniel Borkmann 				tls_trim_both_msgs(sk, orig_size);
11123c4d7559SDave Watson 			goto send_end;
11133c4d7559SDave Watson 		}
11143c4d7559SDave Watson 
1115c329ef96SJakub Kicinski 		if (ctx->open_rec && msg_en->sg.size < required_size)
11163c4d7559SDave Watson 			goto alloc_encrypted;
11173c4d7559SDave Watson 	}
11183c4d7559SDave Watson 
1119a42055e8SVakul Garg 	if (!num_async) {
1120a42055e8SVakul Garg 		goto send_end;
1121a42055e8SVakul Garg 	} else if (num_zc) {
1122a42055e8SVakul Garg 		/* Wait for pending encryptions to get completed */
11230cada332SVinay Kumar Yadav 		spin_lock_bh(&ctx->encrypt_compl_lock);
11240cada332SVinay Kumar Yadav 		ctx->async_notify = true;
1125a42055e8SVakul Garg 
11260cada332SVinay Kumar Yadav 		pending = atomic_read(&ctx->encrypt_pending);
11270cada332SVinay Kumar Yadav 		spin_unlock_bh(&ctx->encrypt_compl_lock);
11280cada332SVinay Kumar Yadav 		if (pending)
1129a42055e8SVakul Garg 			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1130a42055e8SVakul Garg 		else
1131a42055e8SVakul Garg 			reinit_completion(&ctx->async_wait.completion);
1132a42055e8SVakul Garg 
11330cada332SVinay Kumar Yadav 		/* There can be no concurrent accesses, since we have no
11340cada332SVinay Kumar Yadav 		 * pending encrypt operations
11350cada332SVinay Kumar Yadav 		 */
1136a42055e8SVakul Garg 		WRITE_ONCE(ctx->async_notify, false);
1137a42055e8SVakul Garg 
1138a42055e8SVakul Garg 		if (ctx->async_wait.err) {
1139a42055e8SVakul Garg 			ret = ctx->async_wait.err;
1140a42055e8SVakul Garg 			copied = 0;
1141a42055e8SVakul Garg 		}
1142a42055e8SVakul Garg 	}
1143a42055e8SVakul Garg 
1144a42055e8SVakul Garg 	/* Transmit if any encryptions have completed */
1145a42055e8SVakul Garg 	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1146a42055e8SVakul Garg 		cancel_delayed_work(&ctx->tx_work.work);
1147a42055e8SVakul Garg 		tls_tx_records(sk, msg->msg_flags);
1148a42055e8SVakul Garg 	}
1149a42055e8SVakul Garg 
11503c4d7559SDave Watson send_end:
11513c4d7559SDave Watson 	ret = sk_stream_error(sk, msg->msg_flags, ret);
11523c4d7559SDave Watson 
11533c4d7559SDave Watson 	release_sock(sk);
115479ffe608SJakub Kicinski 	mutex_unlock(&tls_ctx->tx_lock);
1155a7bff11fSVadim Fedorenko 	return copied > 0 ? copied : ret;
11563c4d7559SDave Watson }
11573c4d7559SDave Watson 
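/* Common implementation for the sendpage() entry points below: append the
 * page to the open plaintext record, push full records (or the final page,
 * when MSG_SENDPAGE_NOTLAST is not set) through bpf_exec_tx_verdict(), and
 * finally transmit any asynchronously completed encryptions. Runs with the
 * socket lock held.
 */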
115801cb8a1aSYueHaibing static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
11593c4d7559SDave Watson 			      int offset, size_t size, int flags)
11603c4d7559SDave Watson {
1161a42055e8SVakul Garg 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
11623c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1163f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
11644509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
11653c4d7559SDave Watson 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
1166d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl;
1167a42055e8SVakul Garg 	struct tls_rec *rec;
1168a42055e8SVakul Garg 	int num_async = 0;
1169a7bff11fSVadim Fedorenko 	ssize_t copied = 0;
11703c4d7559SDave Watson 	bool full_record;
11713c4d7559SDave Watson 	int record_room;
11724128c0cfSVakul Garg 	int ret = 0;
1173a42055e8SVakul Garg 	bool eor;
11743c4d7559SDave Watson 
1175d452d48bSJakub Kicinski 	eor = !(flags & MSG_SENDPAGE_NOTLAST);
11763c4d7559SDave Watson 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
11773c4d7559SDave Watson 
11783c4d7559SDave Watson 	/* Call the sk_stream functions to manage the sndbuf mem. */
11793c4d7559SDave Watson 	while (size > 0) {
11803c4d7559SDave Watson 		size_t copy, required_size;
11813c4d7559SDave Watson 
11823c4d7559SDave Watson 		if (sk->sk_err) {
118330be8f8dSr.hering@avm.de 			ret = -sk->sk_err;
11843c4d7559SDave Watson 			goto sendpage_end;
11853c4d7559SDave Watson 		}
11863c4d7559SDave Watson 
1187d3b18ad3SJohn Fastabend 		if (ctx->open_rec)
1188d3b18ad3SJohn Fastabend 			rec = ctx->open_rec;
1189d3b18ad3SJohn Fastabend 		else
1190d3b18ad3SJohn Fastabend 			rec = ctx->open_rec = tls_get_rec(sk);
1191a42055e8SVakul Garg 		if (!rec) {
1192a42055e8SVakul Garg 			ret = -ENOMEM;
1193a42055e8SVakul Garg 			goto sendpage_end;
1194a42055e8SVakul Garg 		}
1195a42055e8SVakul Garg 
1196d829e9c4SDaniel Borkmann 		msg_pl = &rec->msg_plaintext;
1197d829e9c4SDaniel Borkmann 
11983c4d7559SDave Watson 		full_record = false;
1199d829e9c4SDaniel Borkmann 		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
12003c4d7559SDave Watson 		copy = size;
12013c4d7559SDave Watson 		if (copy >= record_room) {
12023c4d7559SDave Watson 			copy = record_room;
12033c4d7559SDave Watson 			full_record = true;
12043c4d7559SDave Watson 		}
1205d829e9c4SDaniel Borkmann 
12064509de14SVakul Garg 		required_size = msg_pl->sg.size + copy + prot->overhead_size;
12073c4d7559SDave Watson 
12083c4d7559SDave Watson 		if (!sk_stream_memory_free(sk))
12093c4d7559SDave Watson 			goto wait_for_sndbuf;
12103c4d7559SDave Watson alloc_payload:
1211d829e9c4SDaniel Borkmann 		ret = tls_alloc_encrypted_msg(sk, required_size);
12123c4d7559SDave Watson 		if (ret) {
12133c4d7559SDave Watson 			if (ret != -ENOSPC)
12143c4d7559SDave Watson 				goto wait_for_memory;
12153c4d7559SDave Watson 
12163c4d7559SDave Watson 			/* Adjust copy according to the amount that was
12173c4d7559SDave Watson 			 * actually allocated. The difference is due
12183c4d7559SDave Watson 			 * to max sg elements limit
12193c4d7559SDave Watson 			 */
1220d829e9c4SDaniel Borkmann 			copy -= required_size - msg_pl->sg.size;
12213c4d7559SDave Watson 			full_record = true;
12223c4d7559SDave Watson 		}
12233c4d7559SDave Watson 
1224d829e9c4SDaniel Borkmann 		sk_msg_page_add(msg_pl, page, copy, offset);
12253c4d7559SDave Watson 		sk_mem_charge(sk, copy);
1226d829e9c4SDaniel Borkmann 
12273c4d7559SDave Watson 		offset += copy;
12283c4d7559SDave Watson 		size -= copy;
1229d3b18ad3SJohn Fastabend 		copied += copy;
12303c4d7559SDave Watson 
1231d829e9c4SDaniel Borkmann 		tls_ctx->pending_open_record_frags = true;
1232d829e9c4SDaniel Borkmann 		if (full_record || eor || sk_msg_full(msg_pl)) {
1233d3b18ad3SJohn Fastabend 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1234d3b18ad3SJohn Fastabend 						  record_type, &copied, flags);
12353c4d7559SDave Watson 			if (ret) {
1236a42055e8SVakul Garg 				if (ret == -EINPROGRESS)
1237a42055e8SVakul Garg 					num_async++;
1238d3b18ad3SJohn Fastabend 				else if (ret == -ENOMEM)
1239d3b18ad3SJohn Fastabend 					goto wait_for_memory;
1240d3b18ad3SJohn Fastabend 				else if (ret != -EAGAIN) {
1241d3b18ad3SJohn Fastabend 					if (ret == -ENOSPC)
1242d3b18ad3SJohn Fastabend 						ret = 0;
12433c4d7559SDave Watson 					goto sendpage_end;
12443c4d7559SDave Watson 				}
12453c4d7559SDave Watson 			}
1246d3b18ad3SJohn Fastabend 		}
12473c4d7559SDave Watson 		continue;
12483c4d7559SDave Watson wait_for_sndbuf:
12493c4d7559SDave Watson 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
12503c4d7559SDave Watson wait_for_memory:
12513c4d7559SDave Watson 		ret = sk_stream_wait_memory(sk, &timeo);
12523c4d7559SDave Watson 		if (ret) {
1253c329ef96SJakub Kicinski 			if (ctx->open_rec)
1254d829e9c4SDaniel Borkmann 				tls_trim_both_msgs(sk, msg_pl->sg.size);
12553c4d7559SDave Watson 			goto sendpage_end;
12563c4d7559SDave Watson 		}
12573c4d7559SDave Watson 
1258c329ef96SJakub Kicinski 		if (ctx->open_rec)
12593c4d7559SDave Watson 			goto alloc_payload;
12603c4d7559SDave Watson 	}
12613c4d7559SDave Watson 
1262a42055e8SVakul Garg 	if (num_async) {
1263a42055e8SVakul Garg 		/* Transmit if any encryptions have completed */
1264a42055e8SVakul Garg 		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1265a42055e8SVakul Garg 			cancel_delayed_work(&ctx->tx_work.work);
1266a42055e8SVakul Garg 			tls_tx_records(sk, flags);
1267a42055e8SVakul Garg 		}
1268a42055e8SVakul Garg 	}
12693c4d7559SDave Watson sendpage_end:
12703c4d7559SDave Watson 	ret = sk_stream_error(sk, flags, ret);
1271a7bff11fSVadim Fedorenko 	return copied > 0 ? copied : ret;
12723c4d7559SDave Watson }
12733c4d7559SDave Watson 
1274d4ffb02dSWillem de Bruijn int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1275d4ffb02dSWillem de Bruijn 			   int offset, size_t size, int flags)
1276d4ffb02dSWillem de Bruijn {
1277d4ffb02dSWillem de Bruijn 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1278d4ffb02dSWillem de Bruijn 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
1279d4ffb02dSWillem de Bruijn 		      MSG_NO_SHARED_FRAGS))
12804a5cdc60SValentin Vidic 		return -EOPNOTSUPP;
1281d4ffb02dSWillem de Bruijn 
1282d4ffb02dSWillem de Bruijn 	return tls_sw_do_sendpage(sk, page, offset, size, flags);
1283d4ffb02dSWillem de Bruijn }
1284d4ffb02dSWillem de Bruijn 
12850608c69cSJohn Fastabend int tls_sw_sendpage(struct sock *sk, struct page *page,
12860608c69cSJohn Fastabend 		    int offset, size_t size, int flags)
12870608c69cSJohn Fastabend {
128879ffe608SJakub Kicinski 	struct tls_context *tls_ctx = tls_get_ctx(sk);
12890608c69cSJohn Fastabend 	int ret;
12900608c69cSJohn Fastabend 
12910608c69cSJohn Fastabend 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
12920608c69cSJohn Fastabend 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
12934a5cdc60SValentin Vidic 		return -EOPNOTSUPP;
12940608c69cSJohn Fastabend 
129579ffe608SJakub Kicinski 	mutex_lock(&tls_ctx->tx_lock);
12960608c69cSJohn Fastabend 	lock_sock(sk);
12970608c69cSJohn Fastabend 	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
12980608c69cSJohn Fastabend 	release_sock(sk);
129979ffe608SJakub Kicinski 	mutex_unlock(&tls_ctx->tx_lock);
13000608c69cSJohn Fastabend 	return ret;
13010608c69cSJohn Fastabend }
13020608c69cSJohn Fastabend 
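/* Wait until the strparser has produced a full record (ctx->recv_pkt) or the
 * psock ingress queue is non-empty. Returns NULL on socket error, shutdown,
 * SOCK_DONE, expired timeout or signal; *err is filled in for the error,
 * non-blocking and signal cases.
 */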
1303d3b18ad3SJohn Fastabend static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
1304974271e5SJim Ma 				     bool nonblock, long timeo, int *err)
1305c46234ebSDave Watson {
1306c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1307f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1308c46234ebSDave Watson 	struct sk_buff *skb;
1309c46234ebSDave Watson 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1310c46234ebSDave Watson 
1311d3b18ad3SJohn Fastabend 	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
1312c46234ebSDave Watson 		if (sk->sk_err) {
1313c46234ebSDave Watson 			*err = sock_error(sk);
1314c46234ebSDave Watson 			return NULL;
1315c46234ebSDave Watson 		}
1316c46234ebSDave Watson 
131720ffc7adSVadim Fedorenko 		if (!skb_queue_empty(&sk->sk_receive_queue)) {
131820ffc7adSVadim Fedorenko 			__strp_unpause(&ctx->strp);
131920ffc7adSVadim Fedorenko 			if (ctx->recv_pkt)
132020ffc7adSVadim Fedorenko 				return ctx->recv_pkt;
132120ffc7adSVadim Fedorenko 		}
132220ffc7adSVadim Fedorenko 
1323fcf4793eSDoron Roberts-Kedes 		if (sk->sk_shutdown & RCV_SHUTDOWN)
1324fcf4793eSDoron Roberts-Kedes 			return NULL;
1325fcf4793eSDoron Roberts-Kedes 
1326c46234ebSDave Watson 		if (sock_flag(sk, SOCK_DONE))
1327c46234ebSDave Watson 			return NULL;
1328c46234ebSDave Watson 
1329974271e5SJim Ma 		if (nonblock || !timeo) {
1330c46234ebSDave Watson 			*err = -EAGAIN;
1331c46234ebSDave Watson 			return NULL;
1332c46234ebSDave Watson 		}
1333c46234ebSDave Watson 
1334c46234ebSDave Watson 		add_wait_queue(sk_sleep(sk), &wait);
1335c46234ebSDave Watson 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1336d3b18ad3SJohn Fastabend 		sk_wait_event(sk, &timeo,
1337d3b18ad3SJohn Fastabend 			      ctx->recv_pkt != skb ||
1338d3b18ad3SJohn Fastabend 			      !sk_psock_queue_empty(psock),
1339d3b18ad3SJohn Fastabend 			      &wait);
1340c46234ebSDave Watson 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1341c46234ebSDave Watson 		remove_wait_queue(sk_sleep(sk), &wait);
1342c46234ebSDave Watson 
1343c46234ebSDave Watson 		/* Handle signals */
1344c46234ebSDave Watson 		if (signal_pending(current)) {
1345c46234ebSDave Watson 			*err = sock_intr_errno(timeo);
1346c46234ebSDave Watson 			return NULL;
1347c46234ebSDave Watson 		}
1348c46234ebSDave Watson 	}
1349c46234ebSDave Watson 
1350c46234ebSDave Watson 	return skb;
1351c46234ebSDave Watson }
1352c46234ebSDave Watson 
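/* Pin the user pages backing @from and map up to @length bytes of them into
 * the scatterlist @to, starting at index *pages_used. On failure the iterator
 * is reverted so the caller can fall back to copying instead.
 */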
1353d4bd88e6SJakub Kicinski static int tls_setup_from_iter(struct iov_iter *from,
1354d829e9c4SDaniel Borkmann 			       int length, int *pages_used,
1355d829e9c4SDaniel Borkmann 			       struct scatterlist *to,
1356d829e9c4SDaniel Borkmann 			       int to_max_pages)
1357d829e9c4SDaniel Borkmann {
1358d829e9c4SDaniel Borkmann 	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1359d829e9c4SDaniel Borkmann 	struct page *pages[MAX_SKB_FRAGS];
1360d4bd88e6SJakub Kicinski 	unsigned int size = 0;
1361d829e9c4SDaniel Borkmann 	ssize_t copied, use;
1362d829e9c4SDaniel Borkmann 	size_t offset;
1363d829e9c4SDaniel Borkmann 
1364d829e9c4SDaniel Borkmann 	while (length > 0) {
1365d829e9c4SDaniel Borkmann 		i = 0;
1366d829e9c4SDaniel Borkmann 		maxpages = to_max_pages - num_elem;
1367d829e9c4SDaniel Borkmann 		if (maxpages == 0) {
1368d829e9c4SDaniel Borkmann 			rc = -EFAULT;
1369d829e9c4SDaniel Borkmann 			goto out;
1370d829e9c4SDaniel Borkmann 		}
1371d829e9c4SDaniel Borkmann 		copied = iov_iter_get_pages(from, pages,
1372d829e9c4SDaniel Borkmann 					    length,
1373d829e9c4SDaniel Borkmann 					    maxpages, &offset);
1374d829e9c4SDaniel Borkmann 		if (copied <= 0) {
1375d829e9c4SDaniel Borkmann 			rc = -EFAULT;
1376d829e9c4SDaniel Borkmann 			goto out;
1377d829e9c4SDaniel Borkmann 		}
1378d829e9c4SDaniel Borkmann 
1379d829e9c4SDaniel Borkmann 		iov_iter_advance(from, copied);
1380d829e9c4SDaniel Borkmann 
1381d829e9c4SDaniel Borkmann 		length -= copied;
1382d829e9c4SDaniel Borkmann 		size += copied;
1383d829e9c4SDaniel Borkmann 		while (copied) {
1384d829e9c4SDaniel Borkmann 			use = min_t(int, copied, PAGE_SIZE - offset);
1385d829e9c4SDaniel Borkmann 
1386d829e9c4SDaniel Borkmann 			sg_set_page(&to[num_elem],
1387d829e9c4SDaniel Borkmann 				    pages[i], use, offset);
1388d829e9c4SDaniel Borkmann 			sg_unmark_end(&to[num_elem]);
1389d829e9c4SDaniel Borkmann 			/* We do not uncharge memory from this API */
1390d829e9c4SDaniel Borkmann 
1391d829e9c4SDaniel Borkmann 			offset = 0;
1392d829e9c4SDaniel Borkmann 			copied -= use;
1393d829e9c4SDaniel Borkmann 
1394d829e9c4SDaniel Borkmann 			i++;
1395d829e9c4SDaniel Borkmann 			num_elem++;
1396d829e9c4SDaniel Borkmann 		}
1397d829e9c4SDaniel Borkmann 	}
1398d829e9c4SDaniel Borkmann 	/* Mark the end in the last sg entry if newly added */
1399d829e9c4SDaniel Borkmann 	if (num_elem > *pages_used)
1400d829e9c4SDaniel Borkmann 		sg_mark_end(&to[num_elem - 1]);
1401d829e9c4SDaniel Borkmann out:
1402d829e9c4SDaniel Borkmann 	if (rc)
1403d4bd88e6SJakub Kicinski 		iov_iter_revert(from, size);
1404d829e9c4SDaniel Borkmann 	*pages_used = num_elem;
1405d829e9c4SDaniel Borkmann 
1406d829e9c4SDaniel Borkmann 	return rc;
1407d829e9c4SDaniel Borkmann }
1408d829e9c4SDaniel Borkmann 
14090b243d00SVakul Garg /* This function decrypts the input skb into either out_iov, out_sg or the
14100b243d00SVakul Garg  * skb buffers themselves. The flag 'darg->zc' indicates whether zero-copy
14110b243d00SVakul Garg  * mode should be tried. With zero-copy mode, either out_iov or out_sg must
14120b243d00SVakul Garg  * be non-NULL. If both out_iov and out_sg are NULL, the decryption happens
14130b243d00SVakul Garg  * inside the skb buffers themselves, i.e. zero-copy gets disabled and
14140b243d00SVakul Garg  * 'darg->zc' is updated.
14150b243d00SVakul Garg  */
14160b243d00SVakul Garg 
14170b243d00SVakul Garg static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
14180b243d00SVakul Garg 			    struct iov_iter *out_iov,
14190b243d00SVakul Garg 			    struct scatterlist *out_sg,
1420*4175eac3SJakub Kicinski 			    struct tls_decrypt_arg *darg)
14210b243d00SVakul Garg {
14220b243d00SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
14230b243d00SVakul Garg 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
14244509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
14250b243d00SVakul Garg 	struct strp_msg *rxm = strp_msg(skb);
1426c3f6bb74SJakub Kicinski 	struct tls_msg *tlm = tls_msg(skb);
14270b243d00SVakul Garg 	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
14280b243d00SVakul Garg 	struct aead_request *aead_req;
14290b243d00SVakul Garg 	struct sk_buff *unused;
14300b243d00SVakul Garg 	u8 *aad, *iv, *mem = NULL;
14310b243d00SVakul Garg 	struct scatterlist *sgin = NULL;
14320b243d00SVakul Garg 	struct scatterlist *sgout = NULL;
14334509de14SVakul Garg 	const int data_len = rxm->full_len - prot->overhead_size +
14344509de14SVakul Garg 			     prot->tail_size;
1435f295b3aeSVakul Garg 	int iv_offset = 0;
14360b243d00SVakul Garg 
1437*4175eac3SJakub Kicinski 	if (darg->zc && (out_iov || out_sg)) {
14380b243d00SVakul Garg 		if (out_iov)
1439b93235e6SJakub Kicinski 			n_sgout = 1 +
1440b93235e6SJakub Kicinski 				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
14410b243d00SVakul Garg 		else
14420b243d00SVakul Garg 			n_sgout = sg_nents(out_sg);
14434509de14SVakul Garg 		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
14444509de14SVakul Garg 				 rxm->full_len - prot->prepend_size);
14450b243d00SVakul Garg 	} else {
14460b243d00SVakul Garg 		n_sgout = 0;
1447*4175eac3SJakub Kicinski 		darg->zc = false;
14480927f71dSDoron Roberts-Kedes 		n_sgin = skb_cow_data(skb, 0, &unused);
14490b243d00SVakul Garg 	}
14500b243d00SVakul Garg 
14510b243d00SVakul Garg 	if (n_sgin < 1)
14520b243d00SVakul Garg 		return -EBADMSG;
14530b243d00SVakul Garg 
14540b243d00SVakul Garg 	/* Increment to accommodate AAD */
14550b243d00SVakul Garg 	n_sgin = n_sgin + 1;
14560b243d00SVakul Garg 
14570b243d00SVakul Garg 	nsg = n_sgin + n_sgout;
14580b243d00SVakul Garg 
14590b243d00SVakul Garg 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
14600b243d00SVakul Garg 	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
14614509de14SVakul Garg 	mem_size = mem_size + prot->aad_size;
14620b243d00SVakul Garg 	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
14630b243d00SVakul Garg 
14640b243d00SVakul Garg 	/* Allocate a single block of memory which contains
14650b243d00SVakul Garg 	 * aead_req || sgin[] || sgout[] || aad || iv.
14660b243d00SVakul Garg 	 * This order achieves correct alignment for aead_req, sgin, sgout.
14670b243d00SVakul Garg 	 */
14680b243d00SVakul Garg 	mem = kmalloc(mem_size, sk->sk_allocation);
14690b243d00SVakul Garg 	if (!mem)
14700b243d00SVakul Garg 		return -ENOMEM;
14710b243d00SVakul Garg 
14720b243d00SVakul Garg 	/* Segment the allocated memory */
14730b243d00SVakul Garg 	aead_req = (struct aead_request *)mem;
14740b243d00SVakul Garg 	sgin = (struct scatterlist *)(mem + aead_size);
14750b243d00SVakul Garg 	sgout = sgin + n_sgin;
14760b243d00SVakul Garg 	aad = (u8 *)(sgout + n_sgout);
14774509de14SVakul Garg 	iv = aad + prot->aad_size;
14780b243d00SVakul Garg 
1479128cfb88STianjia Zhang 	/* For CCM-based ciphers, the first byte of nonce+iv is a constant */
1480128cfb88STianjia Zhang 	switch (prot->cipher_type) {
1481128cfb88STianjia Zhang 	case TLS_CIPHER_AES_CCM_128:
1482128cfb88STianjia Zhang 		iv[0] = TLS_AES_CCM_IV_B0_BYTE;
1483f295b3aeSVakul Garg 		iv_offset = 1;
1484128cfb88STianjia Zhang 		break;
1485128cfb88STianjia Zhang 	case TLS_CIPHER_SM4_CCM:
1486128cfb88STianjia Zhang 		iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1487128cfb88STianjia Zhang 		iv_offset = 1;
1488128cfb88STianjia Zhang 		break;
1489f295b3aeSVakul Garg 	}
1490f295b3aeSVakul Garg 
14910b243d00SVakul Garg 	/* Prepare IV */
14920b243d00SVakul Garg 	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1493f295b3aeSVakul Garg 			    iv + iv_offset + prot->salt_size,
14944509de14SVakul Garg 			    prot->iv_size);
14950b243d00SVakul Garg 	if (err < 0) {
14960b243d00SVakul Garg 		kfree(mem);
14970b243d00SVakul Garg 		return err;
14980b243d00SVakul Garg 	}
1499a6acbe62SVadim Fedorenko 	if (prot->version == TLS_1_3_VERSION ||
1500a6acbe62SVadim Fedorenko 	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
1501f295b3aeSVakul Garg 		memcpy(iv + iv_offset, tls_ctx->rx.iv,
15029381fe8cSZiyang Xuan 		       prot->iv_size + prot->salt_size);
1503130b392cSDave Watson 	else
1504f295b3aeSVakul Garg 		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
15050b243d00SVakul Garg 
150659610606STianjia Zhang 	xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
1507130b392cSDave Watson 
15080b243d00SVakul Garg 	/* Prepare AAD */
15094509de14SVakul Garg 	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
15104509de14SVakul Garg 		     prot->tail_size,
1511c3f6bb74SJakub Kicinski 		     tls_ctx->rx.rec_seq, tlm->control, prot);
15120b243d00SVakul Garg 
15130b243d00SVakul Garg 	/* Prepare sgin */
15140b243d00SVakul Garg 	sg_init_table(sgin, n_sgin);
15154509de14SVakul Garg 	sg_set_buf(&sgin[0], aad, prot->aad_size);
15160b243d00SVakul Garg 	err = skb_to_sgvec(skb, &sgin[1],
15174509de14SVakul Garg 			   rxm->offset + prot->prepend_size,
15184509de14SVakul Garg 			   rxm->full_len - prot->prepend_size);
15190b243d00SVakul Garg 	if (err < 0) {
15200b243d00SVakul Garg 		kfree(mem);
15210b243d00SVakul Garg 		return err;
15220b243d00SVakul Garg 	}
15230b243d00SVakul Garg 
15240b243d00SVakul Garg 	if (n_sgout) {
15250b243d00SVakul Garg 		if (out_iov) {
15260b243d00SVakul Garg 			sg_init_table(sgout, n_sgout);
15274509de14SVakul Garg 			sg_set_buf(&sgout[0], aad, prot->aad_size);
15280b243d00SVakul Garg 
1529d4bd88e6SJakub Kicinski 			err = tls_setup_from_iter(out_iov, data_len,
1530d4bd88e6SJakub Kicinski 						  &pages, &sgout[1],
1531d829e9c4SDaniel Borkmann 						  (n_sgout - 1));
15320b243d00SVakul Garg 			if (err < 0)
15330b243d00SVakul Garg 				goto fallback_to_reg_recv;
15340b243d00SVakul Garg 		} else if (out_sg) {
15350b243d00SVakul Garg 			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
15360b243d00SVakul Garg 		} else {
15370b243d00SVakul Garg 			goto fallback_to_reg_recv;
15380b243d00SVakul Garg 		}
15390b243d00SVakul Garg 	} else {
15400b243d00SVakul Garg fallback_to_reg_recv:
15410b243d00SVakul Garg 		sgout = sgin;
15420b243d00SVakul Garg 		pages = 0;
1543*4175eac3SJakub Kicinski 		darg->zc = false;
15440b243d00SVakul Garg 	}
15450b243d00SVakul Garg 
15460b243d00SVakul Garg 	/* Prepare and submit AEAD request */
154794524d8fSVakul Garg 	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
1548*4175eac3SJakub Kicinski 				data_len, aead_req, darg->async);
154994524d8fSVakul Garg 	if (err == -EINPROGRESS)
155094524d8fSVakul Garg 		return err;
15510b243d00SVakul Garg 
15520b243d00SVakul Garg 	/* Release the pages in case iov was mapped to pages */
15530b243d00SVakul Garg 	for (; pages > 0; pages--)
15540b243d00SVakul Garg 		put_page(sg_page(&sgout[pages]));
15550b243d00SVakul Garg 
15560b243d00SVakul Garg 	kfree(mem);
15570b243d00SVakul Garg 	return err;
15580b243d00SVakul Garg }
15590b243d00SVakul Garg 
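/* Decrypt one record and update its per-skb state: strip the TLS header,
 * padding and overhead from the strp_msg offsets, advance the RX record
 * sequence number and mark the skb as decrypted. Records already decrypted
 * by the device offload (TLS_HW) skip the software AEAD step.
 */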
1560dafb67f3SBoris Pismenny static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1561*4175eac3SJakub Kicinski 			      struct iov_iter *dest,
1562*4175eac3SJakub Kicinski 			      struct tls_decrypt_arg *darg)
1563dafb67f3SBoris Pismenny {
1564dafb67f3SBoris Pismenny 	struct tls_context *tls_ctx = tls_get_ctx(sk);
15654509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1566dafb67f3SBoris Pismenny 	struct strp_msg *rxm = strp_msg(skb);
15677dc59c33SJakub Kicinski 	struct tls_msg *tlm = tls_msg(skb);
15683764ae5bSJakub Kicinski 	int pad, err;
1569dafb67f3SBoris Pismenny 
15703764ae5bSJakub Kicinski 	if (tlm->decrypted) {
1571*4175eac3SJakub Kicinski 		darg->zc = false;
15723764ae5bSJakub Kicinski 		return 0;
15733764ae5bSJakub Kicinski 	}
15743764ae5bSJakub Kicinski 
1575b9d8fec9SJakub Kicinski 	if (tls_ctx->rx_conf == TLS_HW) {
15764de30a8dSJakub Kicinski 		err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
15774799ac81SBoris Pismenny 		if (err < 0)
15784799ac81SBoris Pismenny 			return err;
157971471ca3SJakub Kicinski 		if (err > 0) {
158071471ca3SJakub Kicinski 			tlm->decrypted = 1;
1581*4175eac3SJakub Kicinski 			darg->zc = false;
15823764ae5bSJakub Kicinski 			goto decrypt_done;
15833764ae5bSJakub Kicinski 		}
1584b9d8fec9SJakub Kicinski 	}
1585be2fbc15SJakub Kicinski 
1586*4175eac3SJakub Kicinski 	err = decrypt_internal(sk, skb, dest, NULL, darg);
158794524d8fSVakul Garg 	if (err < 0) {
158894524d8fSVakul Garg 		if (err == -EINPROGRESS)
15893764ae5bSJakub Kicinski 			tls_advance_record_sn(sk, prot, &tls_ctx->rx);
15905c5d22a7SJakub Kicinski 		else if (err == -EBADMSG)
15913764ae5bSJakub Kicinski 			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
1592dafb67f3SBoris Pismenny 		return err;
159394524d8fSVakul Garg 	}
1594130b392cSDave Watson 
15953764ae5bSJakub Kicinski decrypt_done:
1596c3f6bb74SJakub Kicinski 	pad = padding_length(prot, skb);
1597b53f4976SJakub Kicinski 	if (pad < 0)
1598b53f4976SJakub Kicinski 		return pad;
1599b53f4976SJakub Kicinski 
1600b53f4976SJakub Kicinski 	rxm->full_len -= pad;
16014509de14SVakul Garg 	rxm->offset += prot->prepend_size;
16024509de14SVakul Garg 	rxm->full_len -= prot->overhead_size;
1603fb0f886fSJakub Kicinski 	tls_advance_record_sn(sk, prot, &tls_ctx->rx);
16047dc59c33SJakub Kicinski 	tlm->decrypted = 1;
1605dafb67f3SBoris Pismenny 
16063764ae5bSJakub Kicinski 	return 0;
1607dafb67f3SBoris Pismenny }
1608dafb67f3SBoris Pismenny 
1609dafb67f3SBoris Pismenny int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1610c46234ebSDave Watson 		struct scatterlist *sgout)
1611c46234ebSDave Watson {
1612*4175eac3SJakub Kicinski 	struct tls_decrypt_arg darg = { .zc = true, };
1613c46234ebSDave Watson 
1614*4175eac3SJakub Kicinski 	return decrypt_internal(sk, skb, NULL, sgout, &darg);
1615c46234ebSDave Watson }
1616c46234ebSDave Watson 
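/* Consume @len bytes of the current record. Returns false while unread data
 * remains in the record, true once it has been fully consumed, in which case
 * the skb (if any) is released and the strparser is unpaused to parse the
 * next record.
 */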
1617c46234ebSDave Watson static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1618c46234ebSDave Watson 			       unsigned int len)
1619c46234ebSDave Watson {
1620c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1621f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
162294524d8fSVakul Garg 
162394524d8fSVakul Garg 	if (skb) {
1624c46234ebSDave Watson 		struct strp_msg *rxm = strp_msg(skb);
1625c46234ebSDave Watson 
1626c46234ebSDave Watson 		if (len < rxm->full_len) {
1627c46234ebSDave Watson 			rxm->offset += len;
1628c46234ebSDave Watson 			rxm->full_len -= len;
1629c46234ebSDave Watson 			return false;
1630c46234ebSDave Watson 		}
1631a88c26f6SVakul Garg 		consume_skb(skb);
163294524d8fSVakul Garg 	}
1633c46234ebSDave Watson 
1634c46234ebSDave Watson 	/* Finished with message */
1635c46234ebSDave Watson 	ctx->recv_pkt = NULL;
16367170e604SDoron Roberts-Kedes 	__strp_unpause(&ctx->strp);
1637c46234ebSDave Watson 
1638c46234ebSDave Watson 	return true;
1639c46234ebSDave Watson }
1640c46234ebSDave Watson 
1641692d7b5dSVakul Garg /* This function traverses the rx_list in the tls receive context to copy the
16422b794c40SVakul Garg  * decrypted records into the buffer provided by the caller when zero-copy is not
1643692d7b5dSVakul Garg  * true. Further, the records are removed from the rx_list if it is not a peek
1644692d7b5dSVakul Garg  * case and the record has been consumed completely.
1645692d7b5dSVakul Garg  */
1646692d7b5dSVakul Garg static int process_rx_list(struct tls_sw_context_rx *ctx,
1647692d7b5dSVakul Garg 			   struct msghdr *msg,
16482b794c40SVakul Garg 			   u8 *control,
16492b794c40SVakul Garg 			   bool *cmsg,
1650692d7b5dSVakul Garg 			   size_t skip,
1651692d7b5dSVakul Garg 			   size_t len,
1652692d7b5dSVakul Garg 			   bool zc,
1653692d7b5dSVakul Garg 			   bool is_peek)
1654692d7b5dSVakul Garg {
1655692d7b5dSVakul Garg 	struct sk_buff *skb = skb_peek(&ctx->rx_list);
16562b794c40SVakul Garg 	u8 ctrl = *control;
16572b794c40SVakul Garg 	u8 msgc = *cmsg;
16582b794c40SVakul Garg 	struct tls_msg *tlm;
1659692d7b5dSVakul Garg 	ssize_t copied = 0;
1660692d7b5dSVakul Garg 
16612b794c40SVakul Garg 	/* Set the record type in 'control' if caller didn't pass it */
16622b794c40SVakul Garg 	if (!ctrl && skb) {
16632b794c40SVakul Garg 		tlm = tls_msg(skb);
16642b794c40SVakul Garg 		ctrl = tlm->control;
16652b794c40SVakul Garg 	}
16662b794c40SVakul Garg 
1667692d7b5dSVakul Garg 	while (skip && skb) {
1668692d7b5dSVakul Garg 		struct strp_msg *rxm = strp_msg(skb);
16692b794c40SVakul Garg 		tlm = tls_msg(skb);
16702b794c40SVakul Garg 
16712b794c40SVakul Garg 		/* Cannot process a record of different type */
16722b794c40SVakul Garg 		if (ctrl != tlm->control)
16732b794c40SVakul Garg 			return 0;
1674692d7b5dSVakul Garg 
1675692d7b5dSVakul Garg 		if (skip < rxm->full_len)
1676692d7b5dSVakul Garg 			break;
1677692d7b5dSVakul Garg 
1678692d7b5dSVakul Garg 		skip = skip - rxm->full_len;
1679692d7b5dSVakul Garg 		skb = skb_peek_next(skb, &ctx->rx_list);
1680692d7b5dSVakul Garg 	}
1681692d7b5dSVakul Garg 
1682692d7b5dSVakul Garg 	while (len && skb) {
1683692d7b5dSVakul Garg 		struct sk_buff *next_skb;
1684692d7b5dSVakul Garg 		struct strp_msg *rxm = strp_msg(skb);
1685692d7b5dSVakul Garg 		int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1686692d7b5dSVakul Garg 
16872b794c40SVakul Garg 		tlm = tls_msg(skb);
16882b794c40SVakul Garg 
16892b794c40SVakul Garg 		/* Cannot process a record of different type */
16902b794c40SVakul Garg 		if (ctrl != tlm->control)
16912b794c40SVakul Garg 			return 0;
16922b794c40SVakul Garg 
16932b794c40SVakul Garg 		/* Set record type if not already done. For a non-data record,
16942b794c40SVakul Garg 		 * do not proceed if record type could not be copied.
16952b794c40SVakul Garg 		 */
16962b794c40SVakul Garg 		if (!msgc) {
16972b794c40SVakul Garg 			int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
16982b794c40SVakul Garg 					    sizeof(ctrl), &ctrl);
16992b794c40SVakul Garg 			msgc = true;
17002b794c40SVakul Garg 			if (ctrl != TLS_RECORD_TYPE_DATA) {
17012b794c40SVakul Garg 				if (cerr || msg->msg_flags & MSG_CTRUNC)
17022b794c40SVakul Garg 					return -EIO;
17032b794c40SVakul Garg 
17042b794c40SVakul Garg 				*cmsg = msgc;
17052b794c40SVakul Garg 			}
17062b794c40SVakul Garg 		}
17072b794c40SVakul Garg 
1708692d7b5dSVakul Garg 		if (!zc || (rxm->full_len - skip) > len) {
1709692d7b5dSVakul Garg 			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1710692d7b5dSVakul Garg 						    msg, chunk);
1711692d7b5dSVakul Garg 			if (err < 0)
1712692d7b5dSVakul Garg 				return err;
1713692d7b5dSVakul Garg 		}
1714692d7b5dSVakul Garg 
1715692d7b5dSVakul Garg 		len = len - chunk;
1716692d7b5dSVakul Garg 		copied = copied + chunk;
1717692d7b5dSVakul Garg 
1718692d7b5dSVakul Garg 		/* Consume the data from the record in the non-peek case */
1719692d7b5dSVakul Garg 		if (!is_peek) {
1720692d7b5dSVakul Garg 			rxm->offset = rxm->offset + chunk;
1721692d7b5dSVakul Garg 			rxm->full_len = rxm->full_len - chunk;
1722692d7b5dSVakul Garg 
1723692d7b5dSVakul Garg 			/* Return if there is unconsumed data in the record */
1724692d7b5dSVakul Garg 			if (rxm->full_len - skip)
1725692d7b5dSVakul Garg 				break;
1726692d7b5dSVakul Garg 		}
1727692d7b5dSVakul Garg 
1728692d7b5dSVakul Garg 		/* The remaining skip-bytes must lie in 1st record in rx_list.
1729692d7b5dSVakul Garg 		 * So from the 2nd record, 'skip' should be 0.
1730692d7b5dSVakul Garg 		 */
1731692d7b5dSVakul Garg 		skip = 0;
1732692d7b5dSVakul Garg 
1733692d7b5dSVakul Garg 		if (msg)
1734692d7b5dSVakul Garg 			msg->msg_flags |= MSG_EOR;
1735692d7b5dSVakul Garg 
1736692d7b5dSVakul Garg 		next_skb = skb_peek_next(skb, &ctx->rx_list);
1737692d7b5dSVakul Garg 
1738692d7b5dSVakul Garg 		if (!is_peek) {
1739692d7b5dSVakul Garg 			skb_unlink(skb, &ctx->rx_list);
1740a88c26f6SVakul Garg 			consume_skb(skb);
1741692d7b5dSVakul Garg 		}
1742692d7b5dSVakul Garg 
1743692d7b5dSVakul Garg 		skb = next_skb;
1744692d7b5dSVakul Garg 	}
1745692d7b5dSVakul Garg 
17462b794c40SVakul Garg 	*control = ctrl;
1747692d7b5dSVakul Garg 	return copied;
1748692d7b5dSVakul Garg }
1749692d7b5dSVakul Garg 
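/* recvmsg() for TLS_SW sockets. Already-decrypted records queued on rx_list
 * are drained first, then records are pulled from the strparser, decrypted
 * (zero-copy into the user iovec where possible) and copied out. The record
 * type is reported through a TLS_GET_RECORD_TYPE cmsg; an illustrative
 * userspace sketch (fd, buf and record_type are assumed to be set up by the
 * caller, not part of this file):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg;
 *
 *	if (recvmsg(fd, &m, 0) > 0) {
 *		cmsg = CMSG_FIRSTHDR(&m);
 *		if (cmsg && cmsg->cmsg_level == SOL_TLS &&
 *		    cmsg->cmsg_type == TLS_GET_RECORD_TYPE)
 *			record_type = *CMSG_DATA(cmsg);
 *	}
 */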
1750c46234ebSDave Watson int tls_sw_recvmsg(struct sock *sk,
1751c46234ebSDave Watson 		   struct msghdr *msg,
1752c46234ebSDave Watson 		   size_t len,
1753c46234ebSDave Watson 		   int nonblock,
1754c46234ebSDave Watson 		   int flags,
1755c46234ebSDave Watson 		   int *addr_len)
1756c46234ebSDave Watson {
1757c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1758f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
17594509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1760d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
1761bfc06e1aSJakub Kicinski 	int num_async, pending;
1762692d7b5dSVakul Garg 	unsigned char control = 0;
1763692d7b5dSVakul Garg 	ssize_t decrypted = 0;
1764c46234ebSDave Watson 	struct strp_msg *rxm;
17652b794c40SVakul Garg 	struct tls_msg *tlm;
1766c46234ebSDave Watson 	struct sk_buff *skb;
1767c46234ebSDave Watson 	ssize_t copied = 0;
1768c46234ebSDave Watson 	bool cmsg = false;
176906030dbaSDaniel Borkmann 	int target, err = 0;
1770c46234ebSDave Watson 	long timeo;
177100e23707SDavid Howells 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1772692d7b5dSVakul Garg 	bool is_peek = flags & MSG_PEEK;
1773e91de6afSJohn Fastabend 	bool bpf_strp_enabled;
1774c46234ebSDave Watson 
1775c46234ebSDave Watson 	flags |= nonblock;
1776c46234ebSDave Watson 
1777c46234ebSDave Watson 	if (unlikely(flags & MSG_ERRQUEUE))
1778c46234ebSDave Watson 		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1779c46234ebSDave Watson 
1780d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
1781c46234ebSDave Watson 	lock_sock(sk);
1782e91de6afSJohn Fastabend 	bpf_strp_enabled = sk_psock_strp_enabled(psock);
1783c46234ebSDave Watson 
1784692d7b5dSVakul Garg 	/* Process pending decrypted records. It must be non-zero-copy */
17852b794c40SVakul Garg 	err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
17862b794c40SVakul Garg 			      is_peek);
1787692d7b5dSVakul Garg 	if (err < 0) {
1788692d7b5dSVakul Garg 		tls_err_abort(sk, err);
1789692d7b5dSVakul Garg 		goto end;
1790692d7b5dSVakul Garg 	}
1791692d7b5dSVakul Garg 
1792d5123eddSJakub Kicinski 	copied = err;
179346a16959SJakub Kicinski 	if (len <= copied)
1794bfc06e1aSJakub Kicinski 		goto end;
179546a16959SJakub Kicinski 
179646a16959SJakub Kicinski 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
179746a16959SJakub Kicinski 	len = len - copied;
179846a16959SJakub Kicinski 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1799692d7b5dSVakul Garg 
1800bfc06e1aSJakub Kicinski 	decrypted = 0;
1801bfc06e1aSJakub Kicinski 	num_async = 0;
180204b25a54SJakub Kicinski 	while (len && (decrypted + copied < target || ctx->recv_pkt)) {
1803*4175eac3SJakub Kicinski 		struct tls_decrypt_arg darg = {};
1804692d7b5dSVakul Garg 		bool retain_skb = false;
18059bdf75ccSJakub Kicinski 		int to_decrypt, chunk;
1806*4175eac3SJakub Kicinski 		bool async;
1807c46234ebSDave Watson 
1808974271e5SJim Ma 		skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
1809d3b18ad3SJohn Fastabend 		if (!skb) {
1810d3b18ad3SJohn Fastabend 			if (psock) {
18112bc793e3SCong Wang 				int ret = sk_msg_recvmsg(sk, psock, msg, len,
18122bc793e3SCong Wang 							 flags);
1813d3b18ad3SJohn Fastabend 
1814d3b18ad3SJohn Fastabend 				if (ret > 0) {
1815692d7b5dSVakul Garg 					decrypted += ret;
1816d3b18ad3SJohn Fastabend 					len -= ret;
1817d3b18ad3SJohn Fastabend 					continue;
1818d3b18ad3SJohn Fastabend 				}
1819d3b18ad3SJohn Fastabend 			}
1820c46234ebSDave Watson 			goto recv_end;
1821d3b18ad3SJohn Fastabend 		}
1822c46234ebSDave Watson 
1823c46234ebSDave Watson 		rxm = strp_msg(skb);
1824c3f6bb74SJakub Kicinski 		tlm = tls_msg(skb);
182594524d8fSVakul Garg 
18264509de14SVakul Garg 		to_decrypt = rxm->full_len - prot->overhead_size;
1827fedf201eSDave Watson 
1828fedf201eSDave Watson 		if (to_decrypt <= len && !is_kvec && !is_peek &&
1829c3f6bb74SJakub Kicinski 		    tlm->control == TLS_RECORD_TYPE_DATA &&
1830e91de6afSJohn Fastabend 		    prot->version != TLS_1_3_VERSION &&
1831e91de6afSJohn Fastabend 		    !bpf_strp_enabled)
1832*4175eac3SJakub Kicinski 			darg.zc = true;
1833fedf201eSDave Watson 
1834c0ab4732SVakul Garg 		/* Do not use async mode if record is non-data */
1835c3f6bb74SJakub Kicinski 		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
1836*4175eac3SJakub Kicinski 			darg.async = ctx->async_capable;
1837c0ab4732SVakul Garg 		else
1838*4175eac3SJakub Kicinski 			darg.async = false;
1839c0ab4732SVakul Garg 
1840*4175eac3SJakub Kicinski 		err = decrypt_skb_update(sk, skb, &msg->msg_iter, &darg);
1841fedf201eSDave Watson 		if (err < 0 && err != -EINPROGRESS) {
1842da353facSDaniel Jordan 			tls_err_abort(sk, -EBADMSG);
1843fedf201eSDave Watson 			goto recv_end;
1844fedf201eSDave Watson 		}
1845fedf201eSDave Watson 
18467754bd63SEran Ben Elisha 		if (err == -EINPROGRESS) {
18477754bd63SEran Ben Elisha 			async = true;
1848fedf201eSDave Watson 			num_async++;
18497754bd63SEran Ben Elisha 		}
18502b794c40SVakul Garg 
18512b794c40SVakul Garg 		/* If the type of records being processed is not known yet,
18522b794c40SVakul Garg 		 * set it to record type just dequeued. If it is already known,
18532b794c40SVakul Garg 		 * but does not match the record type just dequeued, go to end.
18542b794c40SVakul Garg 		 * We always get record type here since for tls1.2, record type
18552b794c40SVakul Garg 		 * is known just after record is dequeued from stream parser.
18562b794c40SVakul Garg 		 * For tls1.3, we disable async.
18572b794c40SVakul Garg 		 */
18582b794c40SVakul Garg 
18592b794c40SVakul Garg 		if (!control)
18602b794c40SVakul Garg 			control = tlm->control;
18612b794c40SVakul Garg 		else if (control != tlm->control)
18622b794c40SVakul Garg 			goto recv_end;
1863fedf201eSDave Watson 
1864c46234ebSDave Watson 		if (!cmsg) {
1865c46234ebSDave Watson 			int cerr;
1866c46234ebSDave Watson 
1867c46234ebSDave Watson 			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
18682b794c40SVakul Garg 					sizeof(control), &control);
1869c46234ebSDave Watson 			cmsg = true;
18702b794c40SVakul Garg 			if (control != TLS_RECORD_TYPE_DATA) {
1871c46234ebSDave Watson 				if (cerr || msg->msg_flags & MSG_CTRUNC) {
1872c46234ebSDave Watson 					err = -EIO;
1873c46234ebSDave Watson 					goto recv_end;
1874c46234ebSDave Watson 				}
1875c46234ebSDave Watson 			}
1876c46234ebSDave Watson 		}
1877c46234ebSDave Watson 
18789bdf75ccSJakub Kicinski 		if (async) {
18799bdf75ccSJakub Kicinski 			/* TLS 1.2-only, to_decrypt must be text length */
18809bdf75ccSJakub Kicinski 			chunk = min_t(int, to_decrypt, len);
1881c0ab4732SVakul Garg 			goto pick_next_record;
18829bdf75ccSJakub Kicinski 		}
18839bdf75ccSJakub Kicinski 		/* TLS 1.3 may have updated the length by more than overhead */
18849bdf75ccSJakub Kicinski 		chunk = rxm->full_len;
1885c0ab4732SVakul Garg 
1886*4175eac3SJakub Kicinski 		if (!darg.zc) {
1887e91de6afSJohn Fastabend 			if (bpf_strp_enabled) {
1888e91de6afSJohn Fastabend 				err = sk_psock_tls_strp_read(psock, skb);
1889e91de6afSJohn Fastabend 				if (err != __SK_PASS) {
1890e91de6afSJohn Fastabend 					rxm->offset = rxm->offset + rxm->full_len;
1891e91de6afSJohn Fastabend 					rxm->full_len = 0;
1892e91de6afSJohn Fastabend 					if (err == __SK_DROP)
1893e91de6afSJohn Fastabend 						consume_skb(skb);
1894e91de6afSJohn Fastabend 					ctx->recv_pkt = NULL;
1895e91de6afSJohn Fastabend 					__strp_unpause(&ctx->strp);
1896e91de6afSJohn Fastabend 					continue;
1897e91de6afSJohn Fastabend 				}
1898e91de6afSJohn Fastabend 			}
1899e91de6afSJohn Fastabend 
19009bdf75ccSJakub Kicinski 			if (chunk > len) {
1901692d7b5dSVakul Garg 				retain_skb = true;
1902692d7b5dSVakul Garg 				chunk = len;
1903692d7b5dSVakul Garg 			}
190494524d8fSVakul Garg 
1905692d7b5dSVakul Garg 			err = skb_copy_datagram_msg(skb, rxm->offset,
1906692d7b5dSVakul Garg 						    msg, chunk);
1907c46234ebSDave Watson 			if (err < 0)
1908c46234ebSDave Watson 				goto recv_end;
1909692d7b5dSVakul Garg 
1910692d7b5dSVakul Garg 			if (!is_peek) {
1911692d7b5dSVakul Garg 				rxm->offset = rxm->offset + chunk;
1912692d7b5dSVakul Garg 				rxm->full_len = rxm->full_len - chunk;
1913692d7b5dSVakul Garg 			}
1914692d7b5dSVakul Garg 		}
1915c46234ebSDave Watson 
191694524d8fSVakul Garg pick_next_record:
1917692d7b5dSVakul Garg 		decrypted += chunk;
1918692d7b5dSVakul Garg 		len -= chunk;
1919692d7b5dSVakul Garg 
1920692d7b5dSVakul Garg 		/* For async or peek case, queue the current skb */
1921692d7b5dSVakul Garg 		if (async || is_peek || retain_skb) {
1922692d7b5dSVakul Garg 			skb_queue_tail(&ctx->rx_list, skb);
192394524d8fSVakul Garg 			skb = NULL;
1924692d7b5dSVakul Garg 		}
192594524d8fSVakul Garg 
1926c46234ebSDave Watson 		if (tls_sw_advance_skb(sk, skb, chunk)) {
1927c46234ebSDave Watson 			/* Return full control message to
1928c46234ebSDave Watson 			 * userspace before trying to parse
1929c46234ebSDave Watson 			 * another message type
1930c46234ebSDave Watson 			 */
1931c46234ebSDave Watson 			msg->msg_flags |= MSG_EOR;
19323fe16edfSVadim Fedorenko 			if (control != TLS_RECORD_TYPE_DATA)
1933c46234ebSDave Watson 				goto recv_end;
193494524d8fSVakul Garg 		} else {
193594524d8fSVakul Garg 			break;
1936c46234ebSDave Watson 		}
193704b25a54SJakub Kicinski 	}
1938c46234ebSDave Watson 
1939c46234ebSDave Watson recv_end:
194094524d8fSVakul Garg 	if (num_async) {
194194524d8fSVakul Garg 		/* Wait for all previously submitted records to be decrypted */
19420cada332SVinay Kumar Yadav 		spin_lock_bh(&ctx->decrypt_compl_lock);
19430cada332SVinay Kumar Yadav 		ctx->async_notify = true;
19440cada332SVinay Kumar Yadav 		pending = atomic_read(&ctx->decrypt_pending);
19450cada332SVinay Kumar Yadav 		spin_unlock_bh(&ctx->decrypt_compl_lock);
19460cada332SVinay Kumar Yadav 		if (pending) {
194794524d8fSVakul Garg 			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
194894524d8fSVakul Garg 			if (err) {
194994524d8fSVakul Garg 				/* one of async decrypt failed */
195094524d8fSVakul Garg 				tls_err_abort(sk, err);
195194524d8fSVakul Garg 				copied = 0;
1952692d7b5dSVakul Garg 				decrypted = 0;
1953692d7b5dSVakul Garg 				goto end;
195494524d8fSVakul Garg 			}
195594524d8fSVakul Garg 		} else {
195694524d8fSVakul Garg 			reinit_completion(&ctx->async_wait.completion);
195794524d8fSVakul Garg 		}
19580cada332SVinay Kumar Yadav 
19590cada332SVinay Kumar Yadav 		/* There can be no concurrent accesses, since we have no
19600cada332SVinay Kumar Yadav 		 * pending decrypt operations
19610cada332SVinay Kumar Yadav 		 */
196294524d8fSVakul Garg 		WRITE_ONCE(ctx->async_notify, false);
1963692d7b5dSVakul Garg 
1964692d7b5dSVakul Garg 		/* Drain records from the rx_list & copy if required */
1965692d7b5dSVakul Garg 		if (is_peek || is_kvec)
19662b794c40SVakul Garg 			err = process_rx_list(ctx, msg, &control, &cmsg, copied,
1967692d7b5dSVakul Garg 					      decrypted, false, is_peek);
1968692d7b5dSVakul Garg 		else
19692b794c40SVakul Garg 			err = process_rx_list(ctx, msg, &control, &cmsg, 0,
1970692d7b5dSVakul Garg 					      decrypted, true, is_peek);
1971692d7b5dSVakul Garg 		if (err < 0) {
1972692d7b5dSVakul Garg 			tls_err_abort(sk, err);
1973692d7b5dSVakul Garg 			copied = 0;
1974692d7b5dSVakul Garg 			goto end;
197594524d8fSVakul Garg 		}
1976692d7b5dSVakul Garg 	}
1977692d7b5dSVakul Garg 
1978692d7b5dSVakul Garg 	copied += decrypted;
1979692d7b5dSVakul Garg 
1980692d7b5dSVakul Garg end:
1981c46234ebSDave Watson 	release_sock(sk);
1982ffef737fSGal Pressman 	sk_defer_free_flush(sk);
1983d3b18ad3SJohn Fastabend 	if (psock)
1984d3b18ad3SJohn Fastabend 		sk_psock_put(sk, psock);
1985c46234ebSDave Watson 	return copied ? : err;
1986c46234ebSDave Watson }
1987c46234ebSDave Watson 
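/* splice_read() for TLS_SW sockets: pick up an already-decrypted record from
 * rx_list or decrypt the next parsed record in place, then splice the
 * plaintext into @pipe. Only data records may be spliced; a control record
 * fails with -EINVAL.
 */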
1988c46234ebSDave Watson ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
1989c46234ebSDave Watson 			   struct pipe_inode_info *pipe,
1990c46234ebSDave Watson 			   size_t len, unsigned int flags)
1991c46234ebSDave Watson {
1992c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
1993f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1994c46234ebSDave Watson 	struct strp_msg *rxm = NULL;
1995c46234ebSDave Watson 	struct sock *sk = sock->sk;
1996c3f6bb74SJakub Kicinski 	struct tls_msg *tlm;
1997c46234ebSDave Watson 	struct sk_buff *skb;
1998c46234ebSDave Watson 	ssize_t copied = 0;
1999e062fe99SJakub Kicinski 	bool from_queue;
2000c46234ebSDave Watson 	int err = 0;
2001c46234ebSDave Watson 	long timeo;
2002c46234ebSDave Watson 	int chunk;
2003c46234ebSDave Watson 
2004c46234ebSDave Watson 	lock_sock(sk);
2005c46234ebSDave Watson 
2006974271e5SJim Ma 	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
2007c46234ebSDave Watson 
2008e062fe99SJakub Kicinski 	from_queue = !skb_queue_empty(&ctx->rx_list);
2009e062fe99SJakub Kicinski 	if (from_queue) {
2010e062fe99SJakub Kicinski 		skb = __skb_dequeue(&ctx->rx_list);
2011e062fe99SJakub Kicinski 	} else {
2012*4175eac3SJakub Kicinski 		struct tls_decrypt_arg darg = {};
2013*4175eac3SJakub Kicinski 
2014e062fe99SJakub Kicinski 		skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo,
2015e062fe99SJakub Kicinski 				    &err);
2016c46234ebSDave Watson 		if (!skb)
2017c46234ebSDave Watson 			goto splice_read_end;
2018c46234ebSDave Watson 
2019*4175eac3SJakub Kicinski 		err = decrypt_skb_update(sk, skb, NULL, &darg);
2020520493f6SJakub Kicinski 		if (err < 0) {
2021520493f6SJakub Kicinski 			tls_err_abort(sk, -EBADMSG);
2022520493f6SJakub Kicinski 			goto splice_read_end;
2023520493f6SJakub Kicinski 		}
2024e062fe99SJakub Kicinski 	}
2025fedf201eSDave Watson 
2026c3f6bb74SJakub Kicinski 	rxm = strp_msg(skb);
2027c3f6bb74SJakub Kicinski 	tlm = tls_msg(skb);
2028c3f6bb74SJakub Kicinski 
2029c46234ebSDave Watson 	/* splice does not support reading control messages */
2030c3f6bb74SJakub Kicinski 	if (tlm->control != TLS_RECORD_TYPE_DATA) {
20314a5cdc60SValentin Vidic 		err = -EINVAL;
2032c46234ebSDave Watson 		goto splice_read_end;
2033c46234ebSDave Watson 	}
2034c46234ebSDave Watson 
2035c46234ebSDave Watson 	chunk = min_t(unsigned int, rxm->full_len, len);
2036c46234ebSDave Watson 	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2037c46234ebSDave Watson 	if (copied < 0)
2038c46234ebSDave Watson 		goto splice_read_end;
2039c46234ebSDave Watson 
2040e062fe99SJakub Kicinski 	if (!from_queue) {
2041e062fe99SJakub Kicinski 		ctx->recv_pkt = NULL;
2042e062fe99SJakub Kicinski 		__strp_unpause(&ctx->strp);
2043e062fe99SJakub Kicinski 	}
2044e062fe99SJakub Kicinski 	if (chunk < rxm->full_len) {
2045e062fe99SJakub Kicinski 		__skb_queue_head(&ctx->rx_list, skb);
2046e062fe99SJakub Kicinski 		rxm->offset += len;
2047e062fe99SJakub Kicinski 		rxm->full_len -= len;
2048e062fe99SJakub Kicinski 	} else {
2049e062fe99SJakub Kicinski 		consume_skb(skb);
2050e062fe99SJakub Kicinski 	}
2051c46234ebSDave Watson 
2052c46234ebSDave Watson splice_read_end:
2053c46234ebSDave Watson 	release_sock(sk);
2054db094aa8SGal Pressman 	sk_defer_free_flush(sk);
2055c46234ebSDave Watson 	return copied ? : err;
2056c46234ebSDave Watson }
2057c46234ebSDave Watson 
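/* ->sock_is_readable() hook: the socket is readable when a parsed record, a
 * queued decrypted record on rx_list, or psock ingress data is available.
 */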
20587b50ecfcSCong Wang bool tls_sw_sock_is_readable(struct sock *sk)
2059c46234ebSDave Watson {
2060c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2061f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2062d3b18ad3SJohn Fastabend 	bool ingress_empty = true;
2063d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
2064c46234ebSDave Watson 
2065d3b18ad3SJohn Fastabend 	rcu_read_lock();
2066d3b18ad3SJohn Fastabend 	psock = sk_psock(sk);
2067d3b18ad3SJohn Fastabend 	if (psock)
2068d3b18ad3SJohn Fastabend 		ingress_empty = list_empty(&psock->ingress_msg);
2069d3b18ad3SJohn Fastabend 	rcu_read_unlock();
2070c46234ebSDave Watson 
207113aecb17SJakub Kicinski 	return !ingress_empty || ctx->recv_pkt ||
207213aecb17SJakub Kicinski 		!skb_queue_empty(&ctx->rx_list);
2073c46234ebSDave Watson }
2074c46234ebSDave Watson 
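/* strparser ->parse_msg() callback: validate the 5-byte TLS record header and
 * return the full record length (header plus payload), 0 when more data is
 * needed, or a negative error, which also aborts the connection.
 */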
2075c46234ebSDave Watson static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2076c46234ebSDave Watson {
2077c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
20784509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
20793463e51dSKees Cook 	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2080c46234ebSDave Watson 	struct strp_msg *rxm = strp_msg(skb);
2081c3f6bb74SJakub Kicinski 	struct tls_msg *tlm = tls_msg(skb);
2082c46234ebSDave Watson 	size_t cipher_overhead;
2083c46234ebSDave Watson 	size_t data_len = 0;
2084c46234ebSDave Watson 	int ret;
2085c46234ebSDave Watson 
2086c46234ebSDave Watson 	/* Verify that we have a full TLS header, or wait for more data */
20874509de14SVakul Garg 	if (rxm->offset + prot->prepend_size > skb->len)
2088c46234ebSDave Watson 		return 0;
2089c46234ebSDave Watson 
20903463e51dSKees Cook 	/* Sanity-check size of on-stack buffer. */
20914509de14SVakul Garg 	if (WARN_ON(prot->prepend_size > sizeof(header))) {
20923463e51dSKees Cook 		ret = -EINVAL;
20933463e51dSKees Cook 		goto read_failure;
20943463e51dSKees Cook 	}
20953463e51dSKees Cook 
2096c46234ebSDave Watson 	/* Linearize header to local buffer */
20974509de14SVakul Garg 	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
2098c46234ebSDave Watson 	if (ret < 0)
2099c46234ebSDave Watson 		goto read_failure;
2100c46234ebSDave Watson 
2101863533e3SJakub Kicinski 	tlm->decrypted = 0;
2102c3f6bb74SJakub Kicinski 	tlm->control = header[0];
2103c46234ebSDave Watson 
2104c46234ebSDave Watson 	data_len = ((header[4] & 0xFF) | (header[3] << 8));
2105c46234ebSDave Watson 
21064509de14SVakul Garg 	cipher_overhead = prot->tag_size;
2107a6acbe62SVadim Fedorenko 	if (prot->version != TLS_1_3_VERSION &&
2108a6acbe62SVadim Fedorenko 	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
21094509de14SVakul Garg 		cipher_overhead += prot->iv_size;
2110c46234ebSDave Watson 
2111130b392cSDave Watson 	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
21124509de14SVakul Garg 	    prot->tail_size) {
2113c46234ebSDave Watson 		ret = -EMSGSIZE;
2114c46234ebSDave Watson 		goto read_failure;
2115c46234ebSDave Watson 	}
2116c46234ebSDave Watson 	if (data_len < cipher_overhead) {
2117c46234ebSDave Watson 		ret = -EBADMSG;
2118c46234ebSDave Watson 		goto read_failure;
2119c46234ebSDave Watson 	}
2120c46234ebSDave Watson 
2121130b392cSDave Watson 	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2122130b392cSDave Watson 	if (header[1] != TLS_1_2_VERSION_MINOR ||
2123130b392cSDave Watson 	    header[2] != TLS_1_2_VERSION_MAJOR) {
2124c46234ebSDave Watson 		ret = -EINVAL;
2125c46234ebSDave Watson 		goto read_failure;
2126c46234ebSDave Watson 	}
2127be2fbc15SJakub Kicinski 
2128f953d33bSJakub Kicinski 	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2129fe58a5a0SJakub Kicinski 				     TCP_SKB_CB(skb)->seq + rxm->offset);
2130c46234ebSDave Watson 	return data_len + TLS_HEADER_SIZE;
2131c46234ebSDave Watson 
2132c46234ebSDave Watson read_failure:
2133c46234ebSDave Watson 	tls_err_abort(strp->sk, ret);
2134c46234ebSDave Watson 
2135c46234ebSDave Watson 	return ret;
2136c46234ebSDave Watson }
2137c46234ebSDave Watson 
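/* strparser ->rcv_msg() callback: stash the fully parsed record in
 * ctx->recv_pkt, pause the parser and wake the reader via the original
 * data_ready callback.
 */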
2138c46234ebSDave Watson static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2139c46234ebSDave Watson {
2140c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2141f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2142c46234ebSDave Watson 
2143c46234ebSDave Watson 	ctx->recv_pkt = skb;
2144c46234ebSDave Watson 	strp_pause(strp);
2145c46234ebSDave Watson 
2146ad13acceSVakul Garg 	ctx->saved_data_ready(strp->sk);
2147c46234ebSDave Watson }
2148c46234ebSDave Watson 
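/* Replacement for sk->sk_data_ready: feed newly arrived TCP data to the
 * strparser and, if a psock with queued ingress messages exists, also invoke
 * the saved data_ready callback.
 */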
2149c46234ebSDave Watson static void tls_data_ready(struct sock *sk)
2150c46234ebSDave Watson {
2151c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2152f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2153d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
2154c46234ebSDave Watson 
2155c46234ebSDave Watson 	strp_data_ready(&ctx->strp);
2156d3b18ad3SJohn Fastabend 
2157d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
215862b4011fSXiyu Yang 	if (psock) {
215962b4011fSXiyu Yang 		if (!list_empty(&psock->ingress_msg))
2160d3b18ad3SJohn Fastabend 			ctx->saved_data_ready(sk);
2161d3b18ad3SJohn Fastabend 		sk_psock_put(sk, psock);
2162d3b18ad3SJohn Fastabend 	}
2163c46234ebSDave Watson }
2164c46234ebSDave Watson 
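/* Stop the delayed TX work before the context is torn down; BIT_TX_CLOSING
 * makes any handler invocation that still races in bail out early.
 */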
2165f87e62d4SJohn Fastabend void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2166f87e62d4SJohn Fastabend {
2167f87e62d4SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2168f87e62d4SJohn Fastabend 
2169f87e62d4SJohn Fastabend 	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2170f87e62d4SJohn Fastabend 	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2171f87e62d4SJohn Fastabend 	cancel_delayed_work_sync(&ctx->tx_work.work);
2172f87e62d4SJohn Fastabend }
2173f87e62d4SJohn Fastabend 
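/* TX teardown: wait for in-flight async encryptions, transmit whatever is
 * already encrypted, then free the partially sent record (if any) and all
 * records remaining on tx_list, along with the TX AEAD transform.
 */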
2174313ab004SJohn Fastabend void tls_sw_release_resources_tx(struct sock *sk)
21753c4d7559SDave Watson {
21763c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2177f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2178a42055e8SVakul Garg 	struct tls_rec *rec, *tmp;
217938f7e1c0SRohit Maheshwari 	int pending;
2180a42055e8SVakul Garg 
2181a42055e8SVakul Garg 	/* Wait for any pending async encryptions to complete */
218238f7e1c0SRohit Maheshwari 	spin_lock_bh(&ctx->encrypt_compl_lock);
218338f7e1c0SRohit Maheshwari 	ctx->async_notify = true;
218438f7e1c0SRohit Maheshwari 	pending = atomic_read(&ctx->encrypt_pending);
218538f7e1c0SRohit Maheshwari 	spin_unlock_bh(&ctx->encrypt_compl_lock);
218638f7e1c0SRohit Maheshwari 
218738f7e1c0SRohit Maheshwari 	if (pending)
2188a42055e8SVakul Garg 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2189a42055e8SVakul Garg 
2190a42055e8SVakul Garg 	tls_tx_records(sk, -1);
2191a42055e8SVakul Garg 
21929932a29aSVakul Garg 	/* Free up un-sent records in tx_list. First, free
2193a42055e8SVakul Garg 	 * the partially sent record if any at head of tx_list.
2194a42055e8SVakul Garg 	 */
2195c5daa6ccSJakub Kicinski 	if (tls_ctx->partially_sent_record) {
2196c5daa6ccSJakub Kicinski 		tls_free_partial_record(sk, tls_ctx);
21979932a29aSVakul Garg 		rec = list_first_entry(&ctx->tx_list,
2198a42055e8SVakul Garg 				       struct tls_rec, list);
2199a42055e8SVakul Garg 		list_del(&rec->list);
2200d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_plaintext);
2201a42055e8SVakul Garg 		kfree(rec);
2202a42055e8SVakul Garg 	}
2203a42055e8SVakul Garg 
22049932a29aSVakul Garg 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2205a42055e8SVakul Garg 		list_del(&rec->list);
2206d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_encrypted);
2207d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_plaintext);
2208a42055e8SVakul Garg 		kfree(rec);
2209a42055e8SVakul Garg 	}
22103c4d7559SDave Watson 
22113c4d7559SDave Watson 	crypto_free_aead(ctx->aead_send);
2212c774973eSVakul Garg 	tls_free_open_rec(sk);
2213313ab004SJohn Fastabend }
2214313ab004SJohn Fastabend 
2215313ab004SJohn Fastabend void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2216313ab004SJohn Fastabend {
2217313ab004SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2218f66de3eeSBoris Pismenny 
2219f66de3eeSBoris Pismenny 	kfree(ctx);
2220f66de3eeSBoris Pismenny }
2221f66de3eeSBoris Pismenny 
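/* RX teardown: free the IV and record-sequence state, drop the pending parsed
 * skb and the rx_list, free the RX AEAD transform and restore the original
 * sk->sk_data_ready if it was replaced.
 */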
222239f56e1aSBoris Pismenny void tls_sw_release_resources_rx(struct sock *sk)
2223f66de3eeSBoris Pismenny {
2224f66de3eeSBoris Pismenny 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2225f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2226f66de3eeSBoris Pismenny 
222712c76861SJakub Kicinski 	kfree(tls_ctx->rx.rec_seq);
222812c76861SJakub Kicinski 	kfree(tls_ctx->rx.iv);
222912c76861SJakub Kicinski 
2230c46234ebSDave Watson 	if (ctx->aead_recv) {
2231c46234ebSDave Watson 		kfree_skb(ctx->recv_pkt);
2232c46234ebSDave Watson 		ctx->recv_pkt = NULL;
2233692d7b5dSVakul Garg 		skb_queue_purge(&ctx->rx_list);
2234c46234ebSDave Watson 		crypto_free_aead(ctx->aead_recv);
2235c46234ebSDave Watson 		strp_stop(&ctx->strp);
2236313ab004SJohn Fastabend 		/* If tls_sw_strparser_arm() was not called (cleanup paths)
2237313ab004SJohn Fastabend 		 * we still want to strp_stop(), but sk->sk_data_ready was
2238313ab004SJohn Fastabend 		 * never swapped.
2239313ab004SJohn Fastabend 		 */
2240313ab004SJohn Fastabend 		if (ctx->saved_data_ready) {
2241c46234ebSDave Watson 			write_lock_bh(&sk->sk_callback_lock);
2242c46234ebSDave Watson 			sk->sk_data_ready = ctx->saved_data_ready;
2243c46234ebSDave Watson 			write_unlock_bh(&sk->sk_callback_lock);
2244c46234ebSDave Watson 		}
224539f56e1aSBoris Pismenny 	}
2246313ab004SJohn Fastabend }
2247313ab004SJohn Fastabend 
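/* Final strparser teardown for the RX context; strp_done() is only safe
 * once the parser has been stopped and can no longer be invoked.
 */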
2248313ab004SJohn Fastabend void tls_sw_strparser_done(struct tls_context *tls_ctx)
2249313ab004SJohn Fastabend {
2250313ab004SJohn Fastabend 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2251313ab004SJohn Fastabend 
2252313ab004SJohn Fastabend 	strp_done(&ctx->strp);
2253313ab004SJohn Fastabend }
2254313ab004SJohn Fastabend 
2255313ab004SJohn Fastabend void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2256313ab004SJohn Fastabend {
2257313ab004SJohn Fastabend 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2258313ab004SJohn Fastabend 
2259313ab004SJohn Fastabend 	kfree(ctx);
2260313ab004SJohn Fastabend }
226139f56e1aSBoris Pismenny 
226239f56e1aSBoris Pismenny void tls_sw_free_resources_rx(struct sock *sk)
226339f56e1aSBoris Pismenny {
226439f56e1aSBoris Pismenny 	struct tls_context *tls_ctx = tls_get_ctx(sk);
226539f56e1aSBoris Pismenny 
226639f56e1aSBoris Pismenny 	tls_sw_release_resources_rx(sk);
2267313ab004SJohn Fastabend 	tls_sw_free_ctx_rx(tls_ctx);
22683c4d7559SDave Watson }
22693c4d7559SDave Watson 
22709932a29aSVakul Garg /* The work handler to transmit the encrypted records in tx_list */
2271a42055e8SVakul Garg static void tx_work_handler(struct work_struct *work)
2272a42055e8SVakul Garg {
2273a42055e8SVakul Garg 	struct delayed_work *delayed_work = to_delayed_work(work);
2274a42055e8SVakul Garg 	struct tx_work *tx_work = container_of(delayed_work,
2275a42055e8SVakul Garg 					       struct tx_work, work);
2276a42055e8SVakul Garg 	struct sock *sk = tx_work->sk;
2277a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2278f87e62d4SJohn Fastabend 	struct tls_sw_context_tx *ctx;
2279f87e62d4SJohn Fastabend 
2280f87e62d4SJohn Fastabend 	if (unlikely(!tls_ctx))
2281f87e62d4SJohn Fastabend 		return;
2282f87e62d4SJohn Fastabend 
2283f87e62d4SJohn Fastabend 	ctx = tls_sw_ctx_tx(tls_ctx);
2284f87e62d4SJohn Fastabend 	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2285f87e62d4SJohn Fastabend 		return;
2286a42055e8SVakul Garg 
2287a42055e8SVakul Garg 	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2288a42055e8SVakul Garg 		return;
228979ffe608SJakub Kicinski 	mutex_lock(&tls_ctx->tx_lock);
2290a42055e8SVakul Garg 	lock_sock(sk);
2291a42055e8SVakul Garg 	tls_tx_records(sk, -1);
2292a42055e8SVakul Garg 	release_sock(sk);
229379ffe608SJakub Kicinski 	mutex_unlock(&tls_ctx->tx_lock);
2294a42055e8SVakul Garg }
2295a42055e8SVakul Garg 
22967463d3a2SBoris Pismenny void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
22977463d3a2SBoris Pismenny {
22987463d3a2SBoris Pismenny 	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
22997463d3a2SBoris Pismenny 
23007463d3a2SBoris Pismenny 	/* Schedule the transmission if tx list is ready */
230102b1fa07SJakub Kicinski 	if (is_tx_ready(tx_ctx) &&
230202b1fa07SJakub Kicinski 	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
23037463d3a2SBoris Pismenny 		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
23047463d3a2SBoris Pismenny }
23057463d3a2SBoris Pismenny 
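/* Hook the socket up to the RX strparser: save the original sk_data_ready,
 * install tls_data_ready in its place, and kick the parser in case data is
 * already queued on the socket.
 */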
2306318892acSJakub Kicinski void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2307318892acSJakub Kicinski {
2308318892acSJakub Kicinski 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2309318892acSJakub Kicinski 
2310318892acSJakub Kicinski 	write_lock_bh(&sk->sk_callback_lock);
2311318892acSJakub Kicinski 	rx_ctx->saved_data_ready = sk->sk_data_ready;
2312318892acSJakub Kicinski 	sk->sk_data_ready = tls_data_ready;
2313318892acSJakub Kicinski 	write_unlock_bh(&sk->sk_callback_lock);
2314318892acSJakub Kicinski 
2315318892acSJakub Kicinski 	strp_check_rcv(&rx_ctx->strp);
2316318892acSJakub Kicinski }
2317318892acSJakub Kicinski 
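/* Set up software kTLS for one direction of @sk; @tx selects TX or RX.
 * Allocates the per-direction context, copies the cipher parameters from
 * the user-supplied crypto_info into tls_prot_info, builds the salt||IV
 * buffer and record sequence, allocates and keys the AEAD transform and,
 * for RX, initializes the strparser.
 */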
2318c46234ebSDave Watson int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
23193c4d7559SDave Watson {
23204509de14SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
23214509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
23223c4d7559SDave Watson 	struct tls_crypto_info *crypto_info;
2323f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *sw_ctx_tx = NULL;
2324f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *sw_ctx_rx = NULL;
2325c46234ebSDave Watson 	struct cipher_context *cctx;
2326c46234ebSDave Watson 	struct crypto_aead **aead;
2327c46234ebSDave Watson 	struct strp_callbacks cb;
2328f295b3aeSVakul Garg 	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2329692d7b5dSVakul Garg 	struct crypto_tfm *tfm;
2330f295b3aeSVakul Garg 	char *iv, *rec_seq, *key, *salt, *cipher_name;
2331fb99bce7SDave Watson 	size_t keysize;
23323c4d7559SDave Watson 	int rc = 0;
23333c4d7559SDave Watson 
23343c4d7559SDave Watson 	if (!ctx) {
23353c4d7559SDave Watson 		rc = -EINVAL;
23363c4d7559SDave Watson 		goto out;
23373c4d7559SDave Watson 	}
23383c4d7559SDave Watson 
2339f66de3eeSBoris Pismenny 	if (tx) {
2340b190a587SBoris Pismenny 		if (!ctx->priv_ctx_tx) {
2341f66de3eeSBoris Pismenny 			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2342f66de3eeSBoris Pismenny 			if (!sw_ctx_tx) {
23433c4d7559SDave Watson 				rc = -ENOMEM;
23443c4d7559SDave Watson 				goto out;
23453c4d7559SDave Watson 			}
2346f66de3eeSBoris Pismenny 			ctx->priv_ctx_tx = sw_ctx_tx;
2347c46234ebSDave Watson 		} else {
2348b190a587SBoris Pismenny 			sw_ctx_tx =
2349b190a587SBoris Pismenny 				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2350b190a587SBoris Pismenny 		}
2351b190a587SBoris Pismenny 	} else {
2352b190a587SBoris Pismenny 		if (!ctx->priv_ctx_rx) {
2353f66de3eeSBoris Pismenny 			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2354f66de3eeSBoris Pismenny 			if (!sw_ctx_rx) {
2355f66de3eeSBoris Pismenny 				rc = -ENOMEM;
2356f66de3eeSBoris Pismenny 				goto out;
2357c46234ebSDave Watson 			}
2358f66de3eeSBoris Pismenny 			ctx->priv_ctx_rx = sw_ctx_rx;
2359b190a587SBoris Pismenny 		} else {
2360b190a587SBoris Pismenny 			sw_ctx_rx =
2361b190a587SBoris Pismenny 				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2362b190a587SBoris Pismenny 		}
2363f66de3eeSBoris Pismenny 	}
23643c4d7559SDave Watson 
2365c46234ebSDave Watson 	if (tx) {
2366b190a587SBoris Pismenny 		crypto_init_wait(&sw_ctx_tx->async_wait);
23670cada332SVinay Kumar Yadav 		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
236886029d10SSabrina Dubroca 		crypto_info = &ctx->crypto_send.info;
2369c46234ebSDave Watson 		cctx = &ctx->tx;
2370f66de3eeSBoris Pismenny 		aead = &sw_ctx_tx->aead_send;
23719932a29aSVakul Garg 		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2372a42055e8SVakul Garg 		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2373a42055e8SVakul Garg 		sw_ctx_tx->tx_work.sk = sk;
2374c46234ebSDave Watson 	} else {
2375b190a587SBoris Pismenny 		crypto_init_wait(&sw_ctx_rx->async_wait);
23760cada332SVinay Kumar Yadav 		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
237786029d10SSabrina Dubroca 		crypto_info = &ctx->crypto_recv.info;
2378c46234ebSDave Watson 		cctx = &ctx->rx;
2379692d7b5dSVakul Garg 		skb_queue_head_init(&sw_ctx_rx->rx_list);
2380f66de3eeSBoris Pismenny 		aead = &sw_ctx_rx->aead_recv;
2381c46234ebSDave Watson 	}
2382c46234ebSDave Watson 
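	/* Pull the per-cipher constants (nonce/IV/tag/key/salt sizes and the
	 * crypto API algorithm name) out of the user-supplied crypto_info.
	 */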
23833c4d7559SDave Watson 	switch (crypto_info->cipher_type) {
23843c4d7559SDave Watson 	case TLS_CIPHER_AES_GCM_128: {
2385dc2724a6STianjia Zhang 		struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2386dc2724a6STianjia Zhang 
2387dc2724a6STianjia Zhang 		gcm_128_info = (void *)crypto_info;
23883c4d7559SDave Watson 		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
23893c4d7559SDave Watson 		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
23903c4d7559SDave Watson 		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2391dc2724a6STianjia Zhang 		iv = gcm_128_info->iv;
23923c4d7559SDave Watson 		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2393dc2724a6STianjia Zhang 		rec_seq = gcm_128_info->rec_seq;
2394fb99bce7SDave Watson 		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2395fb99bce7SDave Watson 		key = gcm_128_info->key;
2396fb99bce7SDave Watson 		salt = gcm_128_info->salt;
2397f295b3aeSVakul Garg 		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2398f295b3aeSVakul Garg 		cipher_name = "gcm(aes)";
2399fb99bce7SDave Watson 		break;
2400fb99bce7SDave Watson 	}
2401fb99bce7SDave Watson 	case TLS_CIPHER_AES_GCM_256: {
2402dc2724a6STianjia Zhang 		struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2403dc2724a6STianjia Zhang 
2404dc2724a6STianjia Zhang 		gcm_256_info = (void *)crypto_info;
2405fb99bce7SDave Watson 		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2406fb99bce7SDave Watson 		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2407fb99bce7SDave Watson 		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2408dc2724a6STianjia Zhang 		iv = gcm_256_info->iv;
2409fb99bce7SDave Watson 		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2410dc2724a6STianjia Zhang 		rec_seq = gcm_256_info->rec_seq;
2411fb99bce7SDave Watson 		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2412fb99bce7SDave Watson 		key = gcm_256_info->key;
2413fb99bce7SDave Watson 		salt = gcm_256_info->salt;
2414f295b3aeSVakul Garg 		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2415f295b3aeSVakul Garg 		cipher_name = "gcm(aes)";
2416f295b3aeSVakul Garg 		break;
2417f295b3aeSVakul Garg 	}
2418f295b3aeSVakul Garg 	case TLS_CIPHER_AES_CCM_128: {
2419dc2724a6STianjia Zhang 		struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2420dc2724a6STianjia Zhang 
2421dc2724a6STianjia Zhang 		ccm_128_info = (void *)crypto_info;
2422f295b3aeSVakul Garg 		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2423f295b3aeSVakul Garg 		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2424f295b3aeSVakul Garg 		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2425dc2724a6STianjia Zhang 		iv = ccm_128_info->iv;
2426f295b3aeSVakul Garg 		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2427dc2724a6STianjia Zhang 		rec_seq = ccm_128_info->rec_seq;
2428f295b3aeSVakul Garg 		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2429f295b3aeSVakul Garg 		key = ccm_128_info->key;
2430f295b3aeSVakul Garg 		salt = ccm_128_info->salt;
2431f295b3aeSVakul Garg 		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2432f295b3aeSVakul Garg 		cipher_name = "ccm(aes)";
24333c4d7559SDave Watson 		break;
24343c4d7559SDave Watson 	}
243574ea6106SVadim Fedorenko 	case TLS_CIPHER_CHACHA20_POLY1305: {
2436dc2724a6STianjia Zhang 		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
2437dc2724a6STianjia Zhang 
243874ea6106SVadim Fedorenko 		chacha20_poly1305_info = (void *)crypto_info;
243974ea6106SVadim Fedorenko 		nonce_size = 0;
244074ea6106SVadim Fedorenko 		tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
244174ea6106SVadim Fedorenko 		iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
244274ea6106SVadim Fedorenko 		iv = chacha20_poly1305_info->iv;
244374ea6106SVadim Fedorenko 		rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
244474ea6106SVadim Fedorenko 		rec_seq = chacha20_poly1305_info->rec_seq;
244574ea6106SVadim Fedorenko 		keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
244674ea6106SVadim Fedorenko 		key = chacha20_poly1305_info->key;
244774ea6106SVadim Fedorenko 		salt = chacha20_poly1305_info->salt;
244874ea6106SVadim Fedorenko 		salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
244974ea6106SVadim Fedorenko 		cipher_name = "rfc7539(chacha20,poly1305)";
245074ea6106SVadim Fedorenko 		break;
245174ea6106SVadim Fedorenko 	}
2452227b9644STianjia Zhang 	case TLS_CIPHER_SM4_GCM: {
2453227b9644STianjia Zhang 		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
2454227b9644STianjia Zhang 
2455227b9644STianjia Zhang 		sm4_gcm_info = (void *)crypto_info;
2456227b9644STianjia Zhang 		nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2457227b9644STianjia Zhang 		tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
2458227b9644STianjia Zhang 		iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2459227b9644STianjia Zhang 		iv = sm4_gcm_info->iv;
2460227b9644STianjia Zhang 		rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
2461227b9644STianjia Zhang 		rec_seq = sm4_gcm_info->rec_seq;
2462227b9644STianjia Zhang 		keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
2463227b9644STianjia Zhang 		key = sm4_gcm_info->key;
2464227b9644STianjia Zhang 		salt = sm4_gcm_info->salt;
2465227b9644STianjia Zhang 		salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
2466227b9644STianjia Zhang 		cipher_name = "gcm(sm4)";
2467227b9644STianjia Zhang 		break;
2468227b9644STianjia Zhang 	}
2469227b9644STianjia Zhang 	case TLS_CIPHER_SM4_CCM: {
2470227b9644STianjia Zhang 		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
2471227b9644STianjia Zhang 
2472227b9644STianjia Zhang 		sm4_ccm_info = (void *)crypto_info;
2473227b9644STianjia Zhang 		nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2474227b9644STianjia Zhang 		tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
2475227b9644STianjia Zhang 		iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2476227b9644STianjia Zhang 		iv = sm4_ccm_info->iv;
2477227b9644STianjia Zhang 		rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
2478227b9644STianjia Zhang 		rec_seq = sm4_ccm_info->rec_seq;
2479227b9644STianjia Zhang 		keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
2480227b9644STianjia Zhang 		key = sm4_ccm_info->key;
2481227b9644STianjia Zhang 		salt = sm4_ccm_info->salt;
2482227b9644STianjia Zhang 		salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
2483227b9644STianjia Zhang 		cipher_name = "ccm(sm4)";
2484227b9644STianjia Zhang 		break;
2485227b9644STianjia Zhang 	}
24863c4d7559SDave Watson 	default:
24873c4d7559SDave Watson 		rc = -EINVAL;
2488cf6d43efSSabrina Dubroca 		goto free_priv;
24893c4d7559SDave Watson 	}
24903c4d7559SDave Watson 
249189fec474SJakub Kicinski 	/* Sanity-check the sizes for stack allocations. */
249289fec474SJakub Kicinski 	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2493a8340cc0SJakub Kicinski 	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE) {
2494b16520f7SKees Cook 		rc = -EINVAL;
2495b16520f7SKees Cook 		goto free_priv;
2496b16520f7SKees Cook 	}
2497b16520f7SKees Cook 
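	/* TLS 1.3 records carry no explicit per-record nonce on the wire and
	 * append a one-byte content type to the plaintext, hence the zero
	 * nonce_size and the extra tail byte below.
	 */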
2498130b392cSDave Watson 	if (crypto_info->version == TLS_1_3_VERSION) {
2499130b392cSDave Watson 		nonce_size = 0;
25004509de14SVakul Garg 		prot->aad_size = TLS_HEADER_SIZE;
25014509de14SVakul Garg 		prot->tail_size = 1;
2502130b392cSDave Watson 	} else {
25034509de14SVakul Garg 		prot->aad_size = TLS_AAD_SPACE_SIZE;
25044509de14SVakul Garg 		prot->tail_size = 0;
2505130b392cSDave Watson 	}
2506130b392cSDave Watson 
25074509de14SVakul Garg 	prot->version = crypto_info->version;
25084509de14SVakul Garg 	prot->cipher_type = crypto_info->cipher_type;
25094509de14SVakul Garg 	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
25104509de14SVakul Garg 	prot->tag_size = tag_size;
25114509de14SVakul Garg 	prot->overhead_size = prot->prepend_size +
25124509de14SVakul Garg 			      prot->tag_size + prot->tail_size;
25134509de14SVakul Garg 	prot->iv_size = iv_size;
2514f295b3aeSVakul Garg 	prot->salt_size = salt_size;
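	/* cctx->iv holds the implicit salt followed by the per-record IV;
	 * both halves are copied in from the user-supplied key material
	 * just below.
	 */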
2515f295b3aeSVakul Garg 	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2516c46234ebSDave Watson 	if (!cctx->iv) {
25173c4d7559SDave Watson 		rc = -ENOMEM;
2518cf6d43efSSabrina Dubroca 		goto free_priv;
25193c4d7559SDave Watson 	}
2520fb99bce7SDave Watson 	/* Note: 128 & 256 bit salt are the same size */
25214509de14SVakul Garg 	prot->rec_seq_size = rec_seq_size;
2522f295b3aeSVakul Garg 	memcpy(cctx->iv, salt, salt_size);
2523f295b3aeSVakul Garg 	memcpy(cctx->iv + salt_size, iv, iv_size);
2524969d5090Szhong jiang 	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2525c46234ebSDave Watson 	if (!cctx->rec_seq) {
25263c4d7559SDave Watson 		rc = -ENOMEM;
25273c4d7559SDave Watson 		goto free_iv;
25283c4d7559SDave Watson 	}
25293c4d7559SDave Watson 
2530c46234ebSDave Watson 	if (!*aead) {
2531f295b3aeSVakul Garg 		*aead = crypto_alloc_aead(cipher_name, 0, 0);
2532c46234ebSDave Watson 		if (IS_ERR(*aead)) {
2533c46234ebSDave Watson 			rc = PTR_ERR(*aead);
2534c46234ebSDave Watson 			*aead = NULL;
25353c4d7559SDave Watson 			goto free_rec_seq;
25363c4d7559SDave Watson 		}
25373c4d7559SDave Watson 	}
25383c4d7559SDave Watson 
25393c4d7559SDave Watson 	ctx->push_pending_record = tls_sw_push_pending_record;
25403c4d7559SDave Watson 
2541fb99bce7SDave Watson 	rc = crypto_aead_setkey(*aead, key, keysize);
2542fb99bce7SDave Watson 
25433c4d7559SDave Watson 	if (rc)
25443c4d7559SDave Watson 		goto free_aead;
25453c4d7559SDave Watson 
25464509de14SVakul Garg 	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2547c46234ebSDave Watson 	if (rc)
2548c46234ebSDave Watson 		goto free_aead;
2549c46234ebSDave Watson 
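	/* RX only: record whether the AEAD implementation may complete
	 * asynchronously (CRYPTO_ALG_ASYNC); async decrypt is not used for
	 * TLS 1.3.  Then wire up the strparser callbacks.
	 */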
2550f66de3eeSBoris Pismenny 	if (sw_ctx_rx) {
2551692d7b5dSVakul Garg 		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
25528497ded2SVakul Garg 
25538497ded2SVakul Garg 		if (crypto_info->version == TLS_1_3_VERSION)
25545c5458ecSJakub Kicinski 			sw_ctx_rx->async_capable = 0;
25558497ded2SVakul Garg 		else
2556692d7b5dSVakul Garg 			sw_ctx_rx->async_capable =
25575c5458ecSJakub Kicinski 				!!(tfm->__crt_alg->cra_flags &
25585c5458ecSJakub Kicinski 				   CRYPTO_ALG_ASYNC);
2559692d7b5dSVakul Garg 
2560c46234ebSDave Watson 		/* Set up strparser */
2561c46234ebSDave Watson 		memset(&cb, 0, sizeof(cb));
2562c46234ebSDave Watson 		cb.rcv_msg = tls_queue;
2563c46234ebSDave Watson 		cb.parse_msg = tls_read_size;
2564c46234ebSDave Watson 
2565f66de3eeSBoris Pismenny 		strp_init(&sw_ctx_rx->strp, sk, &cb);
2566c46234ebSDave Watson 	}
2567c46234ebSDave Watson 
2568c46234ebSDave Watson 	goto out;
25693c4d7559SDave Watson 
25703c4d7559SDave Watson free_aead:
2571c46234ebSDave Watson 	crypto_free_aead(*aead);
2572c46234ebSDave Watson 	*aead = NULL;
25733c4d7559SDave Watson free_rec_seq:
2574c46234ebSDave Watson 	kfree(cctx->rec_seq);
2575c46234ebSDave Watson 	cctx->rec_seq = NULL;
25763c4d7559SDave Watson free_iv:
2577f66de3eeSBoris Pismenny 	kfree(cctx->iv);
2578f66de3eeSBoris Pismenny 	cctx->iv = NULL;
2579cf6d43efSSabrina Dubroca free_priv:
2580f66de3eeSBoris Pismenny 	if (tx) {
2581f66de3eeSBoris Pismenny 		kfree(ctx->priv_ctx_tx);
2582f66de3eeSBoris Pismenny 		ctx->priv_ctx_tx = NULL;
2583f66de3eeSBoris Pismenny 	} else {
2584f66de3eeSBoris Pismenny 		kfree(ctx->priv_ctx_rx);
2585f66de3eeSBoris Pismenny 		ctx->priv_ctx_rx = NULL;
2586f66de3eeSBoris Pismenny 	}
25873c4d7559SDave Watson out:
25883c4d7559SDave Watson 	return rc;
25893c4d7559SDave Watson }
2590