xref: /openbmc/linux/net/tls/tls_sw.c (revision a8340cc0)
13c4d7559SDave Watson /*
23c4d7559SDave Watson  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
33c4d7559SDave Watson  * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
43c4d7559SDave Watson  * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
53c4d7559SDave Watson  * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
63c4d7559SDave Watson  * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7d3b18ad3SJohn Fastabend  * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
83c4d7559SDave Watson  *
93c4d7559SDave Watson  * This software is available to you under a choice of one of two
103c4d7559SDave Watson  * licenses.  You may choose to be licensed under the terms of the GNU
113c4d7559SDave Watson  * General Public License (GPL) Version 2, available from the file
123c4d7559SDave Watson  * COPYING in the main directory of this source tree, or the
133c4d7559SDave Watson  * OpenIB.org BSD license below:
143c4d7559SDave Watson  *
153c4d7559SDave Watson  *     Redistribution and use in source and binary forms, with or
163c4d7559SDave Watson  *     without modification, are permitted provided that the following
173c4d7559SDave Watson  *     conditions are met:
183c4d7559SDave Watson  *
193c4d7559SDave Watson  *      - Redistributions of source code must retain the above
203c4d7559SDave Watson  *        copyright notice, this list of conditions and the following
213c4d7559SDave Watson  *        disclaimer.
223c4d7559SDave Watson  *
233c4d7559SDave Watson  *      - Redistributions in binary form must reproduce the above
243c4d7559SDave Watson  *        copyright notice, this list of conditions and the following
253c4d7559SDave Watson  *        disclaimer in the documentation and/or other materials
263c4d7559SDave Watson  *        provided with the distribution.
273c4d7559SDave Watson  *
283c4d7559SDave Watson  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
293c4d7559SDave Watson  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
303c4d7559SDave Watson  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
313c4d7559SDave Watson  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
323c4d7559SDave Watson  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
333c4d7559SDave Watson  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
343c4d7559SDave Watson  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
353c4d7559SDave Watson  * SOFTWARE.
363c4d7559SDave Watson  */
373c4d7559SDave Watson 
38da353facSDaniel Jordan #include <linux/bug.h>
39c46234ebSDave Watson #include <linux/sched/signal.h>
403c4d7559SDave Watson #include <linux/module.h>
41974271e5SJim Ma #include <linux/splice.h>
423c4d7559SDave Watson #include <crypto/aead.h>
433c4d7559SDave Watson 
44c46234ebSDave Watson #include <net/strparser.h>
453c4d7559SDave Watson #include <net/tls.h>
463c4d7559SDave Watson 
47da353facSDaniel Jordan noinline void tls_err_abort(struct sock *sk, int err)
48da353facSDaniel Jordan {
49da353facSDaniel Jordan 	WARN_ON_ONCE(err >= 0);
50da353facSDaniel Jordan 	/* sk->sk_err should contain a positive error code. */
51da353facSDaniel Jordan 	sk->sk_err = -err;
52da353facSDaniel Jordan 	sk_error_report(sk);
53da353facSDaniel Jordan }
54da353facSDaniel Jordan 
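/* Recursive worker for skb_nsg(): count the scatterlist entries needed for
 * the linear area, the page frags and any frag list of the skb, limiting
 * frag-list recursion to 24 levels and returning -EMSGSIZE beyond that.
 */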
550927f71dSDoron Roberts-Kedes static int __skb_nsg(struct sk_buff *skb, int offset, int len,
560927f71dSDoron Roberts-Kedes                      unsigned int recursion_level)
570927f71dSDoron Roberts-Kedes {
580927f71dSDoron Roberts-Kedes         int start = skb_headlen(skb);
590927f71dSDoron Roberts-Kedes         int i, chunk = start - offset;
600927f71dSDoron Roberts-Kedes         struct sk_buff *frag_iter;
610927f71dSDoron Roberts-Kedes         int elt = 0;
620927f71dSDoron Roberts-Kedes 
630927f71dSDoron Roberts-Kedes         if (unlikely(recursion_level >= 24))
640927f71dSDoron Roberts-Kedes                 return -EMSGSIZE;
650927f71dSDoron Roberts-Kedes 
660927f71dSDoron Roberts-Kedes         if (chunk > 0) {
670927f71dSDoron Roberts-Kedes                 if (chunk > len)
680927f71dSDoron Roberts-Kedes                         chunk = len;
690927f71dSDoron Roberts-Kedes                 elt++;
700927f71dSDoron Roberts-Kedes                 len -= chunk;
710927f71dSDoron Roberts-Kedes                 if (len == 0)
720927f71dSDoron Roberts-Kedes                         return elt;
730927f71dSDoron Roberts-Kedes                 offset += chunk;
740927f71dSDoron Roberts-Kedes         }
750927f71dSDoron Roberts-Kedes 
760927f71dSDoron Roberts-Kedes         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
770927f71dSDoron Roberts-Kedes                 int end;
780927f71dSDoron Roberts-Kedes 
790927f71dSDoron Roberts-Kedes                 WARN_ON(start > offset + len);
800927f71dSDoron Roberts-Kedes 
810927f71dSDoron Roberts-Kedes                 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
820927f71dSDoron Roberts-Kedes                 chunk = end - offset;
830927f71dSDoron Roberts-Kedes                 if (chunk > 0) {
840927f71dSDoron Roberts-Kedes                         if (chunk > len)
850927f71dSDoron Roberts-Kedes                                 chunk = len;
860927f71dSDoron Roberts-Kedes                         elt++;
870927f71dSDoron Roberts-Kedes                         len -= chunk;
880927f71dSDoron Roberts-Kedes                         if (len == 0)
890927f71dSDoron Roberts-Kedes                                 return elt;
900927f71dSDoron Roberts-Kedes                         offset += chunk;
910927f71dSDoron Roberts-Kedes                 }
920927f71dSDoron Roberts-Kedes                 start = end;
930927f71dSDoron Roberts-Kedes         }
940927f71dSDoron Roberts-Kedes 
950927f71dSDoron Roberts-Kedes         if (unlikely(skb_has_frag_list(skb))) {
960927f71dSDoron Roberts-Kedes                 skb_walk_frags(skb, frag_iter) {
970927f71dSDoron Roberts-Kedes                         int end, ret;
980927f71dSDoron Roberts-Kedes 
990927f71dSDoron Roberts-Kedes                         WARN_ON(start > offset + len);
1000927f71dSDoron Roberts-Kedes 
1010927f71dSDoron Roberts-Kedes                         end = start + frag_iter->len;
1020927f71dSDoron Roberts-Kedes                         chunk = end - offset;
1030927f71dSDoron Roberts-Kedes                         if (chunk > 0) {
1040927f71dSDoron Roberts-Kedes                                 if (chunk > len)
1050927f71dSDoron Roberts-Kedes                                         chunk = len;
1060927f71dSDoron Roberts-Kedes                                 ret = __skb_nsg(frag_iter, offset - start, chunk,
1070927f71dSDoron Roberts-Kedes                                                 recursion_level + 1);
1080927f71dSDoron Roberts-Kedes                                 if (unlikely(ret < 0))
1090927f71dSDoron Roberts-Kedes                                         return ret;
1100927f71dSDoron Roberts-Kedes                                 elt += ret;
1110927f71dSDoron Roberts-Kedes                                 len -= chunk;
1120927f71dSDoron Roberts-Kedes                                 if (len == 0)
1130927f71dSDoron Roberts-Kedes                                         return elt;
1140927f71dSDoron Roberts-Kedes                                 offset += chunk;
1150927f71dSDoron Roberts-Kedes                         }
1160927f71dSDoron Roberts-Kedes                         start = end;
1170927f71dSDoron Roberts-Kedes                 }
1180927f71dSDoron Roberts-Kedes         }
1190927f71dSDoron Roberts-Kedes         BUG_ON(len);
1200927f71dSDoron Roberts-Kedes         return elt;
1210927f71dSDoron Roberts-Kedes }
1220927f71dSDoron Roberts-Kedes 
1230927f71dSDoron Roberts-Kedes /* Return the number of scatterlist elements required to completely map the
1240927f71dSDoron Roberts-Kedes  * skb, or -EMSGSIZE if the recursion depth is exceeded.
1250927f71dSDoron Roberts-Kedes  */
1260927f71dSDoron Roberts-Kedes static int skb_nsg(struct sk_buff *skb, int offset, int len)
1270927f71dSDoron Roberts-Kedes {
1280927f71dSDoron Roberts-Kedes         return __skb_nsg(skb, offset, len, 0);
1290927f71dSDoron Roberts-Kedes }
1300927f71dSDoron Roberts-Kedes 
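/* For TLS 1.3, walk backwards from the end of the record (just before the
 * authentication tag) over the zero padding to find the real content type,
 * store it in tls_msg(skb)->control and return the number of padding bytes
 * to strip; -EBADMSG is returned if no content type byte is found. For
 * other protocol versions this returns 0.
 */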
131c3f6bb74SJakub Kicinski static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb)
132130b392cSDave Watson {
133130b392cSDave Watson 	struct strp_msg *rxm = strp_msg(skb);
134c3f6bb74SJakub Kicinski 	struct tls_msg *tlm = tls_msg(skb);
135130b392cSDave Watson 	int sub = 0;
136130b392cSDave Watson 
137130b392cSDave Watson 	/* Determine zero-padding length */
138b53f4976SJakub Kicinski 	if (prot->version == TLS_1_3_VERSION) {
139*a8340cc0SJakub Kicinski 		int back = TLS_TAG_SIZE + 1;
140130b392cSDave Watson 		char content_type = 0;
141130b392cSDave Watson 		int err;
142130b392cSDave Watson 
143130b392cSDave Watson 		while (content_type == 0) {
144b53f4976SJakub Kicinski 			if (back > rxm->full_len - prot->prepend_size)
145130b392cSDave Watson 				return -EBADMSG;
146130b392cSDave Watson 			err = skb_copy_bits(skb,
147130b392cSDave Watson 					    rxm->offset + rxm->full_len - back,
148130b392cSDave Watson 					    &content_type, 1);
149b53f4976SJakub Kicinski 			if (err)
150b53f4976SJakub Kicinski 				return err;
151130b392cSDave Watson 			if (content_type)
152130b392cSDave Watson 				break;
153130b392cSDave Watson 			sub++;
154130b392cSDave Watson 			back++;
155130b392cSDave Watson 		}
156c3f6bb74SJakub Kicinski 		tlm->control = content_type;
157130b392cSDave Watson 	}
158130b392cSDave Watson 	return sub;
159130b392cSDave Watson }
160130b392cSDave Watson 
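/* Completion callback for asynchronous AEAD decryption. On error the socket
 * is aborted; on success the zero padding, TLS header and crypto overhead
 * are trimmed from the strparser message. Destination pages are released if
 * the record was not decrypted in place, and the last pending completion
 * signals ctx->async_wait when a waiter asked to be notified.
 */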
16194524d8fSVakul Garg static void tls_decrypt_done(struct crypto_async_request *req, int err)
16294524d8fSVakul Garg {
16394524d8fSVakul Garg 	struct aead_request *aead_req = (struct aead_request *)req;
16494524d8fSVakul Garg 	struct scatterlist *sgout = aead_req->dst;
165692d7b5dSVakul Garg 	struct scatterlist *sgin = aead_req->src;
1667a3dd8c8SJohn Fastabend 	struct tls_sw_context_rx *ctx;
1677a3dd8c8SJohn Fastabend 	struct tls_context *tls_ctx;
1684509de14SVakul Garg 	struct tls_prot_info *prot;
16994524d8fSVakul Garg 	struct scatterlist *sg;
1707a3dd8c8SJohn Fastabend 	struct sk_buff *skb;
17194524d8fSVakul Garg 	unsigned int pages;
1727a3dd8c8SJohn Fastabend 	int pending;
1737a3dd8c8SJohn Fastabend 
1747a3dd8c8SJohn Fastabend 	skb = (struct sk_buff *)req->data;
1757a3dd8c8SJohn Fastabend 	tls_ctx = tls_get_ctx(skb->sk);
1767a3dd8c8SJohn Fastabend 	ctx = tls_sw_ctx_rx(tls_ctx);
1774509de14SVakul Garg 	prot = &tls_ctx->prot_info;
17894524d8fSVakul Garg 
17994524d8fSVakul Garg 	/* Propagate the error if there was one */
18094524d8fSVakul Garg 	if (err) {
1815c5ec668SJakub Kicinski 		if (err == -EBADMSG)
1825c5ec668SJakub Kicinski 			TLS_INC_STATS(sock_net(skb->sk),
1835c5ec668SJakub Kicinski 				      LINUX_MIB_TLSDECRYPTERROR);
18494524d8fSVakul Garg 		ctx->async_wait.err = err;
1857a3dd8c8SJohn Fastabend 		tls_err_abort(skb->sk, err);
186692d7b5dSVakul Garg 	} else {
187692d7b5dSVakul Garg 		struct strp_msg *rxm = strp_msg(skb);
188b53f4976SJakub Kicinski 		int pad;
189b53f4976SJakub Kicinski 
190c3f6bb74SJakub Kicinski 		pad = padding_length(prot, skb);
191b53f4976SJakub Kicinski 		if (pad < 0) {
192b53f4976SJakub Kicinski 			ctx->async_wait.err = pad;
193b53f4976SJakub Kicinski 			tls_err_abort(skb->sk, pad);
194b53f4976SJakub Kicinski 		} else {
195b53f4976SJakub Kicinski 			rxm->full_len -= pad;
1964509de14SVakul Garg 			rxm->offset += prot->prepend_size;
1974509de14SVakul Garg 			rxm->full_len -= prot->overhead_size;
19894524d8fSVakul Garg 		}
199b53f4976SJakub Kicinski 	}
20094524d8fSVakul Garg 
2017a3dd8c8SJohn Fastabend 	/* After using skb->sk to propagate sk through the crypto async
2027a3dd8c8SJohn Fastabend 	 * callback, we need to NULL it again.
2037a3dd8c8SJohn Fastabend 	 */
2047a3dd8c8SJohn Fastabend 	skb->sk = NULL;
2057a3dd8c8SJohn Fastabend 
20694524d8fSVakul Garg 
207692d7b5dSVakul Garg 	/* Free the destination pages if skb was not decrypted in place */
208692d7b5dSVakul Garg 	if (sgout != sgin) {
20994524d8fSVakul Garg 		/* Skip the first S/G entry as it points to AAD */
21094524d8fSVakul Garg 		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
21194524d8fSVakul Garg 			if (!sg)
21294524d8fSVakul Garg 				break;
21394524d8fSVakul Garg 			put_page(sg_page(sg));
21494524d8fSVakul Garg 		}
215692d7b5dSVakul Garg 	}
21694524d8fSVakul Garg 
21794524d8fSVakul Garg 	kfree(aead_req);
21894524d8fSVakul Garg 
2190cada332SVinay Kumar Yadav 	spin_lock_bh(&ctx->decrypt_compl_lock);
220692d7b5dSVakul Garg 	pending = atomic_dec_return(&ctx->decrypt_pending);
221692d7b5dSVakul Garg 
2220cada332SVinay Kumar Yadav 	if (!pending && ctx->async_notify)
22394524d8fSVakul Garg 		complete(&ctx->async_wait.completion);
2240cada332SVinay Kumar Yadav 	spin_unlock_bh(&ctx->decrypt_compl_lock);
22594524d8fSVakul Garg }
22694524d8fSVakul Garg 
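/* Set up and start the AEAD decryption of one record. In async mode the skb
 * temporarily carries the socket pointer for tls_decrypt_done() and
 * -EINPROGRESS is returned to the caller; otherwise the function waits for
 * the crypto request to complete.
 */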
227c46234ebSDave Watson static int tls_do_decryption(struct sock *sk,
22894524d8fSVakul Garg 			     struct sk_buff *skb,
229c46234ebSDave Watson 			     struct scatterlist *sgin,
230c46234ebSDave Watson 			     struct scatterlist *sgout,
231c46234ebSDave Watson 			     char *iv_recv,
232c46234ebSDave Watson 			     size_t data_len,
23394524d8fSVakul Garg 			     struct aead_request *aead_req,
23494524d8fSVakul Garg 			     bool async)
235c46234ebSDave Watson {
236c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2374509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
238f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
239c46234ebSDave Watson 	int ret;
240c46234ebSDave Watson 
2410b243d00SVakul Garg 	aead_request_set_tfm(aead_req, ctx->aead_recv);
2424509de14SVakul Garg 	aead_request_set_ad(aead_req, prot->aad_size);
243c46234ebSDave Watson 	aead_request_set_crypt(aead_req, sgin, sgout,
2444509de14SVakul Garg 			       data_len + prot->tag_size,
245c46234ebSDave Watson 			       (u8 *)iv_recv);
246c46234ebSDave Watson 
24794524d8fSVakul Garg 	if (async) {
2487a3dd8c8SJohn Fastabend 		/* Using skb->sk to push sk through to crypto async callback
2497a3dd8c8SJohn Fastabend 		 * handler. This allows propagating errors up to the socket
2507a3dd8c8SJohn Fastabend 		 * if needed. It _must_ be cleared in the async handler
251a88c26f6SVakul Garg 		 * before consume_skb is called. We _know_ skb->sk is NULL
2527a3dd8c8SJohn Fastabend 		 * because it is a clone from strparser.
2537a3dd8c8SJohn Fastabend 		 */
2547a3dd8c8SJohn Fastabend 		skb->sk = sk;
25594524d8fSVakul Garg 		aead_request_set_callback(aead_req,
25694524d8fSVakul Garg 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
25794524d8fSVakul Garg 					  tls_decrypt_done, skb);
25894524d8fSVakul Garg 		atomic_inc(&ctx->decrypt_pending);
25994524d8fSVakul Garg 	} else {
26094524d8fSVakul Garg 		aead_request_set_callback(aead_req,
26194524d8fSVakul Garg 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
26294524d8fSVakul Garg 					  crypto_req_done, &ctx->async_wait);
26394524d8fSVakul Garg 	}
26494524d8fSVakul Garg 
26594524d8fSVakul Garg 	ret = crypto_aead_decrypt(aead_req);
26694524d8fSVakul Garg 	if (ret == -EINPROGRESS) {
26794524d8fSVakul Garg 		if (async)
26894524d8fSVakul Garg 			return ret;
26994524d8fSVakul Garg 
27094524d8fSVakul Garg 		ret = crypto_wait_req(ret, &ctx->async_wait);
27194524d8fSVakul Garg 	}
27294524d8fSVakul Garg 
27394524d8fSVakul Garg 	if (async)
27494524d8fSVakul Garg 		atomic_dec(&ctx->decrypt_pending);
27594524d8fSVakul Garg 
276c46234ebSDave Watson 	return ret;
277c46234ebSDave Watson }
278c46234ebSDave Watson 
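/* Trim the plaintext of the open record to target_size and the encrypted
 * buffer to target_size plus the record overhead.
 */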
279d829e9c4SDaniel Borkmann static void tls_trim_both_msgs(struct sock *sk, int target_size)
2803c4d7559SDave Watson {
2813c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2824509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
283f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
284a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
2853c4d7559SDave Watson 
286d829e9c4SDaniel Borkmann 	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
2873c4d7559SDave Watson 	if (target_size > 0)
2884509de14SVakul Garg 		target_size += prot->overhead_size;
289d829e9c4SDaniel Borkmann 	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
2903c4d7559SDave Watson }
2913c4d7559SDave Watson 
292d829e9c4SDaniel Borkmann static int tls_alloc_encrypted_msg(struct sock *sk, int len)
2933c4d7559SDave Watson {
2943c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
295f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
296a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
297d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
2983c4d7559SDave Watson 
299d829e9c4SDaniel Borkmann 	return sk_msg_alloc(sk, msg_en, len, 0);
3003c4d7559SDave Watson }
3013c4d7559SDave Watson 
302d829e9c4SDaniel Borkmann static int tls_clone_plaintext_msg(struct sock *sk, int required)
3033c4d7559SDave Watson {
3043c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
3054509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
306f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
307a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
308d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl = &rec->msg_plaintext;
309d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
3104e6d4720SVakul Garg 	int skip, len;
3113c4d7559SDave Watson 
312d829e9c4SDaniel Borkmann 	/* We add page references worth len bytes from the encrypted sg
313d829e9c4SDaniel Borkmann 	 * at the end of the plaintext sg. It is guaranteed that msg_en
3144e6d4720SVakul Garg 	 * has the required room (ensured by the caller).
3154e6d4720SVakul Garg 	 */
316d829e9c4SDaniel Borkmann 	len = required - msg_pl->sg.size;
31752ea992cSVakul Garg 
318d829e9c4SDaniel Borkmann 	/* Skip initial bytes in msg_en's data to be able to use the
319d829e9c4SDaniel Borkmann 	 * same offset for both plain and encrypted data.
3204e6d4720SVakul Garg 	 */
3214509de14SVakul Garg 	skip = prot->prepend_size + msg_pl->sg.size;
3224e6d4720SVakul Garg 
323d829e9c4SDaniel Borkmann 	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
3243c4d7559SDave Watson }
3253c4d7559SDave Watson 
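/* Allocate a new TLS record together with the AEAD request that follows it,
 * initialize its plaintext and encrypted sk_msgs and set up the two-entry
 * scatterlists (AAD followed by data) used as AEAD input and output.
 */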
326d3b18ad3SJohn Fastabend static struct tls_rec *tls_get_rec(struct sock *sk)
327d3b18ad3SJohn Fastabend {
328d3b18ad3SJohn Fastabend 	struct tls_context *tls_ctx = tls_get_ctx(sk);
3294509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
330d3b18ad3SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
331d3b18ad3SJohn Fastabend 	struct sk_msg *msg_pl, *msg_en;
332d3b18ad3SJohn Fastabend 	struct tls_rec *rec;
333d3b18ad3SJohn Fastabend 	int mem_size;
334d3b18ad3SJohn Fastabend 
335d3b18ad3SJohn Fastabend 	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
336d3b18ad3SJohn Fastabend 
337d3b18ad3SJohn Fastabend 	rec = kzalloc(mem_size, sk->sk_allocation);
338d3b18ad3SJohn Fastabend 	if (!rec)
339d3b18ad3SJohn Fastabend 		return NULL;
340d3b18ad3SJohn Fastabend 
341d3b18ad3SJohn Fastabend 	msg_pl = &rec->msg_plaintext;
342d3b18ad3SJohn Fastabend 	msg_en = &rec->msg_encrypted;
343d3b18ad3SJohn Fastabend 
344d3b18ad3SJohn Fastabend 	sk_msg_init(msg_pl);
345d3b18ad3SJohn Fastabend 	sk_msg_init(msg_en);
346d3b18ad3SJohn Fastabend 
347d3b18ad3SJohn Fastabend 	sg_init_table(rec->sg_aead_in, 2);
3484509de14SVakul Garg 	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
349d3b18ad3SJohn Fastabend 	sg_unmark_end(&rec->sg_aead_in[1]);
350d3b18ad3SJohn Fastabend 
351d3b18ad3SJohn Fastabend 	sg_init_table(rec->sg_aead_out, 2);
3524509de14SVakul Garg 	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
353d3b18ad3SJohn Fastabend 	sg_unmark_end(&rec->sg_aead_out[1]);
354d3b18ad3SJohn Fastabend 
355d3b18ad3SJohn Fastabend 	return rec;
356d3b18ad3SJohn Fastabend }
357d3b18ad3SJohn Fastabend 
358d3b18ad3SJohn Fastabend static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
359d3b18ad3SJohn Fastabend {
360d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &rec->msg_encrypted);
361d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &rec->msg_plaintext);
362d3b18ad3SJohn Fastabend 	kfree(rec);
363d3b18ad3SJohn Fastabend }
364d3b18ad3SJohn Fastabend 
365c774973eSVakul Garg static void tls_free_open_rec(struct sock *sk)
3663c4d7559SDave Watson {
3673c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
368f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
369a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
3703c4d7559SDave Watson 
371d3b18ad3SJohn Fastabend 	if (rec) {
372d3b18ad3SJohn Fastabend 		tls_free_rec(sk, rec);
373d3b18ad3SJohn Fastabend 		ctx->open_rec = NULL;
374d3b18ad3SJohn Fastabend 	}
3753c4d7559SDave Watson }
3763c4d7559SDave Watson 
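/* Transmit a partially sent record first, if there is one, then push every
 * record at the head of tx_list whose encryption has completed (tx_ready),
 * freeing each record once it has been fully transmitted.
 */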
377a42055e8SVakul Garg int tls_tx_records(struct sock *sk, int flags)
378a42055e8SVakul Garg {
379a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
380a42055e8SVakul Garg 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
381a42055e8SVakul Garg 	struct tls_rec *rec, *tmp;
382d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en;
383a42055e8SVakul Garg 	int tx_flags, rc = 0;
384a42055e8SVakul Garg 
385a42055e8SVakul Garg 	if (tls_is_partially_sent_record(tls_ctx)) {
3869932a29aSVakul Garg 		rec = list_first_entry(&ctx->tx_list,
387a42055e8SVakul Garg 				       struct tls_rec, list);
388a42055e8SVakul Garg 
389a42055e8SVakul Garg 		if (flags == -1)
390a42055e8SVakul Garg 			tx_flags = rec->tx_flags;
391a42055e8SVakul Garg 		else
392a42055e8SVakul Garg 			tx_flags = flags;
393a42055e8SVakul Garg 
394a42055e8SVakul Garg 		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
395a42055e8SVakul Garg 		if (rc)
396a42055e8SVakul Garg 			goto tx_err;
397a42055e8SVakul Garg 
398a42055e8SVakul Garg 		/* Full record has been transmitted.
3999932a29aSVakul Garg 		 * Remove the head of tx_list
400a42055e8SVakul Garg 		 */
401a42055e8SVakul Garg 		list_del(&rec->list);
402d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_plaintext);
403a42055e8SVakul Garg 		kfree(rec);
404a42055e8SVakul Garg 	}
405a42055e8SVakul Garg 
4069932a29aSVakul Garg 	/* Tx all ready records */
4079932a29aSVakul Garg 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
4089932a29aSVakul Garg 		if (READ_ONCE(rec->tx_ready)) {
409a42055e8SVakul Garg 			if (flags == -1)
410a42055e8SVakul Garg 				tx_flags = rec->tx_flags;
411a42055e8SVakul Garg 			else
412a42055e8SVakul Garg 				tx_flags = flags;
413a42055e8SVakul Garg 
414d829e9c4SDaniel Borkmann 			msg_en = &rec->msg_encrypted;
415a42055e8SVakul Garg 			rc = tls_push_sg(sk, tls_ctx,
416d829e9c4SDaniel Borkmann 					 &msg_en->sg.data[msg_en->sg.curr],
417a42055e8SVakul Garg 					 0, tx_flags);
418a42055e8SVakul Garg 			if (rc)
419a42055e8SVakul Garg 				goto tx_err;
420a42055e8SVakul Garg 
421a42055e8SVakul Garg 			list_del(&rec->list);
422d829e9c4SDaniel Borkmann 			sk_msg_free(sk, &rec->msg_plaintext);
423a42055e8SVakul Garg 			kfree(rec);
424a42055e8SVakul Garg 		} else {
425a42055e8SVakul Garg 			break;
426a42055e8SVakul Garg 		}
427a42055e8SVakul Garg 	}
428a42055e8SVakul Garg 
429a42055e8SVakul Garg tx_err:
430a42055e8SVakul Garg 	if (rc < 0 && rc != -EAGAIN)
431da353facSDaniel Jordan 		tls_err_abort(sk, -EBADMSG);
432a42055e8SVakul Garg 
433a42055e8SVakul Garg 	return rc;
434a42055e8SVakul Garg }
435a42055e8SVakul Garg 
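/* Completion callback for asynchronous AEAD encryption. Restore the header
 * bytes into the first data sg entry, record any error on the socket, mark
 * the record as ready for transmission and, if it sits at the head of
 * tx_list, schedule the tx work to actually send it.
 */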
436a42055e8SVakul Garg static void tls_encrypt_done(struct crypto_async_request *req, int err)
437a42055e8SVakul Garg {
438a42055e8SVakul Garg 	struct aead_request *aead_req = (struct aead_request *)req;
439a42055e8SVakul Garg 	struct sock *sk = req->data;
440a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
4414509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
442a42055e8SVakul Garg 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
443d829e9c4SDaniel Borkmann 	struct scatterlist *sge;
444d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en;
445a42055e8SVakul Garg 	struct tls_rec *rec;
446a42055e8SVakul Garg 	bool ready = false;
447a42055e8SVakul Garg 	int pending;
448a42055e8SVakul Garg 
449a42055e8SVakul Garg 	rec = container_of(aead_req, struct tls_rec, aead_req);
450d829e9c4SDaniel Borkmann 	msg_en = &rec->msg_encrypted;
451a42055e8SVakul Garg 
452d829e9c4SDaniel Borkmann 	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
4534509de14SVakul Garg 	sge->offset -= prot->prepend_size;
4544509de14SVakul Garg 	sge->length += prot->prepend_size;
455a42055e8SVakul Garg 
45680ece6a0SVakul Garg 	/* Check if an error was previously set on the socket */
457a42055e8SVakul Garg 	if (err || sk->sk_err) {
458a42055e8SVakul Garg 		rec = NULL;
459a42055e8SVakul Garg 
460a42055e8SVakul Garg 		/* If an error is already set on the socket, return the same code */
461a42055e8SVakul Garg 		if (sk->sk_err) {
4621d9d6fd2SDaniel Jordan 			ctx->async_wait.err = -sk->sk_err;
463a42055e8SVakul Garg 		} else {
464a42055e8SVakul Garg 			ctx->async_wait.err = err;
465a42055e8SVakul Garg 			tls_err_abort(sk, err);
466a42055e8SVakul Garg 		}
467a42055e8SVakul Garg 	}
468a42055e8SVakul Garg 
4699932a29aSVakul Garg 	if (rec) {
4709932a29aSVakul Garg 		struct tls_rec *first_rec;
4719932a29aSVakul Garg 
4729932a29aSVakul Garg 		/* Mark the record as ready for transmission */
4739932a29aSVakul Garg 		smp_store_mb(rec->tx_ready, true);
4749932a29aSVakul Garg 
4759932a29aSVakul Garg 		/* If received record is at head of tx_list, schedule tx */
4769932a29aSVakul Garg 		first_rec = list_first_entry(&ctx->tx_list,
4779932a29aSVakul Garg 					     struct tls_rec, list);
4789932a29aSVakul Garg 		if (rec == first_rec)
4799932a29aSVakul Garg 			ready = true;
4809932a29aSVakul Garg 	}
481a42055e8SVakul Garg 
4820cada332SVinay Kumar Yadav 	spin_lock_bh(&ctx->encrypt_compl_lock);
483a42055e8SVakul Garg 	pending = atomic_dec_return(&ctx->encrypt_pending);
484a42055e8SVakul Garg 
4850cada332SVinay Kumar Yadav 	if (!pending && ctx->async_notify)
486a42055e8SVakul Garg 		complete(&ctx->async_wait.completion);
4870cada332SVinay Kumar Yadav 	spin_unlock_bh(&ctx->encrypt_compl_lock);
488a42055e8SVakul Garg 
489a42055e8SVakul Garg 	if (!ready)
490a42055e8SVakul Garg 		return;
491a42055e8SVakul Garg 
492a42055e8SVakul Garg 	/* Schedule the transmission */
493a42055e8SVakul Garg 	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
494d829e9c4SDaniel Borkmann 		schedule_delayed_work(&ctx->tx_work.work, 1);
495a42055e8SVakul Garg }
496a42055e8SVakul Garg 
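/* Build the per-record nonce (a constant B0 byte for CCM ciphers, then salt
 * and IV mixed with the record sequence number), queue the record on tx_list
 * and start the AEAD encryption. On synchronous success the record is marked
 * tx_ready; unless the request failed outright, the open record is detached
 * from the context and the record sequence number is advanced.
 */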
497a42055e8SVakul Garg static int tls_do_encryption(struct sock *sk,
498a42055e8SVakul Garg 			     struct tls_context *tls_ctx,
499a447da7dSDaniel Borkmann 			     struct tls_sw_context_tx *ctx,
500a447da7dSDaniel Borkmann 			     struct aead_request *aead_req,
501d829e9c4SDaniel Borkmann 			     size_t data_len, u32 start)
5023c4d7559SDave Watson {
5034509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
504a42055e8SVakul Garg 	struct tls_rec *rec = ctx->open_rec;
505d829e9c4SDaniel Borkmann 	struct sk_msg *msg_en = &rec->msg_encrypted;
506d829e9c4SDaniel Borkmann 	struct scatterlist *sge = sk_msg_elem(msg_en, start);
507f295b3aeSVakul Garg 	int rc, iv_offset = 0;
5083c4d7559SDave Watson 
509f295b3aeSVakul Garg 	/* For CCM-based ciphers, the first byte of the IV is a constant */
510128cfb88STianjia Zhang 	switch (prot->cipher_type) {
511128cfb88STianjia Zhang 	case TLS_CIPHER_AES_CCM_128:
512f295b3aeSVakul Garg 		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
513f295b3aeSVakul Garg 		iv_offset = 1;
514128cfb88STianjia Zhang 		break;
515128cfb88STianjia Zhang 	case TLS_CIPHER_SM4_CCM:
516128cfb88STianjia Zhang 		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
517128cfb88STianjia Zhang 		iv_offset = 1;
518128cfb88STianjia Zhang 		break;
519f295b3aeSVakul Garg 	}
520f295b3aeSVakul Garg 
521f295b3aeSVakul Garg 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
522f295b3aeSVakul Garg 	       prot->iv_size + prot->salt_size);
523f295b3aeSVakul Garg 
52459610606STianjia Zhang 	xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
52532eb67b9SDave Watson 
5264509de14SVakul Garg 	sge->offset += prot->prepend_size;
5274509de14SVakul Garg 	sge->length -= prot->prepend_size;
5283c4d7559SDave Watson 
529d829e9c4SDaniel Borkmann 	msg_en->sg.curr = start;
5304e6d4720SVakul Garg 
5313c4d7559SDave Watson 	aead_request_set_tfm(aead_req, ctx->aead_send);
5324509de14SVakul Garg 	aead_request_set_ad(aead_req, prot->aad_size);
533d829e9c4SDaniel Borkmann 	aead_request_set_crypt(aead_req, rec->sg_aead_in,
534d829e9c4SDaniel Borkmann 			       rec->sg_aead_out,
53532eb67b9SDave Watson 			       data_len, rec->iv_data);
536a54667f6SVakul Garg 
537a54667f6SVakul Garg 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
538a42055e8SVakul Garg 				  tls_encrypt_done, sk);
539a54667f6SVakul Garg 
5409932a29aSVakul Garg 	/* Add the record to tx_list */
5419932a29aSVakul Garg 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
542a42055e8SVakul Garg 	atomic_inc(&ctx->encrypt_pending);
5433c4d7559SDave Watson 
544a42055e8SVakul Garg 	rc = crypto_aead_encrypt(aead_req);
545a42055e8SVakul Garg 	if (!rc || rc != -EINPROGRESS) {
546a42055e8SVakul Garg 		atomic_dec(&ctx->encrypt_pending);
5474509de14SVakul Garg 		sge->offset -= prot->prepend_size;
5484509de14SVakul Garg 		sge->length += prot->prepend_size;
549a42055e8SVakul Garg 	}
5503c4d7559SDave Watson 
5519932a29aSVakul Garg 	if (!rc) {
5529932a29aSVakul Garg 		WRITE_ONCE(rec->tx_ready, true);
5539932a29aSVakul Garg 	} else if (rc != -EINPROGRESS) {
5549932a29aSVakul Garg 		list_del(&rec->list);
555a42055e8SVakul Garg 		return rc;
5569932a29aSVakul Garg 	}
557a42055e8SVakul Garg 
558a42055e8SVakul Garg 	/* Unhook the record from the context if encryption did not fail */
559a42055e8SVakul Garg 	ctx->open_rec = NULL;
560fb0f886fSJakub Kicinski 	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
5613c4d7559SDave Watson 	return rc;
5623c4d7559SDave Watson }
5633c4d7559SDave Watson 
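/* Split the open record 'from' so that only the bytes covered by
 * apply_bytes remain in it; the rest of the plaintext is moved into a newly
 * allocated record returned through 'to'. *orig_end remembers the original
 * end of 'from' so that tls_merge_open_record() can undo the split.
 */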
564d3b18ad3SJohn Fastabend static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
565d3b18ad3SJohn Fastabend 				 struct tls_rec **to, struct sk_msg *msg_opl,
566d3b18ad3SJohn Fastabend 				 struct sk_msg *msg_oen, u32 split_point,
567d3b18ad3SJohn Fastabend 				 u32 tx_overhead_size, u32 *orig_end)
568d3b18ad3SJohn Fastabend {
569d3b18ad3SJohn Fastabend 	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
570d3b18ad3SJohn Fastabend 	struct scatterlist *sge, *osge, *nsge;
571d3b18ad3SJohn Fastabend 	u32 orig_size = msg_opl->sg.size;
572d3b18ad3SJohn Fastabend 	struct scatterlist tmp = { };
573d3b18ad3SJohn Fastabend 	struct sk_msg *msg_npl;
574d3b18ad3SJohn Fastabend 	struct tls_rec *new;
575d3b18ad3SJohn Fastabend 	int ret;
576d3b18ad3SJohn Fastabend 
577d3b18ad3SJohn Fastabend 	new = tls_get_rec(sk);
578d3b18ad3SJohn Fastabend 	if (!new)
579d3b18ad3SJohn Fastabend 		return -ENOMEM;
580d3b18ad3SJohn Fastabend 	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
581d3b18ad3SJohn Fastabend 			   tx_overhead_size, 0);
582d3b18ad3SJohn Fastabend 	if (ret < 0) {
583d3b18ad3SJohn Fastabend 		tls_free_rec(sk, new);
584d3b18ad3SJohn Fastabend 		return ret;
585d3b18ad3SJohn Fastabend 	}
586d3b18ad3SJohn Fastabend 
587d3b18ad3SJohn Fastabend 	*orig_end = msg_opl->sg.end;
588d3b18ad3SJohn Fastabend 	i = msg_opl->sg.start;
589d3b18ad3SJohn Fastabend 	sge = sk_msg_elem(msg_opl, i);
590d3b18ad3SJohn Fastabend 	while (apply && sge->length) {
591d3b18ad3SJohn Fastabend 		if (sge->length > apply) {
592d3b18ad3SJohn Fastabend 			u32 len = sge->length - apply;
593d3b18ad3SJohn Fastabend 
594d3b18ad3SJohn Fastabend 			get_page(sg_page(sge));
595d3b18ad3SJohn Fastabend 			sg_set_page(&tmp, sg_page(sge), len,
596d3b18ad3SJohn Fastabend 				    sge->offset + apply);
597d3b18ad3SJohn Fastabend 			sge->length = apply;
598d3b18ad3SJohn Fastabend 			bytes += apply;
599d3b18ad3SJohn Fastabend 			apply = 0;
600d3b18ad3SJohn Fastabend 		} else {
601d3b18ad3SJohn Fastabend 			apply -= sge->length;
602d3b18ad3SJohn Fastabend 			bytes += sge->length;
603d3b18ad3SJohn Fastabend 		}
604d3b18ad3SJohn Fastabend 
605d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(i);
606d3b18ad3SJohn Fastabend 		if (i == msg_opl->sg.end)
607d3b18ad3SJohn Fastabend 			break;
608d3b18ad3SJohn Fastabend 		sge = sk_msg_elem(msg_opl, i);
609d3b18ad3SJohn Fastabend 	}
610d3b18ad3SJohn Fastabend 
611d3b18ad3SJohn Fastabend 	msg_opl->sg.end = i;
612d3b18ad3SJohn Fastabend 	msg_opl->sg.curr = i;
613d3b18ad3SJohn Fastabend 	msg_opl->sg.copybreak = 0;
614d3b18ad3SJohn Fastabend 	msg_opl->apply_bytes = 0;
615d3b18ad3SJohn Fastabend 	msg_opl->sg.size = bytes;
616d3b18ad3SJohn Fastabend 
617d3b18ad3SJohn Fastabend 	msg_npl = &new->msg_plaintext;
618d3b18ad3SJohn Fastabend 	msg_npl->apply_bytes = apply;
619d3b18ad3SJohn Fastabend 	msg_npl->sg.size = orig_size - bytes;
620d3b18ad3SJohn Fastabend 
621d3b18ad3SJohn Fastabend 	j = msg_npl->sg.start;
622d3b18ad3SJohn Fastabend 	nsge = sk_msg_elem(msg_npl, j);
623d3b18ad3SJohn Fastabend 	if (tmp.length) {
624d3b18ad3SJohn Fastabend 		memcpy(nsge, &tmp, sizeof(*nsge));
625d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(j);
626d3b18ad3SJohn Fastabend 		nsge = sk_msg_elem(msg_npl, j);
627d3b18ad3SJohn Fastabend 	}
628d3b18ad3SJohn Fastabend 
629d3b18ad3SJohn Fastabend 	osge = sk_msg_elem(msg_opl, i);
630d3b18ad3SJohn Fastabend 	while (osge->length) {
631d3b18ad3SJohn Fastabend 		memcpy(nsge, osge, sizeof(*nsge));
632d3b18ad3SJohn Fastabend 		sg_unmark_end(nsge);
633d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(i);
634d3b18ad3SJohn Fastabend 		sk_msg_iter_var_next(j);
635d3b18ad3SJohn Fastabend 		if (i == *orig_end)
636d3b18ad3SJohn Fastabend 			break;
637d3b18ad3SJohn Fastabend 		osge = sk_msg_elem(msg_opl, i);
638d3b18ad3SJohn Fastabend 		nsge = sk_msg_elem(msg_npl, j);
639d3b18ad3SJohn Fastabend 	}
640d3b18ad3SJohn Fastabend 
641d3b18ad3SJohn Fastabend 	msg_npl->sg.end = j;
642d3b18ad3SJohn Fastabend 	msg_npl->sg.curr = j;
643d3b18ad3SJohn Fastabend 	msg_npl->sg.copybreak = 0;
644d3b18ad3SJohn Fastabend 
645d3b18ad3SJohn Fastabend 	*to = new;
646d3b18ad3SJohn Fastabend 	return 0;
647d3b18ad3SJohn Fastabend }
648d3b18ad3SJohn Fastabend 
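/* Undo a previous tls_split_open_record(): append the plaintext of 'from'
 * back onto 'to' (coalescing sg entries that share a page), restore the
 * original end markers, take over the encrypted buffer and free 'from'.
 */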
649d3b18ad3SJohn Fastabend static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
650d3b18ad3SJohn Fastabend 				  struct tls_rec *from, u32 orig_end)
651d3b18ad3SJohn Fastabend {
652d3b18ad3SJohn Fastabend 	struct sk_msg *msg_npl = &from->msg_plaintext;
653d3b18ad3SJohn Fastabend 	struct sk_msg *msg_opl = &to->msg_plaintext;
654d3b18ad3SJohn Fastabend 	struct scatterlist *osge, *nsge;
655d3b18ad3SJohn Fastabend 	u32 i, j;
656d3b18ad3SJohn Fastabend 
657d3b18ad3SJohn Fastabend 	i = msg_opl->sg.end;
658d3b18ad3SJohn Fastabend 	sk_msg_iter_var_prev(i);
659d3b18ad3SJohn Fastabend 	j = msg_npl->sg.start;
660d3b18ad3SJohn Fastabend 
661d3b18ad3SJohn Fastabend 	osge = sk_msg_elem(msg_opl, i);
662d3b18ad3SJohn Fastabend 	nsge = sk_msg_elem(msg_npl, j);
663d3b18ad3SJohn Fastabend 
664d3b18ad3SJohn Fastabend 	if (sg_page(osge) == sg_page(nsge) &&
665d3b18ad3SJohn Fastabend 	    osge->offset + osge->length == nsge->offset) {
666d3b18ad3SJohn Fastabend 		osge->length += nsge->length;
667d3b18ad3SJohn Fastabend 		put_page(sg_page(nsge));
668d3b18ad3SJohn Fastabend 	}
669d3b18ad3SJohn Fastabend 
670d3b18ad3SJohn Fastabend 	msg_opl->sg.end = orig_end;
671d3b18ad3SJohn Fastabend 	msg_opl->sg.curr = orig_end;
672d3b18ad3SJohn Fastabend 	msg_opl->sg.copybreak = 0;
673d3b18ad3SJohn Fastabend 	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
674d3b18ad3SJohn Fastabend 	msg_opl->sg.size += msg_npl->sg.size;
675d3b18ad3SJohn Fastabend 
676d3b18ad3SJohn Fastabend 	sk_msg_free(sk, &to->msg_encrypted);
677d3b18ad3SJohn Fastabend 	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
678d3b18ad3SJohn Fastabend 
679d3b18ad3SJohn Fastabend 	kfree(from);
680d3b18ad3SJohn Fastabend }
681d3b18ad3SJohn Fastabend 
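/* Close the open record and hand it to the crypto layer: split it if only
 * part of it is to be sent or it no longer fits the encrypted buffer, append
 * the TLS 1.3 content type, chain the plaintext and ciphertext behind their
 * AAD entries, build the AAD and record header, then encrypt and transmit
 * any records that are ready.
 */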
6823c4d7559SDave Watson static int tls_push_record(struct sock *sk, int flags,
6833c4d7559SDave Watson 			   unsigned char record_type)
6843c4d7559SDave Watson {
6853c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
6864509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
687f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
688d3b18ad3SJohn Fastabend 	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
6893f649ab7SKees Cook 	u32 i, split_point, orig_end;
690d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl, *msg_en;
691a447da7dSDaniel Borkmann 	struct aead_request *req;
692d3b18ad3SJohn Fastabend 	bool split;
6933c4d7559SDave Watson 	int rc;
6943c4d7559SDave Watson 
695a42055e8SVakul Garg 	if (!rec)
696a42055e8SVakul Garg 		return 0;
697a447da7dSDaniel Borkmann 
698d829e9c4SDaniel Borkmann 	msg_pl = &rec->msg_plaintext;
699d829e9c4SDaniel Borkmann 	msg_en = &rec->msg_encrypted;
700d829e9c4SDaniel Borkmann 
701d3b18ad3SJohn Fastabend 	split_point = msg_pl->apply_bytes;
702d3b18ad3SJohn Fastabend 	split = split_point && split_point < msg_pl->sg.size;
703d468e477SJohn Fastabend 	if (unlikely((!split &&
704d468e477SJohn Fastabend 		      msg_pl->sg.size +
705d468e477SJohn Fastabend 		      prot->overhead_size > msg_en->sg.size) ||
706d468e477SJohn Fastabend 		     (split &&
707d468e477SJohn Fastabend 		      split_point +
708d468e477SJohn Fastabend 		      prot->overhead_size > msg_en->sg.size))) {
709d468e477SJohn Fastabend 		split = true;
710d468e477SJohn Fastabend 		split_point = msg_en->sg.size;
711d468e477SJohn Fastabend 	}
712d3b18ad3SJohn Fastabend 	if (split) {
713d3b18ad3SJohn Fastabend 		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
7144509de14SVakul Garg 					   split_point, prot->overhead_size,
715d3b18ad3SJohn Fastabend 					   &orig_end);
716d3b18ad3SJohn Fastabend 		if (rc < 0)
717d3b18ad3SJohn Fastabend 			return rc;
718d468e477SJohn Fastabend 		/* This can happen if the above tls_split_open_record allocated
719d468e477SJohn Fastabend 		 * a single large encryption buffer instead of two smaller
720d468e477SJohn Fastabend 		 * ones. In this case adjust the pointers and continue without
721d468e477SJohn Fastabend 		 * splitting.
722d468e477SJohn Fastabend 		 */
723d468e477SJohn Fastabend 		if (!msg_pl->sg.size) {
724d468e477SJohn Fastabend 			tls_merge_open_record(sk, rec, tmp, orig_end);
725d468e477SJohn Fastabend 			msg_pl = &rec->msg_plaintext;
726d468e477SJohn Fastabend 			msg_en = &rec->msg_encrypted;
727d468e477SJohn Fastabend 			split = false;
728d468e477SJohn Fastabend 		}
729d3b18ad3SJohn Fastabend 		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
7304509de14SVakul Garg 			    prot->overhead_size);
731d3b18ad3SJohn Fastabend 	}
732d3b18ad3SJohn Fastabend 
733a42055e8SVakul Garg 	rec->tx_flags = flags;
734a42055e8SVakul Garg 	req = &rec->aead_req;
7353c4d7559SDave Watson 
736d829e9c4SDaniel Borkmann 	i = msg_pl->sg.end;
737d829e9c4SDaniel Borkmann 	sk_msg_iter_var_prev(i);
738130b392cSDave Watson 
739130b392cSDave Watson 	rec->content_type = record_type;
7404509de14SVakul Garg 	if (prot->version == TLS_1_3_VERSION) {
741130b392cSDave Watson 		/* Add content type to end of message.  No padding added */
742130b392cSDave Watson 		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
743130b392cSDave Watson 		sg_mark_end(&rec->sg_content_type);
744130b392cSDave Watson 		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
745130b392cSDave Watson 			 &rec->sg_content_type);
746130b392cSDave Watson 	} else {
747d829e9c4SDaniel Borkmann 		sg_mark_end(sk_msg_elem(msg_pl, i));
748130b392cSDave Watson 	}
749a42055e8SVakul Garg 
7509aaaa568SJohn Fastabend 	if (msg_pl->sg.end < msg_pl->sg.start) {
7519aaaa568SJohn Fastabend 		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
7529aaaa568SJohn Fastabend 			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
7539aaaa568SJohn Fastabend 			 msg_pl->sg.data);
7549aaaa568SJohn Fastabend 	}
7559aaaa568SJohn Fastabend 
756d829e9c4SDaniel Borkmann 	i = msg_pl->sg.start;
7579e5ffed3SJakub Kicinski 	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
758d829e9c4SDaniel Borkmann 
759d829e9c4SDaniel Borkmann 	i = msg_en->sg.end;
760d829e9c4SDaniel Borkmann 	sk_msg_iter_var_prev(i);
761d829e9c4SDaniel Borkmann 	sg_mark_end(sk_msg_elem(msg_en, i));
762d829e9c4SDaniel Borkmann 
763d829e9c4SDaniel Borkmann 	i = msg_en->sg.start;
764d829e9c4SDaniel Borkmann 	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
765d829e9c4SDaniel Borkmann 
7664509de14SVakul Garg 	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
7676942a284SVadim Fedorenko 		     tls_ctx->tx.rec_seq, record_type, prot);
7683c4d7559SDave Watson 
7693c4d7559SDave Watson 	tls_fill_prepend(tls_ctx,
770d829e9c4SDaniel Borkmann 			 page_address(sg_page(&msg_en->sg.data[i])) +
771130b392cSDave Watson 			 msg_en->sg.data[i].offset,
7724509de14SVakul Garg 			 msg_pl->sg.size + prot->tail_size,
7736942a284SVadim Fedorenko 			 record_type);
7743c4d7559SDave Watson 
775d829e9c4SDaniel Borkmann 	tls_ctx->pending_open_record_frags = false;
7763c4d7559SDave Watson 
777130b392cSDave Watson 	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
7784509de14SVakul Garg 			       msg_pl->sg.size + prot->tail_size, i);
7793c4d7559SDave Watson 	if (rc < 0) {
780d3b18ad3SJohn Fastabend 		if (rc != -EINPROGRESS) {
781da353facSDaniel Jordan 			tls_err_abort(sk, -EBADMSG);
782d3b18ad3SJohn Fastabend 			if (split) {
783d3b18ad3SJohn Fastabend 				tls_ctx->pending_open_record_frags = true;
784d3b18ad3SJohn Fastabend 				tls_merge_open_record(sk, rec, tmp, orig_end);
785d3b18ad3SJohn Fastabend 			}
786d3b18ad3SJohn Fastabend 		}
7875b053e12SDave Watson 		ctx->async_capable = 1;
788a42055e8SVakul Garg 		return rc;
789d3b18ad3SJohn Fastabend 	} else if (split) {
790d3b18ad3SJohn Fastabend 		msg_pl = &tmp->msg_plaintext;
791d3b18ad3SJohn Fastabend 		msg_en = &tmp->msg_encrypted;
7924509de14SVakul Garg 		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
793d3b18ad3SJohn Fastabend 		tls_ctx->pending_open_record_frags = true;
794d3b18ad3SJohn Fastabend 		ctx->open_rec = tmp;
7953c4d7559SDave Watson 	}
7963c4d7559SDave Watson 
797a42055e8SVakul Garg 	return tls_tx_records(sk, flags);
7983c4d7559SDave Watson }
7993c4d7559SDave Watson 
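/* Run the open record through the psock BPF verdict, if a policy is
 * attached: __SK_PASS pushes the record out on this socket, __SK_REDIRECT
 * hands the plaintext to another socket via tcp_bpf_sendmsg_redir(), and
 * __SK_DROP frees it and returns -EACCES. Without a psock or policy the
 * record is simply pushed.
 */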
800d3b18ad3SJohn Fastabend static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
801d3b18ad3SJohn Fastabend 			       bool full_record, u8 record_type,
802a7bff11fSVadim Fedorenko 			       ssize_t *copied, int flags)
8033c4d7559SDave Watson {
8043c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
805f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
806d3b18ad3SJohn Fastabend 	struct sk_msg msg_redir = { };
807d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
808d3b18ad3SJohn Fastabend 	struct sock *sk_redir;
809a42055e8SVakul Garg 	struct tls_rec *rec;
8100608c69cSJohn Fastabend 	bool enospc, policy;
811d3b18ad3SJohn Fastabend 	int err = 0, send;
8127246d8edSJohn Fastabend 	u32 delta = 0;
813a42055e8SVakul Garg 
8140608c69cSJohn Fastabend 	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
815d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
816d10523d0SJakub Kicinski 	if (!psock || !policy) {
817d10523d0SJakub Kicinski 		err = tls_push_record(sk, flags, record_type);
818635d9398SVadim Fedorenko 		if (err && sk->sk_err == EBADMSG) {
819d10523d0SJakub Kicinski 			*copied -= sk_msg_free(sk, msg);
820d10523d0SJakub Kicinski 			tls_free_open_rec(sk);
821635d9398SVadim Fedorenko 			err = -sk->sk_err;
822d10523d0SJakub Kicinski 		}
823095f5614SXiyu Yang 		if (psock)
824095f5614SXiyu Yang 			sk_psock_put(sk, psock);
825d10523d0SJakub Kicinski 		return err;
826d10523d0SJakub Kicinski 	}
827d3b18ad3SJohn Fastabend more_data:
828d3b18ad3SJohn Fastabend 	enospc = sk_msg_full(msg);
8297246d8edSJohn Fastabend 	if (psock->eval == __SK_NONE) {
8307246d8edSJohn Fastabend 		delta = msg->sg.size;
831d3b18ad3SJohn Fastabend 		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
8327246d8edSJohn Fastabend 		delta -= msg->sg.size;
8337246d8edSJohn Fastabend 	}
834d3b18ad3SJohn Fastabend 	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
835d3b18ad3SJohn Fastabend 	    !enospc && !full_record) {
836d3b18ad3SJohn Fastabend 		err = -ENOSPC;
837d3b18ad3SJohn Fastabend 		goto out_err;
838d3b18ad3SJohn Fastabend 	}
839d3b18ad3SJohn Fastabend 	msg->cork_bytes = 0;
840d3b18ad3SJohn Fastabend 	send = msg->sg.size;
841d3b18ad3SJohn Fastabend 	if (msg->apply_bytes && msg->apply_bytes < send)
842d3b18ad3SJohn Fastabend 		send = msg->apply_bytes;
843a42055e8SVakul Garg 
844d3b18ad3SJohn Fastabend 	switch (psock->eval) {
845d3b18ad3SJohn Fastabend 	case __SK_PASS:
846d3b18ad3SJohn Fastabend 		err = tls_push_record(sk, flags, record_type);
847635d9398SVadim Fedorenko 		if (err && sk->sk_err == EBADMSG) {
848d3b18ad3SJohn Fastabend 			*copied -= sk_msg_free(sk, msg);
849d3b18ad3SJohn Fastabend 			tls_free_open_rec(sk);
850635d9398SVadim Fedorenko 			err = -sk->sk_err;
851d3b18ad3SJohn Fastabend 			goto out_err;
852d3b18ad3SJohn Fastabend 		}
853d3b18ad3SJohn Fastabend 		break;
854d3b18ad3SJohn Fastabend 	case __SK_REDIRECT:
855d3b18ad3SJohn Fastabend 		sk_redir = psock->sk_redir;
856d3b18ad3SJohn Fastabend 		memcpy(&msg_redir, msg, sizeof(*msg));
857d3b18ad3SJohn Fastabend 		if (msg->apply_bytes < send)
858d3b18ad3SJohn Fastabend 			msg->apply_bytes = 0;
859d3b18ad3SJohn Fastabend 		else
860d3b18ad3SJohn Fastabend 			msg->apply_bytes -= send;
861d3b18ad3SJohn Fastabend 		sk_msg_return_zero(sk, msg, send);
862d3b18ad3SJohn Fastabend 		msg->sg.size -= send;
863d3b18ad3SJohn Fastabend 		release_sock(sk);
864d3b18ad3SJohn Fastabend 		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
865d3b18ad3SJohn Fastabend 		lock_sock(sk);
866d3b18ad3SJohn Fastabend 		if (err < 0) {
867d3b18ad3SJohn Fastabend 			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
868d3b18ad3SJohn Fastabend 			msg->sg.size = 0;
869d3b18ad3SJohn Fastabend 		}
870d3b18ad3SJohn Fastabend 		if (msg->sg.size == 0)
871d3b18ad3SJohn Fastabend 			tls_free_open_rec(sk);
872d3b18ad3SJohn Fastabend 		break;
873d3b18ad3SJohn Fastabend 	case __SK_DROP:
874d3b18ad3SJohn Fastabend 	default:
875d3b18ad3SJohn Fastabend 		sk_msg_free_partial(sk, msg, send);
876d3b18ad3SJohn Fastabend 		if (msg->apply_bytes < send)
877d3b18ad3SJohn Fastabend 			msg->apply_bytes = 0;
878d3b18ad3SJohn Fastabend 		else
879d3b18ad3SJohn Fastabend 			msg->apply_bytes -= send;
880d3b18ad3SJohn Fastabend 		if (msg->sg.size == 0)
881d3b18ad3SJohn Fastabend 			tls_free_open_rec(sk);
8827246d8edSJohn Fastabend 		*copied -= (send + delta);
883d3b18ad3SJohn Fastabend 		err = -EACCES;
884d3b18ad3SJohn Fastabend 	}
885a42055e8SVakul Garg 
886d3b18ad3SJohn Fastabend 	if (likely(!err)) {
887d3b18ad3SJohn Fastabend 		bool reset_eval = !ctx->open_rec;
888d3b18ad3SJohn Fastabend 
889d3b18ad3SJohn Fastabend 		rec = ctx->open_rec;
890d3b18ad3SJohn Fastabend 		if (rec) {
891d3b18ad3SJohn Fastabend 			msg = &rec->msg_plaintext;
892d3b18ad3SJohn Fastabend 			if (!msg->apply_bytes)
893d3b18ad3SJohn Fastabend 				reset_eval = true;
894d3b18ad3SJohn Fastabend 		}
895d3b18ad3SJohn Fastabend 		if (reset_eval) {
896d3b18ad3SJohn Fastabend 			psock->eval = __SK_NONE;
897d3b18ad3SJohn Fastabend 			if (psock->sk_redir) {
898d3b18ad3SJohn Fastabend 				sock_put(psock->sk_redir);
899d3b18ad3SJohn Fastabend 				psock->sk_redir = NULL;
900d3b18ad3SJohn Fastabend 			}
901d3b18ad3SJohn Fastabend 		}
902d3b18ad3SJohn Fastabend 		if (rec)
903d3b18ad3SJohn Fastabend 			goto more_data;
904d3b18ad3SJohn Fastabend 	}
905d3b18ad3SJohn Fastabend  out_err:
906d3b18ad3SJohn Fastabend 	sk_psock_put(sk, psock);
907d3b18ad3SJohn Fastabend 	return err;
908d3b18ad3SJohn Fastabend }
909d3b18ad3SJohn Fastabend 
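/* Push whatever plaintext is currently queued in the open record as a
 * TLS_RECORD_TYPE_DATA record, honouring any attached BPF policy.
 */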
910d3b18ad3SJohn Fastabend static int tls_sw_push_pending_record(struct sock *sk, int flags)
911d3b18ad3SJohn Fastabend {
912d3b18ad3SJohn Fastabend 	struct tls_context *tls_ctx = tls_get_ctx(sk);
913d3b18ad3SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
914d3b18ad3SJohn Fastabend 	struct tls_rec *rec = ctx->open_rec;
915d3b18ad3SJohn Fastabend 	struct sk_msg *msg_pl;
916d3b18ad3SJohn Fastabend 	size_t copied;
917d3b18ad3SJohn Fastabend 
918a42055e8SVakul Garg 	if (!rec)
919d3b18ad3SJohn Fastabend 		return 0;
920a42055e8SVakul Garg 
921d829e9c4SDaniel Borkmann 	msg_pl = &rec->msg_plaintext;
922d3b18ad3SJohn Fastabend 	copied = msg_pl->sg.size;
923d3b18ad3SJohn Fastabend 	if (!copied)
924d3b18ad3SJohn Fastabend 		return 0;
925a42055e8SVakul Garg 
926d3b18ad3SJohn Fastabend 	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
927d3b18ad3SJohn Fastabend 				   &copied, flags);
928a42055e8SVakul Garg }
929a42055e8SVakul Garg 
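/* sendmsg() for a TLS socket using software crypto. Data is copied (or,
 * where possible, mapped zero-copy) into the open record; records are
 * closed and encrypted when they fill up or at end-of-record, and with an
 * asynchronous crypto backend the function waits for outstanding
 * encryptions before returning the number of bytes accepted.
 */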
930a42055e8SVakul Garg int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
931a42055e8SVakul Garg {
9323c4d7559SDave Watson 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
933a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
9344509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
935a42055e8SVakul Garg 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
9365b053e12SDave Watson 	bool async_capable = ctx->async_capable;
937a42055e8SVakul Garg 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
93800e23707SDavid Howells 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
9393c4d7559SDave Watson 	bool eor = !(msg->msg_flags & MSG_MORE);
940a7bff11fSVadim Fedorenko 	size_t try_to_copy;
941a7bff11fSVadim Fedorenko 	ssize_t copied = 0;
942d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl, *msg_en;
943a42055e8SVakul Garg 	struct tls_rec *rec;
944a42055e8SVakul Garg 	int required_size;
945a42055e8SVakul Garg 	int num_async = 0;
9463c4d7559SDave Watson 	bool full_record;
947a42055e8SVakul Garg 	int record_room;
948a42055e8SVakul Garg 	int num_zc = 0;
9493c4d7559SDave Watson 	int orig_size;
9504128c0cfSVakul Garg 	int ret = 0;
9510cada332SVinay Kumar Yadav 	int pending;
9523c4d7559SDave Watson 
9531c3b63f1SRouven Czerwinski 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
9541c3b63f1SRouven Czerwinski 			       MSG_CMSG_COMPAT))
9554a5cdc60SValentin Vidic 		return -EOPNOTSUPP;
9563c4d7559SDave Watson 
95779ffe608SJakub Kicinski 	mutex_lock(&tls_ctx->tx_lock);
9583c4d7559SDave Watson 	lock_sock(sk);
9593c4d7559SDave Watson 
9603c4d7559SDave Watson 	if (unlikely(msg->msg_controllen)) {
9613c4d7559SDave Watson 		ret = tls_proccess_cmsg(sk, msg, &record_type);
962a42055e8SVakul Garg 		if (ret) {
963a42055e8SVakul Garg 			if (ret == -EINPROGRESS)
964a42055e8SVakul Garg 				num_async++;
965a42055e8SVakul Garg 			else if (ret != -EAGAIN)
9663c4d7559SDave Watson 				goto send_end;
9673c4d7559SDave Watson 		}
968a42055e8SVakul Garg 	}
9693c4d7559SDave Watson 
9703c4d7559SDave Watson 	while (msg_data_left(msg)) {
9713c4d7559SDave Watson 		if (sk->sk_err) {
97230be8f8dSr.hering@avm.de 			ret = -sk->sk_err;
9733c4d7559SDave Watson 			goto send_end;
9743c4d7559SDave Watson 		}
9753c4d7559SDave Watson 
976d3b18ad3SJohn Fastabend 		if (ctx->open_rec)
977d3b18ad3SJohn Fastabend 			rec = ctx->open_rec;
978d3b18ad3SJohn Fastabend 		else
979d3b18ad3SJohn Fastabend 			rec = ctx->open_rec = tls_get_rec(sk);
980a42055e8SVakul Garg 		if (!rec) {
981a42055e8SVakul Garg 			ret = -ENOMEM;
982a42055e8SVakul Garg 			goto send_end;
983a42055e8SVakul Garg 		}
984a42055e8SVakul Garg 
985d829e9c4SDaniel Borkmann 		msg_pl = &rec->msg_plaintext;
986d829e9c4SDaniel Borkmann 		msg_en = &rec->msg_encrypted;
987d829e9c4SDaniel Borkmann 
988d829e9c4SDaniel Borkmann 		orig_size = msg_pl->sg.size;
9893c4d7559SDave Watson 		full_record = false;
9903c4d7559SDave Watson 		try_to_copy = msg_data_left(msg);
991d829e9c4SDaniel Borkmann 		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
9923c4d7559SDave Watson 		if (try_to_copy >= record_room) {
9933c4d7559SDave Watson 			try_to_copy = record_room;
9943c4d7559SDave Watson 			full_record = true;
9953c4d7559SDave Watson 		}
9963c4d7559SDave Watson 
997d829e9c4SDaniel Borkmann 		required_size = msg_pl->sg.size + try_to_copy +
9984509de14SVakul Garg 				prot->overhead_size;
9993c4d7559SDave Watson 
10003c4d7559SDave Watson 		if (!sk_stream_memory_free(sk))
10013c4d7559SDave Watson 			goto wait_for_sndbuf;
1002a42055e8SVakul Garg 
10033c4d7559SDave Watson alloc_encrypted:
1004d829e9c4SDaniel Borkmann 		ret = tls_alloc_encrypted_msg(sk, required_size);
10053c4d7559SDave Watson 		if (ret) {
10063c4d7559SDave Watson 			if (ret != -ENOSPC)
10073c4d7559SDave Watson 				goto wait_for_memory;
10083c4d7559SDave Watson 
10093c4d7559SDave Watson 			/* Adjust try_to_copy according to the amount that was
10103c4d7559SDave Watson 			 * actually allocated. The difference is due
10113c4d7559SDave Watson 			 * to the max sg elements limit.
10123c4d7559SDave Watson 			 */
1013d829e9c4SDaniel Borkmann 			try_to_copy -= required_size - msg_en->sg.size;
10143c4d7559SDave Watson 			full_record = true;
10153c4d7559SDave Watson 		}
1016a42055e8SVakul Garg 
1017a42055e8SVakul Garg 		if (!is_kvec && (full_record || eor) && !async_capable) {
1018d3b18ad3SJohn Fastabend 			u32 first = msg_pl->sg.end;
1019d3b18ad3SJohn Fastabend 
1020d829e9c4SDaniel Borkmann 			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1021d829e9c4SDaniel Borkmann 							msg_pl, try_to_copy);
10223c4d7559SDave Watson 			if (ret)
10233c4d7559SDave Watson 				goto fallback_to_reg_send;
10243c4d7559SDave Watson 
1025a42055e8SVakul Garg 			num_zc++;
10263c4d7559SDave Watson 			copied += try_to_copy;
1027d3b18ad3SJohn Fastabend 
1028d3b18ad3SJohn Fastabend 			sk_msg_sg_copy_set(msg_pl, first);
1029d3b18ad3SJohn Fastabend 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1030d3b18ad3SJohn Fastabend 						  record_type, &copied,
1031d3b18ad3SJohn Fastabend 						  msg->msg_flags);
1032a42055e8SVakul Garg 			if (ret) {
1033a42055e8SVakul Garg 				if (ret == -EINPROGRESS)
1034a42055e8SVakul Garg 					num_async++;
1035d3b18ad3SJohn Fastabend 				else if (ret == -ENOMEM)
1036d3b18ad3SJohn Fastabend 					goto wait_for_memory;
1037c329ef96SJakub Kicinski 				else if (ctx->open_rec && ret == -ENOSPC)
1038d3b18ad3SJohn Fastabend 					goto rollback_iter;
1039a42055e8SVakul Garg 				else if (ret != -EAGAIN)
10403c4d7559SDave Watson 					goto send_end;
1041a42055e8SVakul Garg 			}
10425a3611efSDoron Roberts-Kedes 			continue;
1043d3b18ad3SJohn Fastabend rollback_iter:
1044d3b18ad3SJohn Fastabend 			copied -= try_to_copy;
1045d3b18ad3SJohn Fastabend 			sk_msg_sg_copy_clear(msg_pl, first);
1046d3b18ad3SJohn Fastabend 			iov_iter_revert(&msg->msg_iter,
1047d3b18ad3SJohn Fastabend 					msg_pl->sg.size - orig_size);
10483c4d7559SDave Watson fallback_to_reg_send:
1049d829e9c4SDaniel Borkmann 			sk_msg_trim(sk, msg_pl, orig_size);
10503c4d7559SDave Watson 		}
10513c4d7559SDave Watson 
1052d829e9c4SDaniel Borkmann 		required_size = msg_pl->sg.size + try_to_copy;
10534e6d4720SVakul Garg 
1054d829e9c4SDaniel Borkmann 		ret = tls_clone_plaintext_msg(sk, required_size);
10553c4d7559SDave Watson 		if (ret) {
10563c4d7559SDave Watson 			if (ret != -ENOSPC)
10574e6d4720SVakul Garg 				goto send_end;
10583c4d7559SDave Watson 
10593c4d7559SDave Watson 			/* Adjust try_to_copy according to the amount that was
10603c4d7559SDave Watson 			 * actually allocated. The difference is due
10613c4d7559SDave Watson 			 * to the max sg elements limit.
10623c4d7559SDave Watson 			 */
1063d829e9c4SDaniel Borkmann 			try_to_copy -= required_size - msg_pl->sg.size;
10643c4d7559SDave Watson 			full_record = true;
10654509de14SVakul Garg 			sk_msg_trim(sk, msg_en,
10664509de14SVakul Garg 				    msg_pl->sg.size + prot->overhead_size);
10673c4d7559SDave Watson 		}
10683c4d7559SDave Watson 
106965a10e28SVakul Garg 		if (try_to_copy) {
107065a10e28SVakul Garg 			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
107165a10e28SVakul Garg 						       msg_pl, try_to_copy);
1072d829e9c4SDaniel Borkmann 			if (ret < 0)
10733c4d7559SDave Watson 				goto trim_sgl;
107465a10e28SVakul Garg 		}
10753c4d7559SDave Watson 
1076d829e9c4SDaniel Borkmann 		/* The open record is only defined if the copy succeeded; otherwise
1077d829e9c4SDaniel Borkmann 		 * we would trim the sg but not reset the open record frags.
1078d829e9c4SDaniel Borkmann 		 */
1079d829e9c4SDaniel Borkmann 		tls_ctx->pending_open_record_frags = true;
10803c4d7559SDave Watson 		copied += try_to_copy;
10813c4d7559SDave Watson 		if (full_record || eor) {
1082d3b18ad3SJohn Fastabend 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1083d3b18ad3SJohn Fastabend 						  record_type, &copied,
1084d3b18ad3SJohn Fastabend 						  msg->msg_flags);
10853c4d7559SDave Watson 			if (ret) {
1086a42055e8SVakul Garg 				if (ret == -EINPROGRESS)
1087a42055e8SVakul Garg 					num_async++;
1088d3b18ad3SJohn Fastabend 				else if (ret == -ENOMEM)
1089d3b18ad3SJohn Fastabend 					goto wait_for_memory;
1090d3b18ad3SJohn Fastabend 				else if (ret != -EAGAIN) {
1091d3b18ad3SJohn Fastabend 					if (ret == -ENOSPC)
1092d3b18ad3SJohn Fastabend 						ret = 0;
10933c4d7559SDave Watson 					goto send_end;
10943c4d7559SDave Watson 				}
10953c4d7559SDave Watson 			}
1096d3b18ad3SJohn Fastabend 		}
10973c4d7559SDave Watson 
10983c4d7559SDave Watson 		continue;
10993c4d7559SDave Watson 
11003c4d7559SDave Watson wait_for_sndbuf:
11013c4d7559SDave Watson 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
11023c4d7559SDave Watson wait_for_memory:
11033c4d7559SDave Watson 		ret = sk_stream_wait_memory(sk, &timeo);
11043c4d7559SDave Watson 		if (ret) {
11053c4d7559SDave Watson trim_sgl:
1106c329ef96SJakub Kicinski 			if (ctx->open_rec)
1107d829e9c4SDaniel Borkmann 				tls_trim_both_msgs(sk, orig_size);
11083c4d7559SDave Watson 			goto send_end;
11093c4d7559SDave Watson 		}
11103c4d7559SDave Watson 
1111c329ef96SJakub Kicinski 		if (ctx->open_rec && msg_en->sg.size < required_size)
11123c4d7559SDave Watson 			goto alloc_encrypted;
11133c4d7559SDave Watson 	}
11143c4d7559SDave Watson 
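	/* Everything has been queued into records.  If no encryption ran
	 * asynchronously we are done; otherwise, if zero-copy records were
	 * queued, wait for their encryptions to complete and then flush any
	 * finished records via tls_tx_records().
	 */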
1115a42055e8SVakul Garg 	if (!num_async) {
1116a42055e8SVakul Garg 		goto send_end;
1117a42055e8SVakul Garg 	} else if (num_zc) {
1118a42055e8SVakul Garg 		/* Wait for pending encryptions to get completed */
11190cada332SVinay Kumar Yadav 		spin_lock_bh(&ctx->encrypt_compl_lock);
11200cada332SVinay Kumar Yadav 		ctx->async_notify = true;
1121a42055e8SVakul Garg 
11220cada332SVinay Kumar Yadav 		pending = atomic_read(&ctx->encrypt_pending);
11230cada332SVinay Kumar Yadav 		spin_unlock_bh(&ctx->encrypt_compl_lock);
11240cada332SVinay Kumar Yadav 		if (pending)
1125a42055e8SVakul Garg 			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1126a42055e8SVakul Garg 		else
1127a42055e8SVakul Garg 			reinit_completion(&ctx->async_wait.completion);
1128a42055e8SVakul Garg 
11290cada332SVinay Kumar Yadav 		/* There can be no concurrent accesses, since we have no
11300cada332SVinay Kumar Yadav 		 * pending encrypt operations
11310cada332SVinay Kumar Yadav 		 */
1132a42055e8SVakul Garg 		WRITE_ONCE(ctx->async_notify, false);
1133a42055e8SVakul Garg 
1134a42055e8SVakul Garg 		if (ctx->async_wait.err) {
1135a42055e8SVakul Garg 			ret = ctx->async_wait.err;
1136a42055e8SVakul Garg 			copied = 0;
1137a42055e8SVakul Garg 		}
1138a42055e8SVakul Garg 	}
1139a42055e8SVakul Garg 
1140a42055e8SVakul Garg 	/* Transmit if any encryptions have completed */
1141a42055e8SVakul Garg 	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1142a42055e8SVakul Garg 		cancel_delayed_work(&ctx->tx_work.work);
1143a42055e8SVakul Garg 		tls_tx_records(sk, msg->msg_flags);
1144a42055e8SVakul Garg 	}
1145a42055e8SVakul Garg 
11463c4d7559SDave Watson send_end:
11473c4d7559SDave Watson 	ret = sk_stream_error(sk, msg->msg_flags, ret);
11483c4d7559SDave Watson 
11493c4d7559SDave Watson 	release_sock(sk);
115079ffe608SJakub Kicinski 	mutex_unlock(&tls_ctx->tx_lock);
1151a7bff11fSVadim Fedorenko 	return copied > 0 ? copied : ret;
11523c4d7559SDave Watson }
11533c4d7559SDave Watson 
115401cb8a1aSYueHaibing static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
11553c4d7559SDave Watson 			      int offset, size_t size, int flags)
11563c4d7559SDave Watson {
1157a42055e8SVakul Garg 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
11583c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1159f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
11604509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
11613c4d7559SDave Watson 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
1162d829e9c4SDaniel Borkmann 	struct sk_msg *msg_pl;
1163a42055e8SVakul Garg 	struct tls_rec *rec;
1164a42055e8SVakul Garg 	int num_async = 0;
1165a7bff11fSVadim Fedorenko 	ssize_t copied = 0;
11663c4d7559SDave Watson 	bool full_record;
11673c4d7559SDave Watson 	int record_room;
11684128c0cfSVakul Garg 	int ret = 0;
1169a42055e8SVakul Garg 	bool eor;
11703c4d7559SDave Watson 
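	/* MSG_SENDPAGE_NOTLAST signals that more pages will follow, so the
	 * record is only closed (end-of-record) when the flag is absent.
	 */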
1171d452d48bSJakub Kicinski 	eor = !(flags & MSG_SENDPAGE_NOTLAST);
11723c4d7559SDave Watson 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
11733c4d7559SDave Watson 
11743c4d7559SDave Watson 	/* Call the sk_stream functions to manage the sndbuf mem. */
11753c4d7559SDave Watson 	while (size > 0) {
11763c4d7559SDave Watson 		size_t copy, required_size;
11773c4d7559SDave Watson 
11783c4d7559SDave Watson 		if (sk->sk_err) {
117930be8f8dSr.hering@avm.de 			ret = -sk->sk_err;
11803c4d7559SDave Watson 			goto sendpage_end;
11813c4d7559SDave Watson 		}
11823c4d7559SDave Watson 
1183d3b18ad3SJohn Fastabend 		if (ctx->open_rec)
1184d3b18ad3SJohn Fastabend 			rec = ctx->open_rec;
1185d3b18ad3SJohn Fastabend 		else
1186d3b18ad3SJohn Fastabend 			rec = ctx->open_rec = tls_get_rec(sk);
1187a42055e8SVakul Garg 		if (!rec) {
1188a42055e8SVakul Garg 			ret = -ENOMEM;
1189a42055e8SVakul Garg 			goto sendpage_end;
1190a42055e8SVakul Garg 		}
1191a42055e8SVakul Garg 
1192d829e9c4SDaniel Borkmann 		msg_pl = &rec->msg_plaintext;
1193d829e9c4SDaniel Borkmann 
11943c4d7559SDave Watson 		full_record = false;
1195d829e9c4SDaniel Borkmann 		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
11963c4d7559SDave Watson 		copy = size;
11973c4d7559SDave Watson 		if (copy >= record_room) {
11983c4d7559SDave Watson 			copy = record_room;
11993c4d7559SDave Watson 			full_record = true;
12003c4d7559SDave Watson 		}
1201d829e9c4SDaniel Borkmann 
12024509de14SVakul Garg 		required_size = msg_pl->sg.size + copy + prot->overhead_size;
12033c4d7559SDave Watson 
12043c4d7559SDave Watson 		if (!sk_stream_memory_free(sk))
12053c4d7559SDave Watson 			goto wait_for_sndbuf;
12063c4d7559SDave Watson alloc_payload:
1207d829e9c4SDaniel Borkmann 		ret = tls_alloc_encrypted_msg(sk, required_size);
12083c4d7559SDave Watson 		if (ret) {
12093c4d7559SDave Watson 			if (ret != -ENOSPC)
12103c4d7559SDave Watson 				goto wait_for_memory;
12113c4d7559SDave Watson 
12123c4d7559SDave Watson 			/* Adjust copy according to the amount that was
12133c4d7559SDave Watson 			 * actually allocated. The difference is due
12143c4d7559SDave Watson 			 * to the max sg elements limit.
12153c4d7559SDave Watson 			 */
1216d829e9c4SDaniel Borkmann 			copy -= required_size - msg_pl->sg.size;
12173c4d7559SDave Watson 			full_record = true;
12183c4d7559SDave Watson 		}
12193c4d7559SDave Watson 
1220d829e9c4SDaniel Borkmann 		sk_msg_page_add(msg_pl, page, copy, offset);
12213c4d7559SDave Watson 		sk_mem_charge(sk, copy);
1222d829e9c4SDaniel Borkmann 
12233c4d7559SDave Watson 		offset += copy;
12243c4d7559SDave Watson 		size -= copy;
1225d3b18ad3SJohn Fastabend 		copied += copy;
12263c4d7559SDave Watson 
1227d829e9c4SDaniel Borkmann 		tls_ctx->pending_open_record_frags = true;
1228d829e9c4SDaniel Borkmann 		if (full_record || eor || sk_msg_full(msg_pl)) {
1229d3b18ad3SJohn Fastabend 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1230d3b18ad3SJohn Fastabend 						  record_type, &copied, flags);
12313c4d7559SDave Watson 			if (ret) {
1232a42055e8SVakul Garg 				if (ret == -EINPROGRESS)
1233a42055e8SVakul Garg 					num_async++;
1234d3b18ad3SJohn Fastabend 				else if (ret == -ENOMEM)
1235d3b18ad3SJohn Fastabend 					goto wait_for_memory;
1236d3b18ad3SJohn Fastabend 				else if (ret != -EAGAIN) {
1237d3b18ad3SJohn Fastabend 					if (ret == -ENOSPC)
1238d3b18ad3SJohn Fastabend 						ret = 0;
12393c4d7559SDave Watson 					goto sendpage_end;
12403c4d7559SDave Watson 				}
12413c4d7559SDave Watson 			}
1242d3b18ad3SJohn Fastabend 		}
12433c4d7559SDave Watson 		continue;
12443c4d7559SDave Watson wait_for_sndbuf:
12453c4d7559SDave Watson 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
12463c4d7559SDave Watson wait_for_memory:
12473c4d7559SDave Watson 		ret = sk_stream_wait_memory(sk, &timeo);
12483c4d7559SDave Watson 		if (ret) {
1249c329ef96SJakub Kicinski 			if (ctx->open_rec)
1250d829e9c4SDaniel Borkmann 				tls_trim_both_msgs(sk, msg_pl->sg.size);
12513c4d7559SDave Watson 			goto sendpage_end;
12523c4d7559SDave Watson 		}
12533c4d7559SDave Watson 
1254c329ef96SJakub Kicinski 		if (ctx->open_rec)
12553c4d7559SDave Watson 			goto alloc_payload;
12563c4d7559SDave Watson 	}
12573c4d7559SDave Watson 
1258a42055e8SVakul Garg 	if (num_async) {
1259a42055e8SVakul Garg 		/* Transmit if any encryptions have completed */
1260a42055e8SVakul Garg 		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1261a42055e8SVakul Garg 			cancel_delayed_work(&ctx->tx_work.work);
1262a42055e8SVakul Garg 			tls_tx_records(sk, flags);
1263a42055e8SVakul Garg 		}
1264a42055e8SVakul Garg 	}
12653c4d7559SDave Watson sendpage_end:
12663c4d7559SDave Watson 	ret = sk_stream_error(sk, flags, ret);
1267a7bff11fSVadim Fedorenko 	return copied > 0 ? copied : ret;
12683c4d7559SDave Watson }
12693c4d7559SDave Watson 
1270d4ffb02dSWillem de Bruijn int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1271d4ffb02dSWillem de Bruijn 			   int offset, size_t size, int flags)
1272d4ffb02dSWillem de Bruijn {
1273d4ffb02dSWillem de Bruijn 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1274d4ffb02dSWillem de Bruijn 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
1275d4ffb02dSWillem de Bruijn 		      MSG_NO_SHARED_FRAGS))
12764a5cdc60SValentin Vidic 		return -EOPNOTSUPP;
1277d4ffb02dSWillem de Bruijn 
1278d4ffb02dSWillem de Bruijn 	return tls_sw_do_sendpage(sk, page, offset, size, flags);
1279d4ffb02dSWillem de Bruijn }
1280d4ffb02dSWillem de Bruijn 
12810608c69cSJohn Fastabend int tls_sw_sendpage(struct sock *sk, struct page *page,
12820608c69cSJohn Fastabend 		    int offset, size_t size, int flags)
12830608c69cSJohn Fastabend {
128479ffe608SJakub Kicinski 	struct tls_context *tls_ctx = tls_get_ctx(sk);
12850608c69cSJohn Fastabend 	int ret;
12860608c69cSJohn Fastabend 
12870608c69cSJohn Fastabend 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
12880608c69cSJohn Fastabend 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
12894a5cdc60SValentin Vidic 		return -EOPNOTSUPP;
12900608c69cSJohn Fastabend 
129179ffe608SJakub Kicinski 	mutex_lock(&tls_ctx->tx_lock);
12920608c69cSJohn Fastabend 	lock_sock(sk);
12930608c69cSJohn Fastabend 	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
12940608c69cSJohn Fastabend 	release_sock(sk);
129579ffe608SJakub Kicinski 	mutex_unlock(&tls_ctx->tx_lock);
12960608c69cSJohn Fastabend 	return ret;
12970608c69cSJohn Fastabend }
12980608c69cSJohn Fastabend 
1299d3b18ad3SJohn Fastabend static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
1300974271e5SJim Ma 				     bool nonblock, long timeo, int *err)
1301c46234ebSDave Watson {
1302c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1303f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1304c46234ebSDave Watson 	struct sk_buff *skb;
1305c46234ebSDave Watson 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1306c46234ebSDave Watson 
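	/* Sleep until the strparser hands us a record (ctx->recv_pkt) or the
	 * psock ingress queue has data, bailing out on socket errors,
	 * shutdown, timeouts and signals.
	 */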
1307d3b18ad3SJohn Fastabend 	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
1308c46234ebSDave Watson 		if (sk->sk_err) {
1309c46234ebSDave Watson 			*err = sock_error(sk);
1310c46234ebSDave Watson 			return NULL;
1311c46234ebSDave Watson 		}
1312c46234ebSDave Watson 
131320ffc7adSVadim Fedorenko 		if (!skb_queue_empty(&sk->sk_receive_queue)) {
131420ffc7adSVadim Fedorenko 			__strp_unpause(&ctx->strp);
131520ffc7adSVadim Fedorenko 			if (ctx->recv_pkt)
131620ffc7adSVadim Fedorenko 				return ctx->recv_pkt;
131720ffc7adSVadim Fedorenko 		}
131820ffc7adSVadim Fedorenko 
1319fcf4793eSDoron Roberts-Kedes 		if (sk->sk_shutdown & RCV_SHUTDOWN)
1320fcf4793eSDoron Roberts-Kedes 			return NULL;
1321fcf4793eSDoron Roberts-Kedes 
1322c46234ebSDave Watson 		if (sock_flag(sk, SOCK_DONE))
1323c46234ebSDave Watson 			return NULL;
1324c46234ebSDave Watson 
1325974271e5SJim Ma 		if (nonblock || !timeo) {
1326c46234ebSDave Watson 			*err = -EAGAIN;
1327c46234ebSDave Watson 			return NULL;
1328c46234ebSDave Watson 		}
1329c46234ebSDave Watson 
1330c46234ebSDave Watson 		add_wait_queue(sk_sleep(sk), &wait);
1331c46234ebSDave Watson 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1332d3b18ad3SJohn Fastabend 		sk_wait_event(sk, &timeo,
1333d3b18ad3SJohn Fastabend 			      ctx->recv_pkt != skb ||
1334d3b18ad3SJohn Fastabend 			      !sk_psock_queue_empty(psock),
1335d3b18ad3SJohn Fastabend 			      &wait);
1336c46234ebSDave Watson 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1337c46234ebSDave Watson 		remove_wait_queue(sk_sleep(sk), &wait);
1338c46234ebSDave Watson 
1339c46234ebSDave Watson 		/* Handle signals */
1340c46234ebSDave Watson 		if (signal_pending(current)) {
1341c46234ebSDave Watson 			*err = sock_intr_errno(timeo);
1342c46234ebSDave Watson 			return NULL;
1343c46234ebSDave Watson 		}
1344c46234ebSDave Watson 	}
1345c46234ebSDave Watson 
1346c46234ebSDave Watson 	return skb;
1347c46234ebSDave Watson }
1348c46234ebSDave Watson 
1349d829e9c4SDaniel Borkmann static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
1350d829e9c4SDaniel Borkmann 			       int length, int *pages_used,
1351d829e9c4SDaniel Borkmann 			       unsigned int *size_used,
1352d829e9c4SDaniel Borkmann 			       struct scatterlist *to,
1353d829e9c4SDaniel Borkmann 			       int to_max_pages)
1354d829e9c4SDaniel Borkmann {
1355d829e9c4SDaniel Borkmann 	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1356d829e9c4SDaniel Borkmann 	struct page *pages[MAX_SKB_FRAGS];
1357d829e9c4SDaniel Borkmann 	unsigned int size = *size_used;
1358d829e9c4SDaniel Borkmann 	ssize_t copied, use;
1359d829e9c4SDaniel Borkmann 	size_t offset;
1360d829e9c4SDaniel Borkmann 
1361d829e9c4SDaniel Borkmann 	while (length > 0) {
1362d829e9c4SDaniel Borkmann 		i = 0;
1363d829e9c4SDaniel Borkmann 		maxpages = to_max_pages - num_elem;
1364d829e9c4SDaniel Borkmann 		if (maxpages == 0) {
1365d829e9c4SDaniel Borkmann 			rc = -EFAULT;
1366d829e9c4SDaniel Borkmann 			goto out;
1367d829e9c4SDaniel Borkmann 		}
1368d829e9c4SDaniel Borkmann 		copied = iov_iter_get_pages(from, pages,
1369d829e9c4SDaniel Borkmann 					    length,
1370d829e9c4SDaniel Borkmann 					    maxpages, &offset);
1371d829e9c4SDaniel Borkmann 		if (copied <= 0) {
1372d829e9c4SDaniel Borkmann 			rc = -EFAULT;
1373d829e9c4SDaniel Borkmann 			goto out;
1374d829e9c4SDaniel Borkmann 		}
1375d829e9c4SDaniel Borkmann 
1376d829e9c4SDaniel Borkmann 		iov_iter_advance(from, copied);
1377d829e9c4SDaniel Borkmann 
1378d829e9c4SDaniel Borkmann 		length -= copied;
1379d829e9c4SDaniel Borkmann 		size += copied;
1380d829e9c4SDaniel Borkmann 		while (copied) {
1381d829e9c4SDaniel Borkmann 			use = min_t(int, copied, PAGE_SIZE - offset);
1382d829e9c4SDaniel Borkmann 
1383d829e9c4SDaniel Borkmann 			sg_set_page(&to[num_elem],
1384d829e9c4SDaniel Borkmann 				    pages[i], use, offset);
1385d829e9c4SDaniel Borkmann 			sg_unmark_end(&to[num_elem]);
1386d829e9c4SDaniel Borkmann 			/* We do not uncharge memory from this API */
1387d829e9c4SDaniel Borkmann 
1388d829e9c4SDaniel Borkmann 			offset = 0;
1389d829e9c4SDaniel Borkmann 			copied -= use;
1390d829e9c4SDaniel Borkmann 
1391d829e9c4SDaniel Borkmann 			i++;
1392d829e9c4SDaniel Borkmann 			num_elem++;
1393d829e9c4SDaniel Borkmann 		}
1394d829e9c4SDaniel Borkmann 	}
1395d829e9c4SDaniel Borkmann 	/* Mark the end in the last sg entry if newly added */
1396d829e9c4SDaniel Borkmann 	if (num_elem > *pages_used)
1397d829e9c4SDaniel Borkmann 		sg_mark_end(&to[num_elem - 1]);
1398d829e9c4SDaniel Borkmann out:
1399d829e9c4SDaniel Borkmann 	if (rc)
1400d829e9c4SDaniel Borkmann 		iov_iter_revert(from, size - *size_used);
1401d829e9c4SDaniel Borkmann 	*size_used = size;
1402d829e9c4SDaniel Borkmann 	*pages_used = num_elem;
1403d829e9c4SDaniel Borkmann 
1404d829e9c4SDaniel Borkmann 	return rc;
1405d829e9c4SDaniel Borkmann }
1406d829e9c4SDaniel Borkmann 
14070b243d00SVakul Garg /* This function decrypts the input skb into either out_iov or out_sg,
14080b243d00SVakul Garg  * or into the skb's own buffers. The input parameter 'zc' indicates
14090b243d00SVakul Garg  * whether zero-copy mode should be tried. With zero-copy mode, either
14100b243d00SVakul Garg  * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are
14110b243d00SVakul Garg  * NULL, the decryption happens in place inside the skb buffers, i.e.
14120b243d00SVakul Garg  * zero-copy gets disabled and 'zc' is updated accordingly.
14130b243d00SVakul Garg  */
14140b243d00SVakul Garg 
14150b243d00SVakul Garg static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
14160b243d00SVakul Garg 			    struct iov_iter *out_iov,
14170b243d00SVakul Garg 			    struct scatterlist *out_sg,
1418692d7b5dSVakul Garg 			    int *chunk, bool *zc, bool async)
14190b243d00SVakul Garg {
14200b243d00SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
14210b243d00SVakul Garg 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
14224509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
14230b243d00SVakul Garg 	struct strp_msg *rxm = strp_msg(skb);
1424c3f6bb74SJakub Kicinski 	struct tls_msg *tlm = tls_msg(skb);
14250b243d00SVakul Garg 	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
14260b243d00SVakul Garg 	struct aead_request *aead_req;
14270b243d00SVakul Garg 	struct sk_buff *unused;
14280b243d00SVakul Garg 	u8 *aad, *iv, *mem = NULL;
14290b243d00SVakul Garg 	struct scatterlist *sgin = NULL;
14300b243d00SVakul Garg 	struct scatterlist *sgout = NULL;
14314509de14SVakul Garg 	const int data_len = rxm->full_len - prot->overhead_size +
14324509de14SVakul Garg 			     prot->tail_size;
1433f295b3aeSVakul Garg 	int iv_offset = 0;
14340b243d00SVakul Garg 
14350b243d00SVakul Garg 	if (*zc && (out_iov || out_sg)) {
14360b243d00SVakul Garg 		if (out_iov)
1437b93235e6SJakub Kicinski 			n_sgout = 1 +
1438b93235e6SJakub Kicinski 				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
14390b243d00SVakul Garg 		else
14400b243d00SVakul Garg 			n_sgout = sg_nents(out_sg);
14414509de14SVakul Garg 		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
14424509de14SVakul Garg 				 rxm->full_len - prot->prepend_size);
14430b243d00SVakul Garg 	} else {
14440b243d00SVakul Garg 		n_sgout = 0;
14450b243d00SVakul Garg 		*zc = false;
14460927f71dSDoron Roberts-Kedes 		n_sgin = skb_cow_data(skb, 0, &unused);
14470b243d00SVakul Garg 	}
14480b243d00SVakul Garg 
14490b243d00SVakul Garg 	if (n_sgin < 1)
14500b243d00SVakul Garg 		return -EBADMSG;
14510b243d00SVakul Garg 
14520b243d00SVakul Garg 	/* Increment to accommodate AAD */
14530b243d00SVakul Garg 	n_sgin = n_sgin + 1;
14540b243d00SVakul Garg 
14550b243d00SVakul Garg 	nsg = n_sgin + n_sgout;
14560b243d00SVakul Garg 
14570b243d00SVakul Garg 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
14580b243d00SVakul Garg 	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
14594509de14SVakul Garg 	mem_size = mem_size + prot->aad_size;
14600b243d00SVakul Garg 	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
14610b243d00SVakul Garg 
14620b243d00SVakul Garg 	/* Allocate a single block of memory which contains
14630b243d00SVakul Garg 	 * aead_req || sgin[] || sgout[] || aad || iv.
14640b243d00SVakul Garg 	 * This order achieves correct alignment for aead_req, sgin, sgout.
14650b243d00SVakul Garg 	 */
14660b243d00SVakul Garg 	mem = kmalloc(mem_size, sk->sk_allocation);
14670b243d00SVakul Garg 	if (!mem)
14680b243d00SVakul Garg 		return -ENOMEM;
14690b243d00SVakul Garg 
14700b243d00SVakul Garg 	/* Segment the allocated memory */
14710b243d00SVakul Garg 	aead_req = (struct aead_request *)mem;
14720b243d00SVakul Garg 	sgin = (struct scatterlist *)(mem + aead_size);
14730b243d00SVakul Garg 	sgout = sgin + n_sgin;
14740b243d00SVakul Garg 	aad = (u8 *)(sgout + n_sgout);
14754509de14SVakul Garg 	iv = aad + prot->aad_size;
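	/* Resulting layout of the single allocation:
	 *
	 *	mem: [ aead_req | sgin[n_sgin] | sgout[n_sgout] | aad | iv ]
	 */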
14760b243d00SVakul Garg 
1477128cfb88STianjia Zhang 	/* For CCM based ciphers, the first byte of nonce+iv is a constant */
1478128cfb88STianjia Zhang 	switch (prot->cipher_type) {
1479128cfb88STianjia Zhang 	case TLS_CIPHER_AES_CCM_128:
1480128cfb88STianjia Zhang 		iv[0] = TLS_AES_CCM_IV_B0_BYTE;
1481f295b3aeSVakul Garg 		iv_offset = 1;
1482128cfb88STianjia Zhang 		break;
1483128cfb88STianjia Zhang 	case TLS_CIPHER_SM4_CCM:
1484128cfb88STianjia Zhang 		iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1485128cfb88STianjia Zhang 		iv_offset = 1;
1486128cfb88STianjia Zhang 		break;
1487f295b3aeSVakul Garg 	}
1488f295b3aeSVakul Garg 
14890b243d00SVakul Garg 	/* Prepare IV */
14900b243d00SVakul Garg 	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1491f295b3aeSVakul Garg 			    iv + iv_offset + prot->salt_size,
14924509de14SVakul Garg 			    prot->iv_size);
14930b243d00SVakul Garg 	if (err < 0) {
14940b243d00SVakul Garg 		kfree(mem);
14950b243d00SVakul Garg 		return err;
14960b243d00SVakul Garg 	}
1497a6acbe62SVadim Fedorenko 	if (prot->version == TLS_1_3_VERSION ||
1498a6acbe62SVadim Fedorenko 	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
1499f295b3aeSVakul Garg 		memcpy(iv + iv_offset, tls_ctx->rx.iv,
15009381fe8cSZiyang Xuan 		       prot->iv_size + prot->salt_size);
1501130b392cSDave Watson 	else
1502f295b3aeSVakul Garg 		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
15030b243d00SVakul Garg 
150459610606STianjia Zhang 	xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
1505130b392cSDave Watson 
15060b243d00SVakul Garg 	/* Prepare AAD */
15074509de14SVakul Garg 	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
15084509de14SVakul Garg 		     prot->tail_size,
1509c3f6bb74SJakub Kicinski 		     tls_ctx->rx.rec_seq, tlm->control, prot);
15100b243d00SVakul Garg 
15110b243d00SVakul Garg 	/* Prepare sgin */
15120b243d00SVakul Garg 	sg_init_table(sgin, n_sgin);
15134509de14SVakul Garg 	sg_set_buf(&sgin[0], aad, prot->aad_size);
15140b243d00SVakul Garg 	err = skb_to_sgvec(skb, &sgin[1],
15154509de14SVakul Garg 			   rxm->offset + prot->prepend_size,
15164509de14SVakul Garg 			   rxm->full_len - prot->prepend_size);
15170b243d00SVakul Garg 	if (err < 0) {
15180b243d00SVakul Garg 		kfree(mem);
15190b243d00SVakul Garg 		return err;
15200b243d00SVakul Garg 	}
15210b243d00SVakul Garg 
15220b243d00SVakul Garg 	if (n_sgout) {
15230b243d00SVakul Garg 		if (out_iov) {
15240b243d00SVakul Garg 			sg_init_table(sgout, n_sgout);
15254509de14SVakul Garg 			sg_set_buf(&sgout[0], aad, prot->aad_size);
15260b243d00SVakul Garg 
15270b243d00SVakul Garg 			*chunk = 0;
1528d829e9c4SDaniel Borkmann 			err = tls_setup_from_iter(sk, out_iov, data_len,
1529d829e9c4SDaniel Borkmann 						  &pages, chunk, &sgout[1],
1530d829e9c4SDaniel Borkmann 						  (n_sgout - 1));
15310b243d00SVakul Garg 			if (err < 0)
15320b243d00SVakul Garg 				goto fallback_to_reg_recv;
15330b243d00SVakul Garg 		} else if (out_sg) {
15340b243d00SVakul Garg 			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
15350b243d00SVakul Garg 		} else {
15360b243d00SVakul Garg 			goto fallback_to_reg_recv;
15370b243d00SVakul Garg 		}
15380b243d00SVakul Garg 	} else {
15390b243d00SVakul Garg fallback_to_reg_recv:
15400b243d00SVakul Garg 		sgout = sgin;
15410b243d00SVakul Garg 		pages = 0;
1542692d7b5dSVakul Garg 		*chunk = data_len;
15430b243d00SVakul Garg 		*zc = false;
15440b243d00SVakul Garg 	}
15450b243d00SVakul Garg 
15460b243d00SVakul Garg 	/* Prepare and submit AEAD request */
154794524d8fSVakul Garg 	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
1548692d7b5dSVakul Garg 				data_len, aead_req, async);
154994524d8fSVakul Garg 	if (err == -EINPROGRESS)
155094524d8fSVakul Garg 		return err;
15510b243d00SVakul Garg 
15520b243d00SVakul Garg 	/* Release the pages in case iov was mapped to pages */
15530b243d00SVakul Garg 	for (; pages > 0; pages--)
15540b243d00SVakul Garg 		put_page(sg_page(&sgout[pages]));
15550b243d00SVakul Garg 
15560b243d00SVakul Garg 	kfree(mem);
15570b243d00SVakul Garg 	return err;
15580b243d00SVakul Garg }
15590b243d00SVakul Garg 
1560dafb67f3SBoris Pismenny static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1561692d7b5dSVakul Garg 			      struct iov_iter *dest, int *chunk, bool *zc,
1562692d7b5dSVakul Garg 			      bool async)
1563dafb67f3SBoris Pismenny {
1564dafb67f3SBoris Pismenny 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1565dafb67f3SBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
15664509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1567dafb67f3SBoris Pismenny 	struct strp_msg *rxm = strp_msg(skb);
15687dc59c33SJakub Kicinski 	struct tls_msg *tlm = tls_msg(skb);
1569b53f4976SJakub Kicinski 	int pad, err = 0;
1570dafb67f3SBoris Pismenny 
15717dc59c33SJakub Kicinski 	if (!tlm->decrypted) {
1572b9d8fec9SJakub Kicinski 		if (tls_ctx->rx_conf == TLS_HW) {
15734de30a8dSJakub Kicinski 			err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
15744799ac81SBoris Pismenny 			if (err < 0)
15754799ac81SBoris Pismenny 				return err;
1576b9d8fec9SJakub Kicinski 		}
1577be2fbc15SJakub Kicinski 
1578d069b780SBoris Pismenny 		/* Still not decrypted after tls_device */
15797dc59c33SJakub Kicinski 		if (!tlm->decrypted) {
1580d069b780SBoris Pismenny 			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
1581d069b780SBoris Pismenny 					       async);
158294524d8fSVakul Garg 			if (err < 0) {
158394524d8fSVakul Garg 				if (err == -EINPROGRESS)
1584fb0f886fSJakub Kicinski 					tls_advance_record_sn(sk, prot,
1585fb0f886fSJakub Kicinski 							      &tls_ctx->rx);
15865c5d22a7SJakub Kicinski 				else if (err == -EBADMSG)
15875c5d22a7SJakub Kicinski 					TLS_INC_STATS(sock_net(sk),
15885c5d22a7SJakub Kicinski 						      LINUX_MIB_TLSDECRYPTERROR);
1589dafb67f3SBoris Pismenny 				return err;
159094524d8fSVakul Garg 			}
1591c43ac97bSJakub Kicinski 		} else {
1592c43ac97bSJakub Kicinski 			*zc = false;
1593d069b780SBoris Pismenny 		}
1594130b392cSDave Watson 
1595c3f6bb74SJakub Kicinski 		pad = padding_length(prot, skb);
1596b53f4976SJakub Kicinski 		if (pad < 0)
1597b53f4976SJakub Kicinski 			return pad;
1598b53f4976SJakub Kicinski 
1599b53f4976SJakub Kicinski 		rxm->full_len -= pad;
16004509de14SVakul Garg 		rxm->offset += prot->prepend_size;
16014509de14SVakul Garg 		rxm->full_len -= prot->overhead_size;
1602fb0f886fSJakub Kicinski 		tls_advance_record_sn(sk, prot, &tls_ctx->rx);
16037dc59c33SJakub Kicinski 		tlm->decrypted = 1;
1604dafb67f3SBoris Pismenny 		ctx->saved_data_ready(sk);
1605fedf201eSDave Watson 	} else {
1606fedf201eSDave Watson 		*zc = false;
1607fedf201eSDave Watson 	}
1608dafb67f3SBoris Pismenny 
1609dafb67f3SBoris Pismenny 	return err;
1610dafb67f3SBoris Pismenny }
1611dafb67f3SBoris Pismenny 
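/* Decrypt a record into a caller-provided scatterlist (or in place inside the
 * skb when sgout is NULL).  'zc' starts out true so decrypt_internal() targets
 * sgout; it is flipped to false there if zero-copy cannot be used.
 */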
1612dafb67f3SBoris Pismenny int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1613c46234ebSDave Watson 		struct scatterlist *sgout)
1614c46234ebSDave Watson {
16150b243d00SVakul Garg 	bool zc = true;
16160b243d00SVakul Garg 	int chunk;
1617c46234ebSDave Watson 
1618692d7b5dSVakul Garg 	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
1619c46234ebSDave Watson }
1620c46234ebSDave Watson 
1621c46234ebSDave Watson static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1622c46234ebSDave Watson 			       unsigned int len)
1623c46234ebSDave Watson {
1624c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1625f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
162694524d8fSVakul Garg 
162794524d8fSVakul Garg 	if (skb) {
1628c46234ebSDave Watson 		struct strp_msg *rxm = strp_msg(skb);
1629c46234ebSDave Watson 
1630c46234ebSDave Watson 		if (len < rxm->full_len) {
1631c46234ebSDave Watson 			rxm->offset += len;
1632c46234ebSDave Watson 			rxm->full_len -= len;
1633c46234ebSDave Watson 			return false;
1634c46234ebSDave Watson 		}
1635a88c26f6SVakul Garg 		consume_skb(skb);
163694524d8fSVakul Garg 	}
1637c46234ebSDave Watson 
1638c46234ebSDave Watson 	/* Finished with message */
1639c46234ebSDave Watson 	ctx->recv_pkt = NULL;
16407170e604SDoron Roberts-Kedes 	__strp_unpause(&ctx->strp);
1641c46234ebSDave Watson 
1642c46234ebSDave Watson 	return true;
1643c46234ebSDave Watson }
1644c46234ebSDave Watson 
1645692d7b5dSVakul Garg /* This function traverses the rx_list in the tls receive context and copies
16462b794c40SVakul Garg  * the decrypted records into the buffer provided by the caller when zero
1647692d7b5dSVakul Garg  * copy is not in use. Further, a record is removed from the rx_list if it is
1648692d7b5dSVakul Garg  * not a peek case and the record has been consumed completely.
1649692d7b5dSVakul Garg  */
1650692d7b5dSVakul Garg static int process_rx_list(struct tls_sw_context_rx *ctx,
1651692d7b5dSVakul Garg 			   struct msghdr *msg,
16522b794c40SVakul Garg 			   u8 *control,
16532b794c40SVakul Garg 			   bool *cmsg,
1654692d7b5dSVakul Garg 			   size_t skip,
1655692d7b5dSVakul Garg 			   size_t len,
1656692d7b5dSVakul Garg 			   bool zc,
1657692d7b5dSVakul Garg 			   bool is_peek)
1658692d7b5dSVakul Garg {
1659692d7b5dSVakul Garg 	struct sk_buff *skb = skb_peek(&ctx->rx_list);
16602b794c40SVakul Garg 	u8 ctrl = *control;
16612b794c40SVakul Garg 	u8 msgc = *cmsg;
16622b794c40SVakul Garg 	struct tls_msg *tlm;
1663692d7b5dSVakul Garg 	ssize_t copied = 0;
1664692d7b5dSVakul Garg 
16652b794c40SVakul Garg 	/* Set the record type in 'control' if caller didn't pass it */
16662b794c40SVakul Garg 	if (!ctrl && skb) {
16672b794c40SVakul Garg 		tlm = tls_msg(skb);
16682b794c40SVakul Garg 		ctrl = tlm->control;
16692b794c40SVakul Garg 	}
16702b794c40SVakul Garg 
1671692d7b5dSVakul Garg 	while (skip && skb) {
1672692d7b5dSVakul Garg 		struct strp_msg *rxm = strp_msg(skb);
16732b794c40SVakul Garg 		tlm = tls_msg(skb);
16742b794c40SVakul Garg 
16752b794c40SVakul Garg 		/* Cannot process a record of different type */
16762b794c40SVakul Garg 		if (ctrl != tlm->control)
16772b794c40SVakul Garg 			return 0;
1678692d7b5dSVakul Garg 
1679692d7b5dSVakul Garg 		if (skip < rxm->full_len)
1680692d7b5dSVakul Garg 			break;
1681692d7b5dSVakul Garg 
1682692d7b5dSVakul Garg 		skip = skip - rxm->full_len;
1683692d7b5dSVakul Garg 		skb = skb_peek_next(skb, &ctx->rx_list);
1684692d7b5dSVakul Garg 	}
1685692d7b5dSVakul Garg 
1686692d7b5dSVakul Garg 	while (len && skb) {
1687692d7b5dSVakul Garg 		struct sk_buff *next_skb;
1688692d7b5dSVakul Garg 		struct strp_msg *rxm = strp_msg(skb);
1689692d7b5dSVakul Garg 		int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1690692d7b5dSVakul Garg 
16912b794c40SVakul Garg 		tlm = tls_msg(skb);
16922b794c40SVakul Garg 
16932b794c40SVakul Garg 		/* Cannot process a record of different type */
16942b794c40SVakul Garg 		if (ctrl != tlm->control)
16952b794c40SVakul Garg 			return 0;
16962b794c40SVakul Garg 
16972b794c40SVakul Garg 		/* Set record type if not already done. For a non-data record,
16982b794c40SVakul Garg 		 * do not proceed if record type could not be copied.
16992b794c40SVakul Garg 		 */
17002b794c40SVakul Garg 		if (!msgc) {
17012b794c40SVakul Garg 			int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
17022b794c40SVakul Garg 					    sizeof(ctrl), &ctrl);
17032b794c40SVakul Garg 			msgc = true;
17042b794c40SVakul Garg 			if (ctrl != TLS_RECORD_TYPE_DATA) {
17052b794c40SVakul Garg 				if (cerr || msg->msg_flags & MSG_CTRUNC)
17062b794c40SVakul Garg 					return -EIO;
17072b794c40SVakul Garg 
17082b794c40SVakul Garg 				*cmsg = msgc;
17092b794c40SVakul Garg 			}
17102b794c40SVakul Garg 		}
17112b794c40SVakul Garg 
1712692d7b5dSVakul Garg 		if (!zc || (rxm->full_len - skip) > len) {
1713692d7b5dSVakul Garg 			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1714692d7b5dSVakul Garg 						    msg, chunk);
1715692d7b5dSVakul Garg 			if (err < 0)
1716692d7b5dSVakul Garg 				return err;
1717692d7b5dSVakul Garg 		}
1718692d7b5dSVakul Garg 
1719692d7b5dSVakul Garg 		len = len - chunk;
1720692d7b5dSVakul Garg 		copied = copied + chunk;
1721692d7b5dSVakul Garg 
1722692d7b5dSVakul Garg 		/* Consume the data from the record if it is not a peek case */
1723692d7b5dSVakul Garg 		if (!is_peek) {
1724692d7b5dSVakul Garg 			rxm->offset = rxm->offset + chunk;
1725692d7b5dSVakul Garg 			rxm->full_len = rxm->full_len - chunk;
1726692d7b5dSVakul Garg 
1727692d7b5dSVakul Garg 			/* Return if there is unconsumed data in the record */
1728692d7b5dSVakul Garg 			if (rxm->full_len - skip)
1729692d7b5dSVakul Garg 				break;
1730692d7b5dSVakul Garg 		}
1731692d7b5dSVakul Garg 
1732692d7b5dSVakul Garg 		/* The remaining skip-bytes must lie in 1st record in rx_list.
1733692d7b5dSVakul Garg 		 * So from the 2nd record, 'skip' should be 0.
1734692d7b5dSVakul Garg 		 */
1735692d7b5dSVakul Garg 		skip = 0;
1736692d7b5dSVakul Garg 
1737692d7b5dSVakul Garg 		if (msg)
1738692d7b5dSVakul Garg 			msg->msg_flags |= MSG_EOR;
1739692d7b5dSVakul Garg 
1740692d7b5dSVakul Garg 		next_skb = skb_peek_next(skb, &ctx->rx_list);
1741692d7b5dSVakul Garg 
1742692d7b5dSVakul Garg 		if (!is_peek) {
1743692d7b5dSVakul Garg 			skb_unlink(skb, &ctx->rx_list);
1744a88c26f6SVakul Garg 			consume_skb(skb);
1745692d7b5dSVakul Garg 		}
1746692d7b5dSVakul Garg 
1747692d7b5dSVakul Garg 		skb = next_skb;
1748692d7b5dSVakul Garg 	}
1749692d7b5dSVakul Garg 
17502b794c40SVakul Garg 	*control = ctrl;
1751692d7b5dSVakul Garg 	return copied;
1752692d7b5dSVakul Garg }
1753692d7b5dSVakul Garg 
1754c46234ebSDave Watson int tls_sw_recvmsg(struct sock *sk,
1755c46234ebSDave Watson 		   struct msghdr *msg,
1756c46234ebSDave Watson 		   size_t len,
1757c46234ebSDave Watson 		   int nonblock,
1758c46234ebSDave Watson 		   int flags,
1759c46234ebSDave Watson 		   int *addr_len)
1760c46234ebSDave Watson {
1761c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1762f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
17634509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1764d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
1765bfc06e1aSJakub Kicinski 	int num_async, pending;
1766692d7b5dSVakul Garg 	unsigned char control = 0;
1767692d7b5dSVakul Garg 	ssize_t decrypted = 0;
1768c46234ebSDave Watson 	struct strp_msg *rxm;
17692b794c40SVakul Garg 	struct tls_msg *tlm;
1770c46234ebSDave Watson 	struct sk_buff *skb;
1771c46234ebSDave Watson 	ssize_t copied = 0;
1772c46234ebSDave Watson 	bool cmsg = false;
177306030dbaSDaniel Borkmann 	int target, err = 0;
1774c46234ebSDave Watson 	long timeo;
177500e23707SDavid Howells 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1776692d7b5dSVakul Garg 	bool is_peek = flags & MSG_PEEK;
1777e91de6afSJohn Fastabend 	bool bpf_strp_enabled;
1778c46234ebSDave Watson 
1779c46234ebSDave Watson 	flags |= nonblock;
1780c46234ebSDave Watson 
1781c46234ebSDave Watson 	if (unlikely(flags & MSG_ERRQUEUE))
1782c46234ebSDave Watson 		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1783c46234ebSDave Watson 
1784d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
1785c46234ebSDave Watson 	lock_sock(sk);
1786e91de6afSJohn Fastabend 	bpf_strp_enabled = sk_psock_strp_enabled(psock);
1787c46234ebSDave Watson 
1788692d7b5dSVakul Garg 	/* Process pending decrypted records. They must be copied without zero-copy */
17892b794c40SVakul Garg 	err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
17902b794c40SVakul Garg 			      is_peek);
1791692d7b5dSVakul Garg 	if (err < 0) {
1792692d7b5dSVakul Garg 		tls_err_abort(sk, err);
1793692d7b5dSVakul Garg 		goto end;
1794692d7b5dSVakul Garg 	}
1795692d7b5dSVakul Garg 
1796d5123eddSJakub Kicinski 	copied = err;
179746a16959SJakub Kicinski 	if (len <= copied)
1798bfc06e1aSJakub Kicinski 		goto end;
179946a16959SJakub Kicinski 
180046a16959SJakub Kicinski 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
180146a16959SJakub Kicinski 	len = len - copied;
180246a16959SJakub Kicinski 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1803692d7b5dSVakul Garg 
1804bfc06e1aSJakub Kicinski 	decrypted = 0;
1805bfc06e1aSJakub Kicinski 	num_async = 0;
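	/* Main receive loop: keep pulling records while the caller still
	 * wants data and either fewer than 'target' (SO_RCVLOWAT) bytes have
	 * been copied or a parsed record is already waiting.
	 */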
180604b25a54SJakub Kicinski 	while (len && (decrypted + copied < target || ctx->recv_pkt)) {
1807692d7b5dSVakul Garg 		bool retain_skb = false;
1808692d7b5dSVakul Garg 		bool zc = false;
1809692d7b5dSVakul Garg 		int to_decrypt;
1810c46234ebSDave Watson 		int chunk = 0;
18117754bd63SEran Ben Elisha 		bool async_capable;
18127754bd63SEran Ben Elisha 		bool async = false;
1813c46234ebSDave Watson 
1814974271e5SJim Ma 		skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
1815d3b18ad3SJohn Fastabend 		if (!skb) {
1816d3b18ad3SJohn Fastabend 			if (psock) {
18172bc793e3SCong Wang 				int ret = sk_msg_recvmsg(sk, psock, msg, len,
18182bc793e3SCong Wang 							 flags);
1819d3b18ad3SJohn Fastabend 
1820d3b18ad3SJohn Fastabend 				if (ret > 0) {
1821692d7b5dSVakul Garg 					decrypted += ret;
1822d3b18ad3SJohn Fastabend 					len -= ret;
1823d3b18ad3SJohn Fastabend 					continue;
1824d3b18ad3SJohn Fastabend 				}
1825d3b18ad3SJohn Fastabend 			}
1826c46234ebSDave Watson 			goto recv_end;
1827d3b18ad3SJohn Fastabend 		}
1828c46234ebSDave Watson 
1829c46234ebSDave Watson 		rxm = strp_msg(skb);
1830c3f6bb74SJakub Kicinski 		tlm = tls_msg(skb);
183194524d8fSVakul Garg 
18324509de14SVakul Garg 		to_decrypt = rxm->full_len - prot->overhead_size;
1833fedf201eSDave Watson 
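		/* Zero-copy decryption straight into the user buffer is only
		 * attempted when the whole record fits in the remaining
		 * length, the iterator is not a kernel kvec, we are not
		 * peeking, the record carries application data, this is not
		 * TLS 1.3 and no BPF strparser needs to see the plaintext.
		 */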
1834fedf201eSDave Watson 		if (to_decrypt <= len && !is_kvec && !is_peek &&
1835c3f6bb74SJakub Kicinski 		    tlm->control == TLS_RECORD_TYPE_DATA &&
1836e91de6afSJohn Fastabend 		    prot->version != TLS_1_3_VERSION &&
1837e91de6afSJohn Fastabend 		    !bpf_strp_enabled)
1838fedf201eSDave Watson 			zc = true;
1839fedf201eSDave Watson 
1840c0ab4732SVakul Garg 		/* Do not use async mode if record is non-data */
1841c3f6bb74SJakub Kicinski 		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
18427754bd63SEran Ben Elisha 			async_capable = ctx->async_capable;
1843c0ab4732SVakul Garg 		else
18447754bd63SEran Ben Elisha 			async_capable = false;
1845c0ab4732SVakul Garg 
1846fedf201eSDave Watson 		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
18477754bd63SEran Ben Elisha 					 &chunk, &zc, async_capable);
1848fedf201eSDave Watson 		if (err < 0 && err != -EINPROGRESS) {
1849da353facSDaniel Jordan 			tls_err_abort(sk, -EBADMSG);
1850fedf201eSDave Watson 			goto recv_end;
1851fedf201eSDave Watson 		}
1852fedf201eSDave Watson 
18537754bd63SEran Ben Elisha 		if (err == -EINPROGRESS) {
18547754bd63SEran Ben Elisha 			async = true;
1855fedf201eSDave Watson 			num_async++;
18567754bd63SEran Ben Elisha 		}
18572b794c40SVakul Garg 
18582b794c40SVakul Garg 		/* If the type of records being processed is not known yet,
18592b794c40SVakul Garg 		 * set it to the record type just dequeued. If it is already
18602b794c40SVakul Garg 		 * known but does not match the record type just dequeued, go
18612b794c40SVakul Garg 		 * to end. We always have the record type here: for TLS 1.2 it
18622b794c40SVakul Garg 		 * is known as soon as the record is dequeued from the stream
18632b794c40SVakul Garg 		 * parser, and for TLS 1.3 async is disabled.
18642b794c40SVakul Garg 		 */
18652b794c40SVakul Garg 
18662b794c40SVakul Garg 		if (!control)
18672b794c40SVakul Garg 			control = tlm->control;
18682b794c40SVakul Garg 		else if (control != tlm->control)
18692b794c40SVakul Garg 			goto recv_end;
1870fedf201eSDave Watson 
1871c46234ebSDave Watson 		if (!cmsg) {
1872c46234ebSDave Watson 			int cerr;
1873c46234ebSDave Watson 
1874c46234ebSDave Watson 			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
18752b794c40SVakul Garg 					sizeof(control), &control);
1876c46234ebSDave Watson 			cmsg = true;
18772b794c40SVakul Garg 			if (control != TLS_RECORD_TYPE_DATA) {
1878c46234ebSDave Watson 				if (cerr || msg->msg_flags & MSG_CTRUNC) {
1879c46234ebSDave Watson 					err = -EIO;
1880c46234ebSDave Watson 					goto recv_end;
1881c46234ebSDave Watson 				}
1882c46234ebSDave Watson 			}
1883c46234ebSDave Watson 		}
1884c46234ebSDave Watson 
1885c0ab4732SVakul Garg 		if (async)
1886c0ab4732SVakul Garg 			goto pick_next_record;
1887c0ab4732SVakul Garg 
1888c46234ebSDave Watson 		if (!zc) {
1889e91de6afSJohn Fastabend 			if (bpf_strp_enabled) {
1890e91de6afSJohn Fastabend 				err = sk_psock_tls_strp_read(psock, skb);
1891e91de6afSJohn Fastabend 				if (err != __SK_PASS) {
1892e91de6afSJohn Fastabend 					rxm->offset = rxm->offset + rxm->full_len;
1893e91de6afSJohn Fastabend 					rxm->full_len = 0;
1894e91de6afSJohn Fastabend 					if (err == __SK_DROP)
1895e91de6afSJohn Fastabend 						consume_skb(skb);
1896e91de6afSJohn Fastabend 					ctx->recv_pkt = NULL;
1897e91de6afSJohn Fastabend 					__strp_unpause(&ctx->strp);
1898e91de6afSJohn Fastabend 					continue;
1899e91de6afSJohn Fastabend 				}
1900e91de6afSJohn Fastabend 			}
1901e91de6afSJohn Fastabend 
1902692d7b5dSVakul Garg 			if (rxm->full_len > len) {
1903692d7b5dSVakul Garg 				retain_skb = true;
1904692d7b5dSVakul Garg 				chunk = len;
1905692d7b5dSVakul Garg 			} else {
1906692d7b5dSVakul Garg 				chunk = rxm->full_len;
1907692d7b5dSVakul Garg 			}
190894524d8fSVakul Garg 
1909692d7b5dSVakul Garg 			err = skb_copy_datagram_msg(skb, rxm->offset,
1910692d7b5dSVakul Garg 						    msg, chunk);
1911c46234ebSDave Watson 			if (err < 0)
1912c46234ebSDave Watson 				goto recv_end;
1913692d7b5dSVakul Garg 
1914692d7b5dSVakul Garg 			if (!is_peek) {
1915692d7b5dSVakul Garg 				rxm->offset = rxm->offset + chunk;
1916692d7b5dSVakul Garg 				rxm->full_len = rxm->full_len - chunk;
1917692d7b5dSVakul Garg 			}
1918692d7b5dSVakul Garg 		}
1919c46234ebSDave Watson 
192094524d8fSVakul Garg pick_next_record:
1921692d7b5dSVakul Garg 		if (chunk > len)
1922692d7b5dSVakul Garg 			chunk = len;
1923c46234ebSDave Watson 
1924692d7b5dSVakul Garg 		decrypted += chunk;
1925692d7b5dSVakul Garg 		len -= chunk;
1926692d7b5dSVakul Garg 
1927692d7b5dSVakul Garg 		/* For async or peek case, queue the current skb */
1928692d7b5dSVakul Garg 		if (async || is_peek || retain_skb) {
1929692d7b5dSVakul Garg 			skb_queue_tail(&ctx->rx_list, skb);
193094524d8fSVakul Garg 			skb = NULL;
1931692d7b5dSVakul Garg 		}
193294524d8fSVakul Garg 
1933c46234ebSDave Watson 		if (tls_sw_advance_skb(sk, skb, chunk)) {
1934c46234ebSDave Watson 			/* Return full control message to
1935c46234ebSDave Watson 			 * userspace before trying to parse
1936c46234ebSDave Watson 			 * another message type
1937c46234ebSDave Watson 			 */
1938c46234ebSDave Watson 			msg->msg_flags |= MSG_EOR;
19393fe16edfSVadim Fedorenko 			if (control != TLS_RECORD_TYPE_DATA)
1940c46234ebSDave Watson 				goto recv_end;
194194524d8fSVakul Garg 		} else {
194294524d8fSVakul Garg 			break;
1943c46234ebSDave Watson 		}
194404b25a54SJakub Kicinski 	}
1945c46234ebSDave Watson 
1946c46234ebSDave Watson recv_end:
194794524d8fSVakul Garg 	if (num_async) {
194894524d8fSVakul Garg 		/* Wait for all previously submitted records to be decrypted */
19490cada332SVinay Kumar Yadav 		spin_lock_bh(&ctx->decrypt_compl_lock);
19500cada332SVinay Kumar Yadav 		ctx->async_notify = true;
19510cada332SVinay Kumar Yadav 		pending = atomic_read(&ctx->decrypt_pending);
19520cada332SVinay Kumar Yadav 		spin_unlock_bh(&ctx->decrypt_compl_lock);
19530cada332SVinay Kumar Yadav 		if (pending) {
195494524d8fSVakul Garg 			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
195594524d8fSVakul Garg 			if (err) {
195694524d8fSVakul Garg 				/* one of async decrypt failed */
195794524d8fSVakul Garg 				tls_err_abort(sk, err);
195894524d8fSVakul Garg 				copied = 0;
1959692d7b5dSVakul Garg 				decrypted = 0;
1960692d7b5dSVakul Garg 				goto end;
196194524d8fSVakul Garg 			}
196294524d8fSVakul Garg 		} else {
196394524d8fSVakul Garg 			reinit_completion(&ctx->async_wait.completion);
196494524d8fSVakul Garg 		}
19650cada332SVinay Kumar Yadav 
19660cada332SVinay Kumar Yadav 		/* There can be no concurrent accesses, since we have no
19670cada332SVinay Kumar Yadav 		 * pending decrypt operations
19680cada332SVinay Kumar Yadav 		 */
196994524d8fSVakul Garg 		WRITE_ONCE(ctx->async_notify, false);
1970692d7b5dSVakul Garg 
1971692d7b5dSVakul Garg 		/* Drain records from the rx_list & copy if required */
1972692d7b5dSVakul Garg 		if (is_peek || is_kvec)
19732b794c40SVakul Garg 			err = process_rx_list(ctx, msg, &control, &cmsg, copied,
1974692d7b5dSVakul Garg 					      decrypted, false, is_peek);
1975692d7b5dSVakul Garg 		else
19762b794c40SVakul Garg 			err = process_rx_list(ctx, msg, &control, &cmsg, 0,
1977692d7b5dSVakul Garg 					      decrypted, true, is_peek);
1978692d7b5dSVakul Garg 		if (err < 0) {
1979692d7b5dSVakul Garg 			tls_err_abort(sk, err);
1980692d7b5dSVakul Garg 			copied = 0;
1981692d7b5dSVakul Garg 			goto end;
198294524d8fSVakul Garg 		}
1983692d7b5dSVakul Garg 	}
1984692d7b5dSVakul Garg 
1985692d7b5dSVakul Garg 	copied += decrypted;
1986692d7b5dSVakul Garg 
1987692d7b5dSVakul Garg end:
1988c46234ebSDave Watson 	release_sock(sk);
1989ffef737fSGal Pressman 	sk_defer_free_flush(sk);
1990d3b18ad3SJohn Fastabend 	if (psock)
1991d3b18ad3SJohn Fastabend 		sk_psock_put(sk, psock);
1992c46234ebSDave Watson 	return copied ? : err;
1993c46234ebSDave Watson }
1994c46234ebSDave Watson 
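/* For reference, a userspace reader that wants the record type delivered via
 * the put_cmsg() calls above could look roughly like this (an illustrative
 * sketch, not part of the kernel sources):
 *
 *	char buf[16384], cbuf[CMSG_SPACE(1)];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	ssize_t n = recvmsg(fd, &m, 0);
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *	unsigned char record_type = TLS_RECORD_TYPE_DATA;
 *	if (c && c->cmsg_level == SOL_TLS && c->cmsg_type == TLS_GET_RECORD_TYPE)
 *		record_type = *CMSG_DATA(c);
 */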
1995c46234ebSDave Watson ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
1996c46234ebSDave Watson 			   struct pipe_inode_info *pipe,
1997c46234ebSDave Watson 			   size_t len, unsigned int flags)
1998c46234ebSDave Watson {
1999c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
2000f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2001c46234ebSDave Watson 	struct strp_msg *rxm = NULL;
2002c46234ebSDave Watson 	struct sock *sk = sock->sk;
2003c3f6bb74SJakub Kicinski 	struct tls_msg *tlm;
2004c46234ebSDave Watson 	struct sk_buff *skb;
2005c46234ebSDave Watson 	ssize_t copied = 0;
2006e062fe99SJakub Kicinski 	bool from_queue;
2007c46234ebSDave Watson 	int err = 0;
2008c46234ebSDave Watson 	long timeo;
2009c46234ebSDave Watson 	int chunk;
20100b243d00SVakul Garg 	bool zc = false;
2011c46234ebSDave Watson 
2012c46234ebSDave Watson 	lock_sock(sk);
2013c46234ebSDave Watson 
2014974271e5SJim Ma 	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
2015c46234ebSDave Watson 
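	/* A previous short splice may have left a partially consumed record
	 * at the head of rx_list; consume that before asking the stream
	 * parser for more data.
	 */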
2016e062fe99SJakub Kicinski 	from_queue = !skb_queue_empty(&ctx->rx_list);
2017e062fe99SJakub Kicinski 	if (from_queue) {
2018e062fe99SJakub Kicinski 		skb = __skb_dequeue(&ctx->rx_list);
2019e062fe99SJakub Kicinski 	} else {
2020e062fe99SJakub Kicinski 		skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo,
2021e062fe99SJakub Kicinski 				    &err);
2022c46234ebSDave Watson 		if (!skb)
2023c46234ebSDave Watson 			goto splice_read_end;
2024c46234ebSDave Watson 
2025fedf201eSDave Watson 		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
2026520493f6SJakub Kicinski 		if (err < 0) {
2027520493f6SJakub Kicinski 			tls_err_abort(sk, -EBADMSG);
2028520493f6SJakub Kicinski 			goto splice_read_end;
2029520493f6SJakub Kicinski 		}
2030e062fe99SJakub Kicinski 	}
2031fedf201eSDave Watson 
2032c3f6bb74SJakub Kicinski 	rxm = strp_msg(skb);
2033c3f6bb74SJakub Kicinski 	tlm = tls_msg(skb);
2034c3f6bb74SJakub Kicinski 
2035c46234ebSDave Watson 	/* splice does not support reading control messages */
2036c3f6bb74SJakub Kicinski 	if (tlm->control != TLS_RECORD_TYPE_DATA) {
20374a5cdc60SValentin Vidic 		err = -EINVAL;
2038c46234ebSDave Watson 		goto splice_read_end;
2039c46234ebSDave Watson 	}
2040c46234ebSDave Watson 
2041c46234ebSDave Watson 	chunk = min_t(unsigned int, rxm->full_len, len);
2042c46234ebSDave Watson 	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2043c46234ebSDave Watson 	if (copied < 0)
2044c46234ebSDave Watson 		goto splice_read_end;
2045c46234ebSDave Watson 
2046e062fe99SJakub Kicinski 	if (!from_queue) {
2047e062fe99SJakub Kicinski 		ctx->recv_pkt = NULL;
2048e062fe99SJakub Kicinski 		__strp_unpause(&ctx->strp);
2049e062fe99SJakub Kicinski 	}
2050e062fe99SJakub Kicinski 	if (chunk < rxm->full_len) {
2051e062fe99SJakub Kicinski 		__skb_queue_head(&ctx->rx_list, skb);
2052e062fe99SJakub Kicinski 		rxm->offset += len;
2053e062fe99SJakub Kicinski 		rxm->full_len -= len;
2054e062fe99SJakub Kicinski 	} else {
2055e062fe99SJakub Kicinski 		consume_skb(skb);
2056e062fe99SJakub Kicinski 	}
2057c46234ebSDave Watson 
2058c46234ebSDave Watson splice_read_end:
2059c46234ebSDave Watson 	release_sock(sk);
2060db094aa8SGal Pressman 	sk_defer_free_flush(sk);
2061c46234ebSDave Watson 	return copied ? : err;
2062c46234ebSDave Watson }
2063c46234ebSDave Watson 
20647b50ecfcSCong Wang bool tls_sw_sock_is_readable(struct sock *sk)
2065c46234ebSDave Watson {
2066c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2067f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2068d3b18ad3SJohn Fastabend 	bool ingress_empty = true;
2069d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
2070c46234ebSDave Watson 
2071d3b18ad3SJohn Fastabend 	rcu_read_lock();
2072d3b18ad3SJohn Fastabend 	psock = sk_psock(sk);
2073d3b18ad3SJohn Fastabend 	if (psock)
2074d3b18ad3SJohn Fastabend 		ingress_empty = list_empty(&psock->ingress_msg);
2075d3b18ad3SJohn Fastabend 	rcu_read_unlock();
2076c46234ebSDave Watson 
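	/* Readable if the psock has queued messages, the strparser holds a
	 * parsed record, or decrypted records are waiting on rx_list.
	 */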
207713aecb17SJakub Kicinski 	return !ingress_empty || ctx->recv_pkt ||
207813aecb17SJakub Kicinski 		!skb_queue_empty(&ctx->rx_list);
2079c46234ebSDave Watson }
2080c46234ebSDave Watson 
2081c46234ebSDave Watson static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2082c46234ebSDave Watson {
2083c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
20844509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
20853463e51dSKees Cook 	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2086c46234ebSDave Watson 	struct strp_msg *rxm = strp_msg(skb);
2087c3f6bb74SJakub Kicinski 	struct tls_msg *tlm = tls_msg(skb);
2088c46234ebSDave Watson 	size_t cipher_overhead;
2089c46234ebSDave Watson 	size_t data_len = 0;
2090c46234ebSDave Watson 	int ret;
2091c46234ebSDave Watson 
2092c46234ebSDave Watson 	/* Verify that we have a full TLS header, or wait for more data */
20934509de14SVakul Garg 	if (rxm->offset + prot->prepend_size > skb->len)
2094c46234ebSDave Watson 		return 0;
2095c46234ebSDave Watson 
20963463e51dSKees Cook 	/* Sanity-check size of on-stack buffer. */
20974509de14SVakul Garg 	if (WARN_ON(prot->prepend_size > sizeof(header))) {
20983463e51dSKees Cook 		ret = -EINVAL;
20993463e51dSKees Cook 		goto read_failure;
21003463e51dSKees Cook 	}
21013463e51dSKees Cook 
2102c46234ebSDave Watson 	/* Linearize header to local buffer */
21034509de14SVakul Garg 	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
2104c46234ebSDave Watson 	if (ret < 0)
2105c46234ebSDave Watson 		goto read_failure;
2106c46234ebSDave Watson 
2107863533e3SJakub Kicinski 	tlm->decrypted = 0;
2108c3f6bb74SJakub Kicinski 	tlm->control = header[0];
2109c46234ebSDave Watson 
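	/* TLS record header layout: byte 0 is the content type, bytes 1-2 the
	 * legacy protocol version, bytes 3-4 the payload length (big endian).
	 */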
2110c46234ebSDave Watson 	data_len = ((header[4] & 0xFF) | (header[3] << 8));
2111c46234ebSDave Watson 
21124509de14SVakul Garg 	cipher_overhead = prot->tag_size;
2113a6acbe62SVadim Fedorenko 	if (prot->version != TLS_1_3_VERSION &&
2114a6acbe62SVadim Fedorenko 	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
21154509de14SVakul Garg 		cipher_overhead += prot->iv_size;
2116c46234ebSDave Watson 
2117130b392cSDave Watson 	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
21184509de14SVakul Garg 	    prot->tail_size) {
2119c46234ebSDave Watson 		ret = -EMSGSIZE;
2120c46234ebSDave Watson 		goto read_failure;
2121c46234ebSDave Watson 	}
2122c46234ebSDave Watson 	if (data_len < cipher_overhead) {
2123c46234ebSDave Watson 		ret = -EBADMSG;
2124c46234ebSDave Watson 		goto read_failure;
2125c46234ebSDave Watson 	}
2126c46234ebSDave Watson 
2127130b392cSDave Watson 	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2128130b392cSDave Watson 	if (header[1] != TLS_1_2_VERSION_MINOR ||
2129130b392cSDave Watson 	    header[2] != TLS_1_2_VERSION_MAJOR) {
2130c46234ebSDave Watson 		ret = -EINVAL;
2131c46234ebSDave Watson 		goto read_failure;
2132c46234ebSDave Watson 	}
2133be2fbc15SJakub Kicinski 
2134f953d33bSJakub Kicinski 	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2135fe58a5a0SJakub Kicinski 				     TCP_SKB_CB(skb)->seq + rxm->offset);
2136c46234ebSDave Watson 	return data_len + TLS_HEADER_SIZE;
2137c46234ebSDave Watson 
2138c46234ebSDave Watson read_failure:
2139c46234ebSDave Watson 	tls_err_abort(strp->sk, ret);
2140c46234ebSDave Watson 
2141c46234ebSDave Watson 	return ret;
2142c46234ebSDave Watson }
2143c46234ebSDave Watson 
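/* strparser callback, invoked once a complete record has been parsed: stash
 * the skb in ctx->recv_pkt, pause the parser and wake the reader through the
 * saved data_ready callback.  The parser stays paused until the record is
 * consumed and tls_sw_advance_skb() unpauses it.
 */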
2144c46234ebSDave Watson static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2145c46234ebSDave Watson {
2146c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2147f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2148c46234ebSDave Watson 
2149c46234ebSDave Watson 	ctx->recv_pkt = skb;
2150c46234ebSDave Watson 	strp_pause(strp);
2151c46234ebSDave Watson 
2152ad13acceSVakul Garg 	ctx->saved_data_ready(strp->sk);
2153c46234ebSDave Watson }
2154c46234ebSDave Watson 
2155c46234ebSDave Watson static void tls_data_ready(struct sock *sk)
2156c46234ebSDave Watson {
2157c46234ebSDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2158f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2159d3b18ad3SJohn Fastabend 	struct sk_psock *psock;
2160c46234ebSDave Watson 
2161c46234ebSDave Watson 	strp_data_ready(&ctx->strp);
2162d3b18ad3SJohn Fastabend 
2163d3b18ad3SJohn Fastabend 	psock = sk_psock_get(sk);
216462b4011fSXiyu Yang 	if (psock) {
216562b4011fSXiyu Yang 		if (!list_empty(&psock->ingress_msg))
2166d3b18ad3SJohn Fastabend 			ctx->saved_data_ready(sk);
2167d3b18ad3SJohn Fastabend 		sk_psock_put(sk, psock);
2168d3b18ad3SJohn Fastabend 	}
2169c46234ebSDave Watson }
2170c46234ebSDave Watson 
2171f87e62d4SJohn Fastabend void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2172f87e62d4SJohn Fastabend {
2173f87e62d4SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2174f87e62d4SJohn Fastabend 
2175f87e62d4SJohn Fastabend 	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2176f87e62d4SJohn Fastabend 	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2177f87e62d4SJohn Fastabend 	cancel_delayed_work_sync(&ctx->tx_work.work);
2178f87e62d4SJohn Fastabend }
2179f87e62d4SJohn Fastabend 
2180313ab004SJohn Fastabend void tls_sw_release_resources_tx(struct sock *sk)
21813c4d7559SDave Watson {
21823c4d7559SDave Watson 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2183f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2184a42055e8SVakul Garg 	struct tls_rec *rec, *tmp;
218538f7e1c0SRohit Maheshwari 	int pending;
2186a42055e8SVakul Garg 
2187a42055e8SVakul Garg 	/* Wait for any pending async encryptions to complete */
218838f7e1c0SRohit Maheshwari 	spin_lock_bh(&ctx->encrypt_compl_lock);
218938f7e1c0SRohit Maheshwari 	ctx->async_notify = true;
219038f7e1c0SRohit Maheshwari 	pending = atomic_read(&ctx->encrypt_pending);
219138f7e1c0SRohit Maheshwari 	spin_unlock_bh(&ctx->encrypt_compl_lock);
219238f7e1c0SRohit Maheshwari 
219338f7e1c0SRohit Maheshwari 	if (pending)
2194a42055e8SVakul Garg 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2195a42055e8SVakul Garg 
2196a42055e8SVakul Garg 	tls_tx_records(sk, -1);
2197a42055e8SVakul Garg 
21989932a29aSVakul Garg 	/* Free up un-sent records in tx_list. First, free
2199a42055e8SVakul Garg 	 * the partially sent record, if any, at the head of tx_list.
2200a42055e8SVakul Garg 	 */
2201c5daa6ccSJakub Kicinski 	if (tls_ctx->partially_sent_record) {
2202c5daa6ccSJakub Kicinski 		tls_free_partial_record(sk, tls_ctx);
22039932a29aSVakul Garg 		rec = list_first_entry(&ctx->tx_list,
2204a42055e8SVakul Garg 				       struct tls_rec, list);
2205a42055e8SVakul Garg 		list_del(&rec->list);
2206d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_plaintext);
2207a42055e8SVakul Garg 		kfree(rec);
2208a42055e8SVakul Garg 	}
2209a42055e8SVakul Garg 
22109932a29aSVakul Garg 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2211a42055e8SVakul Garg 		list_del(&rec->list);
2212d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_encrypted);
2213d829e9c4SDaniel Borkmann 		sk_msg_free(sk, &rec->msg_plaintext);
2214a42055e8SVakul Garg 		kfree(rec);
2215a42055e8SVakul Garg 	}
22163c4d7559SDave Watson 
22173c4d7559SDave Watson 	crypto_free_aead(ctx->aead_send);
2218c774973eSVakul Garg 	tls_free_open_rec(sk);
2219313ab004SJohn Fastabend }
2220313ab004SJohn Fastabend 
2221313ab004SJohn Fastabend void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2222313ab004SJohn Fastabend {
2223313ab004SJohn Fastabend 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2224f66de3eeSBoris Pismenny 
2225f66de3eeSBoris Pismenny 	kfree(ctx);
2226f66de3eeSBoris Pismenny }
2227f66de3eeSBoris Pismenny 
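/* Tear down the RX side: free the IV and record-sequence material,
 * drop any received-but-unprocessed skbs, stop the strparser, and
 * restore the socket's original data_ready callback if it was swapped.
 */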
222839f56e1aSBoris Pismenny void tls_sw_release_resources_rx(struct sock *sk)
2229f66de3eeSBoris Pismenny {
2230f66de3eeSBoris Pismenny 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2231f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2232f66de3eeSBoris Pismenny 
223312c76861SJakub Kicinski 	kfree(tls_ctx->rx.rec_seq);
223412c76861SJakub Kicinski 	kfree(tls_ctx->rx.iv);
223512c76861SJakub Kicinski 
2236c46234ebSDave Watson 	if (ctx->aead_recv) {
2237c46234ebSDave Watson 		kfree_skb(ctx->recv_pkt);
2238c46234ebSDave Watson 		ctx->recv_pkt = NULL;
2239692d7b5dSVakul Garg 		skb_queue_purge(&ctx->rx_list);
2240c46234ebSDave Watson 		crypto_free_aead(ctx->aead_recv);
2241c46234ebSDave Watson 		strp_stop(&ctx->strp);
2242313ab004SJohn Fastabend 		/* If tls_sw_strparser_arm() was not called (cleanup paths)
2243313ab004SJohn Fastabend 		 * we still want to strp_stop(), but sk->sk_data_ready was
2244313ab004SJohn Fastabend 		 * never swapped.
2245313ab004SJohn Fastabend 		 */
2246313ab004SJohn Fastabend 		if (ctx->saved_data_ready) {
2247c46234ebSDave Watson 			write_lock_bh(&sk->sk_callback_lock);
2248c46234ebSDave Watson 			sk->sk_data_ready = ctx->saved_data_ready;
2249c46234ebSDave Watson 			write_unlock_bh(&sk->sk_callback_lock);
2250c46234ebSDave Watson 		}
225139f56e1aSBoris Pismenny 	}
2252313ab004SJohn Fastabend }
2253313ab004SJohn Fastabend 
2254313ab004SJohn Fastabend void tls_sw_strparser_done(struct tls_context *tls_ctx)
2255313ab004SJohn Fastabend {
2256313ab004SJohn Fastabend 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2257313ab004SJohn Fastabend 
2258313ab004SJohn Fastabend 	strp_done(&ctx->strp);
2259313ab004SJohn Fastabend }
2260313ab004SJohn Fastabend 
2261313ab004SJohn Fastabend void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2262313ab004SJohn Fastabend {
2263313ab004SJohn Fastabend 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2264313ab004SJohn Fastabend 
2265313ab004SJohn Fastabend 	kfree(ctx);
2266313ab004SJohn Fastabend }
226739f56e1aSBoris Pismenny 
226839f56e1aSBoris Pismenny void tls_sw_free_resources_rx(struct sock *sk)
226939f56e1aSBoris Pismenny {
227039f56e1aSBoris Pismenny 	struct tls_context *tls_ctx = tls_get_ctx(sk);
227139f56e1aSBoris Pismenny 
227239f56e1aSBoris Pismenny 	tls_sw_release_resources_rx(sk);
2273313ab004SJohn Fastabend 	tls_sw_free_ctx_rx(tls_ctx);
22743c4d7559SDave Watson }
22753c4d7559SDave Watson 
22769932a29aSVakul Garg /* The work handler to transmit the encrypted records in tx_list */
2277a42055e8SVakul Garg static void tx_work_handler(struct work_struct *work)
2278a42055e8SVakul Garg {
2279a42055e8SVakul Garg 	struct delayed_work *delayed_work = to_delayed_work(work);
2280a42055e8SVakul Garg 	struct tx_work *tx_work = container_of(delayed_work,
2281a42055e8SVakul Garg 					       struct tx_work, work);
2282a42055e8SVakul Garg 	struct sock *sk = tx_work->sk;
2283a42055e8SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2284f87e62d4SJohn Fastabend 	struct tls_sw_context_tx *ctx;
2285f87e62d4SJohn Fastabend 
2286f87e62d4SJohn Fastabend 	if (unlikely(!tls_ctx))
2287f87e62d4SJohn Fastabend 		return;
2288f87e62d4SJohn Fastabend 
2289f87e62d4SJohn Fastabend 	ctx = tls_sw_ctx_tx(tls_ctx);
2290f87e62d4SJohn Fastabend 	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2291f87e62d4SJohn Fastabend 		return;
2292a42055e8SVakul Garg 
2293a42055e8SVakul Garg 	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2294a42055e8SVakul Garg 		return;
229579ffe608SJakub Kicinski 	mutex_lock(&tls_ctx->tx_lock);
2296a42055e8SVakul Garg 	lock_sock(sk);
2297a42055e8SVakul Garg 	tls_tx_records(sk, -1);
2298a42055e8SVakul Garg 	release_sock(sk);
229979ffe608SJakub Kicinski 	mutex_unlock(&tls_ctx->tx_lock);
2300a42055e8SVakul Garg }
2301a42055e8SVakul Garg 
23027463d3a2SBoris Pismenny void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
23037463d3a2SBoris Pismenny {
23047463d3a2SBoris Pismenny 	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
23057463d3a2SBoris Pismenny 
23067463d3a2SBoris Pismenny 	/* Schedule the transmission if tx list is ready */
230702b1fa07SJakub Kicinski 	if (is_tx_ready(tx_ctx) &&
230802b1fa07SJakub Kicinski 	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
23097463d3a2SBoris Pismenny 		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
23107463d3a2SBoris Pismenny }
23117463d3a2SBoris Pismenny 
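/* Install the TLS data_ready callback (saving the original one) and
 * kick the strparser so any data already sitting in the receive queue
 * is parsed.
 */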
2312318892acSJakub Kicinski void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2313318892acSJakub Kicinski {
2314318892acSJakub Kicinski 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2315318892acSJakub Kicinski 
2316318892acSJakub Kicinski 	write_lock_bh(&sk->sk_callback_lock);
2317318892acSJakub Kicinski 	rx_ctx->saved_data_ready = sk->sk_data_ready;
2318318892acSJakub Kicinski 	sk->sk_data_ready = tls_data_ready;
2319318892acSJakub Kicinski 	write_unlock_bh(&sk->sk_callback_lock);
2320318892acSJakub Kicinski 
2321318892acSJakub Kicinski 	strp_check_rcv(&rx_ctx->strp);
2322318892acSJakub Kicinski }
2323318892acSJakub Kicinski 
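/* Initialize the software crypto state for one direction (TX or RX):
 * allocate the per-direction context if needed, pull the cipher
 * parameters out of crypto_info, build the nonce and record-sequence
 * buffers, allocate and key the AEAD, and, for RX, set up the
 * strparser callbacks.
 */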
2324c46234ebSDave Watson int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
23253c4d7559SDave Watson {
23264509de14SVakul Garg 	struct tls_context *tls_ctx = tls_get_ctx(sk);
23274509de14SVakul Garg 	struct tls_prot_info *prot = &tls_ctx->prot_info;
23283c4d7559SDave Watson 	struct tls_crypto_info *crypto_info;
2329f66de3eeSBoris Pismenny 	struct tls_sw_context_tx *sw_ctx_tx = NULL;
2330f66de3eeSBoris Pismenny 	struct tls_sw_context_rx *sw_ctx_rx = NULL;
2331c46234ebSDave Watson 	struct cipher_context *cctx;
2332c46234ebSDave Watson 	struct crypto_aead **aead;
2333c46234ebSDave Watson 	struct strp_callbacks cb;
2334f295b3aeSVakul Garg 	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2335692d7b5dSVakul Garg 	struct crypto_tfm *tfm;
2336f295b3aeSVakul Garg 	char *iv, *rec_seq, *key, *salt, *cipher_name;
2337fb99bce7SDave Watson 	size_t keysize;
23383c4d7559SDave Watson 	int rc = 0;
23393c4d7559SDave Watson 
23403c4d7559SDave Watson 	if (!ctx) {
23413c4d7559SDave Watson 		rc = -EINVAL;
23423c4d7559SDave Watson 		goto out;
23433c4d7559SDave Watson 	}
23443c4d7559SDave Watson 
2345f66de3eeSBoris Pismenny 	if (tx) {
2346b190a587SBoris Pismenny 		if (!ctx->priv_ctx_tx) {
2347f66de3eeSBoris Pismenny 			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2348f66de3eeSBoris Pismenny 			if (!sw_ctx_tx) {
23493c4d7559SDave Watson 				rc = -ENOMEM;
23503c4d7559SDave Watson 				goto out;
23513c4d7559SDave Watson 			}
2352f66de3eeSBoris Pismenny 			ctx->priv_ctx_tx = sw_ctx_tx;
2353c46234ebSDave Watson 		} else {
2354b190a587SBoris Pismenny 			sw_ctx_tx =
2355b190a587SBoris Pismenny 				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2356b190a587SBoris Pismenny 		}
2357b190a587SBoris Pismenny 	} else {
2358b190a587SBoris Pismenny 		if (!ctx->priv_ctx_rx) {
2359f66de3eeSBoris Pismenny 			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2360f66de3eeSBoris Pismenny 			if (!sw_ctx_rx) {
2361f66de3eeSBoris Pismenny 				rc = -ENOMEM;
2362f66de3eeSBoris Pismenny 				goto out;
2363c46234ebSDave Watson 			}
2364f66de3eeSBoris Pismenny 			ctx->priv_ctx_rx = sw_ctx_rx;
2365b190a587SBoris Pismenny 		} else {
2366b190a587SBoris Pismenny 			sw_ctx_rx =
2367b190a587SBoris Pismenny 				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2368b190a587SBoris Pismenny 		}
2369f66de3eeSBoris Pismenny 	}
23703c4d7559SDave Watson 
2371c46234ebSDave Watson 	if (tx) {
2372b190a587SBoris Pismenny 		crypto_init_wait(&sw_ctx_tx->async_wait);
23730cada332SVinay Kumar Yadav 		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
237486029d10SSabrina Dubroca 		crypto_info = &ctx->crypto_send.info;
2375c46234ebSDave Watson 		cctx = &ctx->tx;
2376f66de3eeSBoris Pismenny 		aead = &sw_ctx_tx->aead_send;
23779932a29aSVakul Garg 		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2378a42055e8SVakul Garg 		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2379a42055e8SVakul Garg 		sw_ctx_tx->tx_work.sk = sk;
2380c46234ebSDave Watson 	} else {
2381b190a587SBoris Pismenny 		crypto_init_wait(&sw_ctx_rx->async_wait);
23820cada332SVinay Kumar Yadav 		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
238386029d10SSabrina Dubroca 		crypto_info = &ctx->crypto_recv.info;
2384c46234ebSDave Watson 		cctx = &ctx->rx;
2385692d7b5dSVakul Garg 		skb_queue_head_init(&sw_ctx_rx->rx_list);
2386f66de3eeSBoris Pismenny 		aead = &sw_ctx_rx->aead_recv;
2387c46234ebSDave Watson 	}
2388c46234ebSDave Watson 
23893c4d7559SDave Watson 	switch (crypto_info->cipher_type) {
23903c4d7559SDave Watson 	case TLS_CIPHER_AES_GCM_128: {
2391dc2724a6STianjia Zhang 		struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2392dc2724a6STianjia Zhang 
2393dc2724a6STianjia Zhang 		gcm_128_info = (void *)crypto_info;
23943c4d7559SDave Watson 		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
23953c4d7559SDave Watson 		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
23963c4d7559SDave Watson 		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2397dc2724a6STianjia Zhang 		iv = gcm_128_info->iv;
23983c4d7559SDave Watson 		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2399dc2724a6STianjia Zhang 		rec_seq = gcm_128_info->rec_seq;
2400fb99bce7SDave Watson 		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2401fb99bce7SDave Watson 		key = gcm_128_info->key;
2402fb99bce7SDave Watson 		salt = gcm_128_info->salt;
2403f295b3aeSVakul Garg 		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2404f295b3aeSVakul Garg 		cipher_name = "gcm(aes)";
2405fb99bce7SDave Watson 		break;
2406fb99bce7SDave Watson 	}
2407fb99bce7SDave Watson 	case TLS_CIPHER_AES_GCM_256: {
2408dc2724a6STianjia Zhang 		struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2409dc2724a6STianjia Zhang 
2410dc2724a6STianjia Zhang 		gcm_256_info = (void *)crypto_info;
2411fb99bce7SDave Watson 		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2412fb99bce7SDave Watson 		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2413fb99bce7SDave Watson 		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2414dc2724a6STianjia Zhang 		iv = gcm_256_info->iv;
2415fb99bce7SDave Watson 		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2416dc2724a6STianjia Zhang 		rec_seq = gcm_256_info->rec_seq;
2417fb99bce7SDave Watson 		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2418fb99bce7SDave Watson 		key = gcm_256_info->key;
2419fb99bce7SDave Watson 		salt = gcm_256_info->salt;
2420f295b3aeSVakul Garg 		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2421f295b3aeSVakul Garg 		cipher_name = "gcm(aes)";
2422f295b3aeSVakul Garg 		break;
2423f295b3aeSVakul Garg 	}
2424f295b3aeSVakul Garg 	case TLS_CIPHER_AES_CCM_128: {
2425dc2724a6STianjia Zhang 		struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2426dc2724a6STianjia Zhang 
2427dc2724a6STianjia Zhang 		ccm_128_info = (void *)crypto_info;
2428f295b3aeSVakul Garg 		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2429f295b3aeSVakul Garg 		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2430f295b3aeSVakul Garg 		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2431dc2724a6STianjia Zhang 		iv = ccm_128_info->iv;
2432f295b3aeSVakul Garg 		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2433dc2724a6STianjia Zhang 		rec_seq = ccm_128_info->rec_seq;
2434f295b3aeSVakul Garg 		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2435f295b3aeSVakul Garg 		key = ccm_128_info->key;
2436f295b3aeSVakul Garg 		salt = ccm_128_info->salt;
2437f295b3aeSVakul Garg 		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2438f295b3aeSVakul Garg 		cipher_name = "ccm(aes)";
24393c4d7559SDave Watson 		break;
24403c4d7559SDave Watson 	}
244174ea6106SVadim Fedorenko 	case TLS_CIPHER_CHACHA20_POLY1305: {
2442dc2724a6STianjia Zhang 		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
2443dc2724a6STianjia Zhang 
244474ea6106SVadim Fedorenko 		chacha20_poly1305_info = (void *)crypto_info;
244574ea6106SVadim Fedorenko 		nonce_size = 0;
244674ea6106SVadim Fedorenko 		tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
244774ea6106SVadim Fedorenko 		iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
244874ea6106SVadim Fedorenko 		iv = chacha20_poly1305_info->iv;
244974ea6106SVadim Fedorenko 		rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
245074ea6106SVadim Fedorenko 		rec_seq = chacha20_poly1305_info->rec_seq;
245174ea6106SVadim Fedorenko 		keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
245274ea6106SVadim Fedorenko 		key = chacha20_poly1305_info->key;
245374ea6106SVadim Fedorenko 		salt = chacha20_poly1305_info->salt;
245474ea6106SVadim Fedorenko 		salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
245574ea6106SVadim Fedorenko 		cipher_name = "rfc7539(chacha20,poly1305)";
245674ea6106SVadim Fedorenko 		break;
245774ea6106SVadim Fedorenko 	}
2458227b9644STianjia Zhang 	case TLS_CIPHER_SM4_GCM: {
2459227b9644STianjia Zhang 		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
2460227b9644STianjia Zhang 
2461227b9644STianjia Zhang 		sm4_gcm_info = (void *)crypto_info;
2462227b9644STianjia Zhang 		nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2463227b9644STianjia Zhang 		tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
2464227b9644STianjia Zhang 		iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2465227b9644STianjia Zhang 		iv = sm4_gcm_info->iv;
2466227b9644STianjia Zhang 		rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
2467227b9644STianjia Zhang 		rec_seq = sm4_gcm_info->rec_seq;
2468227b9644STianjia Zhang 		keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
2469227b9644STianjia Zhang 		key = sm4_gcm_info->key;
2470227b9644STianjia Zhang 		salt = sm4_gcm_info->salt;
2471227b9644STianjia Zhang 		salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
2472227b9644STianjia Zhang 		cipher_name = "gcm(sm4)";
2473227b9644STianjia Zhang 		break;
2474227b9644STianjia Zhang 	}
2475227b9644STianjia Zhang 	case TLS_CIPHER_SM4_CCM: {
2476227b9644STianjia Zhang 		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
2477227b9644STianjia Zhang 
2478227b9644STianjia Zhang 		sm4_ccm_info = (void *)crypto_info;
2479227b9644STianjia Zhang 		nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2480227b9644STianjia Zhang 		tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
2481227b9644STianjia Zhang 		iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2482227b9644STianjia Zhang 		iv = sm4_ccm_info->iv;
2483227b9644STianjia Zhang 		rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
2484227b9644STianjia Zhang 		rec_seq = sm4_ccm_info->rec_seq;
2485227b9644STianjia Zhang 		keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
2486227b9644STianjia Zhang 		key = sm4_ccm_info->key;
2487227b9644STianjia Zhang 		salt = sm4_ccm_info->salt;
2488227b9644STianjia Zhang 		salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
2489227b9644STianjia Zhang 		cipher_name = "ccm(sm4)";
2490227b9644STianjia Zhang 		break;
2491227b9644STianjia Zhang 	}
24923c4d7559SDave Watson 	default:
24933c4d7559SDave Watson 		rc = -EINVAL;
2494cf6d43efSSabrina Dubroca 		goto free_priv;
24953c4d7559SDave Watson 	}
24963c4d7559SDave Watson 
249789fec474SJakub Kicinski 	/* Sanity-check the sizes for stack allocations. */
249889fec474SJakub Kicinski 	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2499*a8340cc0SJakub Kicinski 	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE) {
2500b16520f7SKees Cook 		rc = -EINVAL;
2501b16520f7SKees Cook 		goto free_priv;
2502b16520f7SKees Cook 	}
2503b16520f7SKees Cook 
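	/* TLS 1.3 carries no explicit per-record nonce on the wire and
	 * reserves one tail byte for the inner content type; TLS 1.2
	 * uses a larger AAD and no tail.
	 */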
2504130b392cSDave Watson 	if (crypto_info->version == TLS_1_3_VERSION) {
2505130b392cSDave Watson 		nonce_size = 0;
25064509de14SVakul Garg 		prot->aad_size = TLS_HEADER_SIZE;
25074509de14SVakul Garg 		prot->tail_size = 1;
2508130b392cSDave Watson 	} else {
25094509de14SVakul Garg 		prot->aad_size = TLS_AAD_SPACE_SIZE;
25104509de14SVakul Garg 		prot->tail_size = 0;
2511130b392cSDave Watson 	}
2512130b392cSDave Watson 
25134509de14SVakul Garg 	prot->version = crypto_info->version;
25144509de14SVakul Garg 	prot->cipher_type = crypto_info->cipher_type;
25154509de14SVakul Garg 	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
25164509de14SVakul Garg 	prot->tag_size = tag_size;
25174509de14SVakul Garg 	prot->overhead_size = prot->prepend_size +
25184509de14SVakul Garg 			      prot->tag_size + prot->tail_size;
25194509de14SVakul Garg 	prot->iv_size = iv_size;
2520f295b3aeSVakul Garg 	prot->salt_size = salt_size;
2521f295b3aeSVakul Garg 	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2522c46234ebSDave Watson 	if (!cctx->iv) {
25233c4d7559SDave Watson 		rc = -ENOMEM;
2524cf6d43efSSabrina Dubroca 		goto free_priv;
25253c4d7559SDave Watson 	}
2526fb99bce7SDave Watson 	/* Note: 128 & 256 bit salt are the same size */
25274509de14SVakul Garg 	prot->rec_seq_size = rec_seq_size;
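	/* cctx->iv is laid out as the implicit salt followed by the
	 * initial IV supplied by user space.
	 */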
2528f295b3aeSVakul Garg 	memcpy(cctx->iv, salt, salt_size);
2529f295b3aeSVakul Garg 	memcpy(cctx->iv + salt_size, iv, iv_size);
2530969d5090Szhong jiang 	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2531c46234ebSDave Watson 	if (!cctx->rec_seq) {
25323c4d7559SDave Watson 		rc = -ENOMEM;
25333c4d7559SDave Watson 		goto free_iv;
25343c4d7559SDave Watson 	}
25353c4d7559SDave Watson 
2536c46234ebSDave Watson 	if (!*aead) {
2537f295b3aeSVakul Garg 		*aead = crypto_alloc_aead(cipher_name, 0, 0);
2538c46234ebSDave Watson 		if (IS_ERR(*aead)) {
2539c46234ebSDave Watson 			rc = PTR_ERR(*aead);
2540c46234ebSDave Watson 			*aead = NULL;
25413c4d7559SDave Watson 			goto free_rec_seq;
25423c4d7559SDave Watson 		}
25433c4d7559SDave Watson 	}
25443c4d7559SDave Watson 
25453c4d7559SDave Watson 	ctx->push_pending_record = tls_sw_push_pending_record;
25463c4d7559SDave Watson 
2547fb99bce7SDave Watson 	rc = crypto_aead_setkey(*aead, key, keysize);
2548fb99bce7SDave Watson 
25493c4d7559SDave Watson 	if (rc)
25503c4d7559SDave Watson 		goto free_aead;
25513c4d7559SDave Watson 
25524509de14SVakul Garg 	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2553c46234ebSDave Watson 	if (rc)
2554c46234ebSDave Watson 		goto free_aead;
2555c46234ebSDave Watson 
2556f66de3eeSBoris Pismenny 	if (sw_ctx_rx) {
2557692d7b5dSVakul Garg 		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
25588497ded2SVakul Garg 
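		/* Async decrypt is not used with TLS 1.3; otherwise follow
		 * whatever the AEAD implementation advertises.
		 */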
25598497ded2SVakul Garg 		if (crypto_info->version == TLS_1_3_VERSION)
25605c5458ecSJakub Kicinski 			sw_ctx_rx->async_capable = 0;
25618497ded2SVakul Garg 		else
2562692d7b5dSVakul Garg 			sw_ctx_rx->async_capable =
25635c5458ecSJakub Kicinski 				!!(tfm->__crt_alg->cra_flags &
25645c5458ecSJakub Kicinski 				   CRYPTO_ALG_ASYNC);
2565692d7b5dSVakul Garg 
2566c46234ebSDave Watson 		/* Set up strparser */
2567c46234ebSDave Watson 		memset(&cb, 0, sizeof(cb));
2568c46234ebSDave Watson 		cb.rcv_msg = tls_queue;
2569c46234ebSDave Watson 		cb.parse_msg = tls_read_size;
2570c46234ebSDave Watson 
2571f66de3eeSBoris Pismenny 		strp_init(&sw_ctx_rx->strp, sk, &cb);
2572c46234ebSDave Watson 	}
2573c46234ebSDave Watson 
2574c46234ebSDave Watson 	goto out;
25753c4d7559SDave Watson 
25763c4d7559SDave Watson free_aead:
2577c46234ebSDave Watson 	crypto_free_aead(*aead);
2578c46234ebSDave Watson 	*aead = NULL;
25793c4d7559SDave Watson free_rec_seq:
2580c46234ebSDave Watson 	kfree(cctx->rec_seq);
2581c46234ebSDave Watson 	cctx->rec_seq = NULL;
25823c4d7559SDave Watson free_iv:
2583f66de3eeSBoris Pismenny 	kfree(cctx->iv);
2584f66de3eeSBoris Pismenny 	cctx->iv = NULL;
2585cf6d43efSSabrina Dubroca free_priv:
2586f66de3eeSBoris Pismenny 	if (tx) {
2587f66de3eeSBoris Pismenny 		kfree(ctx->priv_ctx_tx);
2588f66de3eeSBoris Pismenny 		ctx->priv_ctx_tx = NULL;
2589f66de3eeSBoris Pismenny 	} else {
2590f66de3eeSBoris Pismenny 		kfree(ctx->priv_ctx_rx);
2591f66de3eeSBoris Pismenny 		ctx->priv_ctx_rx = NULL;
2592f66de3eeSBoris Pismenny 	}
25933c4d7559SDave Watson out:
25943c4d7559SDave Watson 	return rc;
25953c4d7559SDave Watson }
2596