xref: /openbmc/linux/net/tls/tls_sw.c (revision 874c8ca1)
1 /*
2  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3  * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4  * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5  * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6  * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7  * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
8  *
9  * This software is available to you under a choice of one of two
10  * licenses.  You may choose to be licensed under the terms of the GNU
11  * General Public License (GPL) Version 2, available from the file
12  * COPYING in the main directory of this source tree, or the
13  * OpenIB.org BSD license below:
14  *
15  *     Redistribution and use in source and binary forms, with or
16  *     without modification, are permitted provided that the following
17  *     conditions are met:
18  *
19  *      - Redistributions of source code must retain the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer.
22  *
23  *      - Redistributions in binary form must reproduce the above
24  *        copyright notice, this list of conditions and the following
25  *        disclaimer in the documentation and/or other materials
26  *        provided with the distribution.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35  * SOFTWARE.
36  */
37 
38 #include <linux/bug.h>
39 #include <linux/sched/signal.h>
40 #include <linux/module.h>
41 #include <linux/splice.h>
42 #include <crypto/aead.h>
43 
44 #include <net/strparser.h>
45 #include <net/tls.h>
46 
47 struct tls_decrypt_arg {
48 	bool zc;
49 	bool async;
50 };
51 
52 noinline void tls_err_abort(struct sock *sk, int err)
53 {
54 	WARN_ON_ONCE(err >= 0);
55 	/* sk->sk_err should contain a positive error code. */
56 	sk->sk_err = -err;
57 	sk_error_report(sk);
58 }
59 
60 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
61                      unsigned int recursion_level)
62 {
63         int start = skb_headlen(skb);
64         int i, chunk = start - offset;
65         struct sk_buff *frag_iter;
66         int elt = 0;
67 
68         if (unlikely(recursion_level >= 24))
69                 return -EMSGSIZE;
70 
71         if (chunk > 0) {
72                 if (chunk > len)
73                         chunk = len;
74                 elt++;
75                 len -= chunk;
76                 if (len == 0)
77                         return elt;
78                 offset += chunk;
79         }
80 
81         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
82                 int end;
83 
84                 WARN_ON(start > offset + len);
85 
86                 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
87                 chunk = end - offset;
88                 if (chunk > 0) {
89                         if (chunk > len)
90                                 chunk = len;
91                         elt++;
92                         len -= chunk;
93                         if (len == 0)
94                                 return elt;
95                         offset += chunk;
96                 }
97                 start = end;
98         }
99 
100         if (unlikely(skb_has_frag_list(skb))) {
101                 skb_walk_frags(skb, frag_iter) {
102                         int end, ret;
103 
104                         WARN_ON(start > offset + len);
105 
106                         end = start + frag_iter->len;
107                         chunk = end - offset;
108                         if (chunk > 0) {
109                                 if (chunk > len)
110                                         chunk = len;
111                                 ret = __skb_nsg(frag_iter, offset - start, chunk,
112                                                 recursion_level + 1);
113                                 if (unlikely(ret < 0))
114                                         return ret;
115                                 elt += ret;
116                                 len -= chunk;
117                                 if (len == 0)
118                                         return elt;
119                                 offset += chunk;
120                         }
121                         start = end;
122                 }
123         }
124         BUG_ON(len);
125         return elt;
126 }
127 
128 /* Return the number of scatterlist elements required to completely map the
129  * skb, or -EMSGSIZE if the recursion depth is exceeded.
130  */
131 static int skb_nsg(struct sk_buff *skb, int offset, int len)
132 {
133         return __skb_nsg(skb, offset, len, 0);
134 }
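
/* Editorial note: skb_nsg() mirrors the scatterlist element count that a
 * later skb_to_sgvec() call produces for the same offset/len, which lets
 * decrypt_internal() below size sgin[] before filling it.
 */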
135 
136 static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb)
137 {
138 	struct strp_msg *rxm = strp_msg(skb);
139 	struct tls_msg *tlm = tls_msg(skb);
140 	int sub = 0;
141 
142 	/* Determine zero-padding length */
143 	if (prot->version == TLS_1_3_VERSION) {
144 		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
145 		char content_type = 0;
146 		int err;
147 
148 		while (content_type == 0) {
149 			if (offset < prot->prepend_size)
150 				return -EBADMSG;
151 			err = skb_copy_bits(skb, rxm->offset + offset,
152 					    &content_type, 1);
153 			if (err)
154 				return err;
155 			if (content_type)
156 				break;
157 			sub++;
158 			offset--;
159 		}
160 		tlm->control = content_type;
161 	}
162 	return sub;
163 }
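
/* Editorial sketch, not part of the original file: TLS 1.3 encodes each
 * record as  plaintext || content-type byte || zero padding,  so the real
 * content type is the last non-zero byte of the decrypted data. A minimal
 * userspace model of the backward scan above (helper name hypothetical):
 */
static int tls13_strip_padding(const unsigned char *rec, int len,
			       unsigned char *type)
{
	while (len > 0 && rec[len - 1] == 0)
		len--;			/* strip the zero padding */
	if (!len)
		return -1;		/* no content type: malformed */
	*type = rec[len - 1];		/* trailing content-type byte */
	return len - 1;			/* remaining plaintext length */
}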
164 
165 static void tls_decrypt_done(struct crypto_async_request *req, int err)
166 {
167 	struct aead_request *aead_req = (struct aead_request *)req;
168 	struct scatterlist *sgout = aead_req->dst;
169 	struct scatterlist *sgin = aead_req->src;
170 	struct tls_sw_context_rx *ctx;
171 	struct tls_context *tls_ctx;
172 	struct tls_prot_info *prot;
173 	struct scatterlist *sg;
174 	struct sk_buff *skb;
175 	unsigned int pages;
176 
177 	skb = (struct sk_buff *)req->data;
178 	tls_ctx = tls_get_ctx(skb->sk);
179 	ctx = tls_sw_ctx_rx(tls_ctx);
180 	prot = &tls_ctx->prot_info;
181 
182 	/* Propagate the error if there was one */
183 	if (err) {
184 		if (err == -EBADMSG)
185 			TLS_INC_STATS(sock_net(skb->sk),
186 				      LINUX_MIB_TLSDECRYPTERROR);
187 		ctx->async_wait.err = err;
188 		tls_err_abort(skb->sk, err);
189 	} else {
190 		struct strp_msg *rxm = strp_msg(skb);
191 
192 		/* No TLS 1.3 support with async crypto */
193 		WARN_ON(prot->tail_size);
194 
195 		rxm->offset += prot->prepend_size;
196 		rxm->full_len -= prot->overhead_size;
197 	}
198 
199 	/* After using skb->sk to propagate sk through crypto async callback
200 	 * we need to NULL it again.
201 	 */
202 	skb->sk = NULL;
203 
204 
205 	/* Free the destination pages if skb was not decrypted in place */
206 	if (sgout != sgin) {
207 		/* Skip the first S/G entry as it points to AAD */
208 		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
209 			if (!sg)
210 				break;
211 			put_page(sg_page(sg));
212 		}
213 	}
214 
215 	kfree(aead_req);
216 
217 	spin_lock_bh(&ctx->decrypt_compl_lock);
218 	if (!atomic_dec_return(&ctx->decrypt_pending))
219 		complete(&ctx->async_wait.completion);
220 	spin_unlock_bh(&ctx->decrypt_compl_lock);
221 }
222 
223 static int tls_do_decryption(struct sock *sk,
224 			     struct sk_buff *skb,
225 			     struct scatterlist *sgin,
226 			     struct scatterlist *sgout,
227 			     char *iv_recv,
228 			     size_t data_len,
229 			     struct aead_request *aead_req,
230 			     struct tls_decrypt_arg *darg)
231 {
232 	struct tls_context *tls_ctx = tls_get_ctx(sk);
233 	struct tls_prot_info *prot = &tls_ctx->prot_info;
234 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
235 	int ret;
236 
237 	aead_request_set_tfm(aead_req, ctx->aead_recv);
238 	aead_request_set_ad(aead_req, prot->aad_size);
239 	aead_request_set_crypt(aead_req, sgin, sgout,
240 			       data_len + prot->tag_size,
241 			       (u8 *)iv_recv);
242 
243 	if (darg->async) {
244 		/* Using skb->sk to push sk through to crypto async callback
245 		 * handler. This allows propagating errors up to the socket
246 		 * if needed. It _must_ be cleared in the async handler
247 		 * before consume_skb is called. We _know_ skb->sk is NULL
248 		 * because it is a clone from strparser.
249 		 */
250 		skb->sk = sk;
251 		aead_request_set_callback(aead_req,
252 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
253 					  tls_decrypt_done, skb);
254 		atomic_inc(&ctx->decrypt_pending);
255 	} else {
256 		aead_request_set_callback(aead_req,
257 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
258 					  crypto_req_done, &ctx->async_wait);
259 	}
260 
261 	ret = crypto_aead_decrypt(aead_req);
262 	if (ret == -EINPROGRESS) {
263 		if (darg->async)
264 			return 0;
265 
266 		ret = crypto_wait_req(ret, &ctx->async_wait);
267 	}
268 	darg->async = false;
269 
270 	if (ret == -EBADMSG)
271 		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
272 
273 	return ret;
274 }
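
/* Editorial note: on the async path tls_do_decryption() returns 0 while
 * the AEAD request is still in flight (crypto_aead_decrypt() returned
 * -EINPROGRESS); completions are counted in ctx->decrypt_pending and
 * signalled from tls_decrypt_done() above. On the sync path darg->async
 * is cleared and the crypto result is returned directly.
 */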
275 
276 static void tls_trim_both_msgs(struct sock *sk, int target_size)
277 {
278 	struct tls_context *tls_ctx = tls_get_ctx(sk);
279 	struct tls_prot_info *prot = &tls_ctx->prot_info;
280 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
281 	struct tls_rec *rec = ctx->open_rec;
282 
283 	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
284 	if (target_size > 0)
285 		target_size += prot->overhead_size;
286 	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
287 }
288 
289 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
290 {
291 	struct tls_context *tls_ctx = tls_get_ctx(sk);
292 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
293 	struct tls_rec *rec = ctx->open_rec;
294 	struct sk_msg *msg_en = &rec->msg_encrypted;
295 
296 	return sk_msg_alloc(sk, msg_en, len, 0);
297 }
298 
299 static int tls_clone_plaintext_msg(struct sock *sk, int required)
300 {
301 	struct tls_context *tls_ctx = tls_get_ctx(sk);
302 	struct tls_prot_info *prot = &tls_ctx->prot_info;
303 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
304 	struct tls_rec *rec = ctx->open_rec;
305 	struct sk_msg *msg_pl = &rec->msg_plaintext;
306 	struct sk_msg *msg_en = &rec->msg_encrypted;
307 	int skip, len;
308 
309 	/* We add page references worth len bytes from the encrypted sg
310 	 * at the end of the plaintext sg. The caller guarantees that
311 	 * msg_en has enough room for them.
312 	 */
313 	len = required - msg_pl->sg.size;
314 
315 	/* Skip initial bytes in msg_en's data so that the plain and
316 	 * encrypted data share the same offset.
317 	 */
318 	skip = prot->prepend_size + msg_pl->sg.size;
319 
320 	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
321 }
322 
323 static struct tls_rec *tls_get_rec(struct sock *sk)
324 {
325 	struct tls_context *tls_ctx = tls_get_ctx(sk);
326 	struct tls_prot_info *prot = &tls_ctx->prot_info;
327 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
328 	struct sk_msg *msg_pl, *msg_en;
329 	struct tls_rec *rec;
330 	int mem_size;
331 
332 	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
333 
334 	rec = kzalloc(mem_size, sk->sk_allocation);
335 	if (!rec)
336 		return NULL;
337 
338 	msg_pl = &rec->msg_plaintext;
339 	msg_en = &rec->msg_encrypted;
340 
341 	sk_msg_init(msg_pl);
342 	sk_msg_init(msg_en);
343 
344 	sg_init_table(rec->sg_aead_in, 2);
345 	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
346 	sg_unmark_end(&rec->sg_aead_in[1]);
347 
348 	sg_init_table(rec->sg_aead_out, 2);
349 	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
350 	sg_unmark_end(&rec->sg_aead_out[1]);
351 
352 	return rec;
353 }
354 
355 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
356 {
357 	sk_msg_free(sk, &rec->msg_encrypted);
358 	sk_msg_free(sk, &rec->msg_plaintext);
359 	kfree(rec);
360 }
361 
362 static void tls_free_open_rec(struct sock *sk)
363 {
364 	struct tls_context *tls_ctx = tls_get_ctx(sk);
365 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
366 	struct tls_rec *rec = ctx->open_rec;
367 
368 	if (rec) {
369 		tls_free_rec(sk, rec);
370 		ctx->open_rec = NULL;
371 	}
372 }
373 
374 int tls_tx_records(struct sock *sk, int flags)
375 {
376 	struct tls_context *tls_ctx = tls_get_ctx(sk);
377 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
378 	struct tls_rec *rec, *tmp;
379 	struct sk_msg *msg_en;
380 	int tx_flags, rc = 0;
381 
382 	if (tls_is_partially_sent_record(tls_ctx)) {
383 		rec = list_first_entry(&ctx->tx_list,
384 				       struct tls_rec, list);
385 
386 		if (flags == -1)
387 			tx_flags = rec->tx_flags;
388 		else
389 			tx_flags = flags;
390 
391 		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
392 		if (rc)
393 			goto tx_err;
394 
395 		/* Full record has been transmitted.
396 		 * Remove the head of tx_list
397 		 */
398 		list_del(&rec->list);
399 		sk_msg_free(sk, &rec->msg_plaintext);
400 		kfree(rec);
401 	}
402 
403 	/* Tx all ready records */
404 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
405 		if (READ_ONCE(rec->tx_ready)) {
406 			if (flags == -1)
407 				tx_flags = rec->tx_flags;
408 			else
409 				tx_flags = flags;
410 
411 			msg_en = &rec->msg_encrypted;
412 			rc = tls_push_sg(sk, tls_ctx,
413 					 &msg_en->sg.data[msg_en->sg.curr],
414 					 0, tx_flags);
415 			if (rc)
416 				goto tx_err;
417 
418 			list_del(&rec->list);
419 			sk_msg_free(sk, &rec->msg_plaintext);
420 			kfree(rec);
421 		} else {
422 			break;
423 		}
424 	}
425 
426 tx_err:
427 	if (rc < 0 && rc != -EAGAIN)
428 		tls_err_abort(sk, -EBADMSG);
429 
430 	return rc;
431 }
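
/* Editorial note: tx_list preserves record order. The loop above stops at
 * the first record whose encryption has not completed, so records reach
 * the wire in sequence even when AEAD requests complete out of order.
 */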
432 
433 static void tls_encrypt_done(struct crypto_async_request *req, int err)
434 {
435 	struct aead_request *aead_req = (struct aead_request *)req;
436 	struct sock *sk = req->data;
437 	struct tls_context *tls_ctx = tls_get_ctx(sk);
438 	struct tls_prot_info *prot = &tls_ctx->prot_info;
439 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
440 	struct scatterlist *sge;
441 	struct sk_msg *msg_en;
442 	struct tls_rec *rec;
443 	bool ready = false;
444 	int pending;
445 
446 	rec = container_of(aead_req, struct tls_rec, aead_req);
447 	msg_en = &rec->msg_encrypted;
448 
449 	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
450 	sge->offset -= prot->prepend_size;
451 	sge->length += prot->prepend_size;
452 
453 	/* Check if an error was previously set on the socket */
454 	if (err || sk->sk_err) {
455 		rec = NULL;
456 
457 		/* If an error is already set on the socket, return the same code */
458 		if (sk->sk_err) {
459 			ctx->async_wait.err = -sk->sk_err;
460 		} else {
461 			ctx->async_wait.err = err;
462 			tls_err_abort(sk, err);
463 		}
464 	}
465 
466 	if (rec) {
467 		struct tls_rec *first_rec;
468 
469 		/* Mark the record as ready for transmission */
470 		smp_store_mb(rec->tx_ready, true);
471 
472 		/* If the completed record is at the head of tx_list, schedule tx */
473 		first_rec = list_first_entry(&ctx->tx_list,
474 					     struct tls_rec, list);
475 		if (rec == first_rec)
476 			ready = true;
477 	}
478 
479 	spin_lock_bh(&ctx->encrypt_compl_lock);
480 	pending = atomic_dec_return(&ctx->encrypt_pending);
481 
482 	if (!pending && ctx->async_notify)
483 		complete(&ctx->async_wait.completion);
484 	spin_unlock_bh(&ctx->encrypt_compl_lock);
485 
486 	if (!ready)
487 		return;
488 
489 	/* Schedule the transmission */
490 	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
491 		schedule_delayed_work(&ctx->tx_work.work, 1);
492 }
493 
494 static int tls_do_encryption(struct sock *sk,
495 			     struct tls_context *tls_ctx,
496 			     struct tls_sw_context_tx *ctx,
497 			     struct aead_request *aead_req,
498 			     size_t data_len, u32 start)
499 {
500 	struct tls_prot_info *prot = &tls_ctx->prot_info;
501 	struct tls_rec *rec = ctx->open_rec;
502 	struct sk_msg *msg_en = &rec->msg_encrypted;
503 	struct scatterlist *sge = sk_msg_elem(msg_en, start);
504 	int rc, iv_offset = 0;
505 
506 	/* For CCM-based ciphers, the first byte of the IV is a constant */
507 	switch (prot->cipher_type) {
508 	case TLS_CIPHER_AES_CCM_128:
509 		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
510 		iv_offset = 1;
511 		break;
512 	case TLS_CIPHER_SM4_CCM:
513 		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
514 		iv_offset = 1;
515 		break;
516 	}
517 
518 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
519 	       prot->iv_size + prot->salt_size);
520 
521 	xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
522 
523 	sge->offset += prot->prepend_size;
524 	sge->length -= prot->prepend_size;
525 
526 	msg_en->sg.curr = start;
527 
528 	aead_request_set_tfm(aead_req, ctx->aead_send);
529 	aead_request_set_ad(aead_req, prot->aad_size);
530 	aead_request_set_crypt(aead_req, rec->sg_aead_in,
531 			       rec->sg_aead_out,
532 			       data_len, rec->iv_data);
533 
534 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
535 				  tls_encrypt_done, sk);
536 
537 	/* Add the record to tx_list */
538 	list_add_tail(&rec->list, &ctx->tx_list);
539 	atomic_inc(&ctx->encrypt_pending);
540 
541 	rc = crypto_aead_encrypt(aead_req);
542 	if (rc != -EINPROGRESS) {
543 		atomic_dec(&ctx->encrypt_pending);
544 		sge->offset -= prot->prepend_size;
545 		sge->length += prot->prepend_size;
546 	}
547 
548 	if (!rc) {
549 		WRITE_ONCE(rec->tx_ready, true);
550 	} else if (rc != -EINPROGRESS) {
551 		list_del(&rec->list);
552 		return rc;
553 	}
554 
555 	/* Unhook the record from the context if encryption did not fail */
556 	ctx->open_rec = NULL;
557 	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
558 	return rc;
559 }
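
/* Editorial sketch, not part of the original file: for TLS 1.3 (and for
 * ChaCha20-Poly1305 in TLS 1.2) xor_iv_with_seq() derives the per-record
 * nonce by XORing the 64-bit record sequence number into the low eight
 * bytes of the static IV (RFC 8446, section 5.3). A standalone model,
 * assuming a 12-byte IV:
 */
static void tls13_build_nonce(unsigned char nonce[12],
			      const unsigned char static_iv[12],
			      unsigned long long seq)
{
	int i;

	memcpy(nonce, static_iv, 12);
	for (i = 0; i < 8; i++)		/* big-endian seq into bytes 4..11 */
		nonce[4 + i] ^= (unsigned char)(seq >> (56 - 8 * i));
}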
560 
561 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
562 				 struct tls_rec **to, struct sk_msg *msg_opl,
563 				 struct sk_msg *msg_oen, u32 split_point,
564 				 u32 tx_overhead_size, u32 *orig_end)
565 {
566 	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
567 	struct scatterlist *sge, *osge, *nsge;
568 	u32 orig_size = msg_opl->sg.size;
569 	struct scatterlist tmp = { };
570 	struct sk_msg *msg_npl;
571 	struct tls_rec *new;
572 	int ret;
573 
574 	new = tls_get_rec(sk);
575 	if (!new)
576 		return -ENOMEM;
577 	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
578 			   tx_overhead_size, 0);
579 	if (ret < 0) {
580 		tls_free_rec(sk, new);
581 		return ret;
582 	}
583 
584 	*orig_end = msg_opl->sg.end;
585 	i = msg_opl->sg.start;
586 	sge = sk_msg_elem(msg_opl, i);
587 	while (apply && sge->length) {
588 		if (sge->length > apply) {
589 			u32 len = sge->length - apply;
590 
591 			get_page(sg_page(sge));
592 			sg_set_page(&tmp, sg_page(sge), len,
593 				    sge->offset + apply);
594 			sge->length = apply;
595 			bytes += apply;
596 			apply = 0;
597 		} else {
598 			apply -= sge->length;
599 			bytes += sge->length;
600 		}
601 
602 		sk_msg_iter_var_next(i);
603 		if (i == msg_opl->sg.end)
604 			break;
605 		sge = sk_msg_elem(msg_opl, i);
606 	}
607 
608 	msg_opl->sg.end = i;
609 	msg_opl->sg.curr = i;
610 	msg_opl->sg.copybreak = 0;
611 	msg_opl->apply_bytes = 0;
612 	msg_opl->sg.size = bytes;
613 
614 	msg_npl = &new->msg_plaintext;
615 	msg_npl->apply_bytes = apply;
616 	msg_npl->sg.size = orig_size - bytes;
617 
618 	j = msg_npl->sg.start;
619 	nsge = sk_msg_elem(msg_npl, j);
620 	if (tmp.length) {
621 		memcpy(nsge, &tmp, sizeof(*nsge));
622 		sk_msg_iter_var_next(j);
623 		nsge = sk_msg_elem(msg_npl, j);
624 	}
625 
626 	osge = sk_msg_elem(msg_opl, i);
627 	while (osge->length) {
628 		memcpy(nsge, osge, sizeof(*nsge));
629 		sg_unmark_end(nsge);
630 		sk_msg_iter_var_next(i);
631 		sk_msg_iter_var_next(j);
632 		if (i == *orig_end)
633 			break;
634 		osge = sk_msg_elem(msg_opl, i);
635 		nsge = sk_msg_elem(msg_npl, j);
636 	}
637 
638 	msg_npl->sg.end = j;
639 	msg_npl->sg.curr = j;
640 	msg_npl->sg.copybreak = 0;
641 
642 	*to = new;
643 	return 0;
644 }
645 
646 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
647 				  struct tls_rec *from, u32 orig_end)
648 {
649 	struct sk_msg *msg_npl = &from->msg_plaintext;
650 	struct sk_msg *msg_opl = &to->msg_plaintext;
651 	struct scatterlist *osge, *nsge;
652 	u32 i, j;
653 
654 	i = msg_opl->sg.end;
655 	sk_msg_iter_var_prev(i);
656 	j = msg_npl->sg.start;
657 
658 	osge = sk_msg_elem(msg_opl, i);
659 	nsge = sk_msg_elem(msg_npl, j);
660 
661 	if (sg_page(osge) == sg_page(nsge) &&
662 	    osge->offset + osge->length == nsge->offset) {
663 		osge->length += nsge->length;
664 		put_page(sg_page(nsge));
665 	}
666 
667 	msg_opl->sg.end = orig_end;
668 	msg_opl->sg.curr = orig_end;
669 	msg_opl->sg.copybreak = 0;
670 	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
671 	msg_opl->sg.size += msg_npl->sg.size;
672 
673 	sk_msg_free(sk, &to->msg_encrypted);
674 	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
675 
676 	kfree(from);
677 }
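
/* Editorial note: a record is split at msg_pl->apply_bytes (see
 * tls_push_record() below) so that a BPF verdict can apply to an exact
 * byte range; tls_merge_open_record() undoes the split when the second
 * half turns out to be empty or the encrypt step fails.
 */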
678 
679 static int tls_push_record(struct sock *sk, int flags,
680 			   unsigned char record_type)
681 {
682 	struct tls_context *tls_ctx = tls_get_ctx(sk);
683 	struct tls_prot_info *prot = &tls_ctx->prot_info;
684 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
685 	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
686 	u32 i, split_point, orig_end;
687 	struct sk_msg *msg_pl, *msg_en;
688 	struct aead_request *req;
689 	bool split;
690 	int rc;
691 
692 	if (!rec)
693 		return 0;
694 
695 	msg_pl = &rec->msg_plaintext;
696 	msg_en = &rec->msg_encrypted;
697 
698 	split_point = msg_pl->apply_bytes;
699 	split = split_point && split_point < msg_pl->sg.size;
700 	if (unlikely((!split &&
701 		      msg_pl->sg.size +
702 		      prot->overhead_size > msg_en->sg.size) ||
703 		     (split &&
704 		      split_point +
705 		      prot->overhead_size > msg_en->sg.size))) {
706 		split = true;
707 		split_point = msg_en->sg.size;
708 	}
709 	if (split) {
710 		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
711 					   split_point, prot->overhead_size,
712 					   &orig_end);
713 		if (rc < 0)
714 			return rc;
715 		/* This can happen if the tls_split_open_record() above
716 		 * allocated a single large encryption buffer instead of two
717 		 * smaller ones. In that case, adjust the pointers and
718 		 * continue without splitting.
719 		 */
720 		if (!msg_pl->sg.size) {
721 			tls_merge_open_record(sk, rec, tmp, orig_end);
722 			msg_pl = &rec->msg_plaintext;
723 			msg_en = &rec->msg_encrypted;
724 			split = false;
725 		}
726 		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
727 			    prot->overhead_size);
728 	}
729 
730 	rec->tx_flags = flags;
731 	req = &rec->aead_req;
732 
733 	i = msg_pl->sg.end;
734 	sk_msg_iter_var_prev(i);
735 
736 	rec->content_type = record_type;
737 	if (prot->version == TLS_1_3_VERSION) {
738 		/* Add content type to end of message.  No padding added */
739 		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
740 		sg_mark_end(&rec->sg_content_type);
741 		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
742 			 &rec->sg_content_type);
743 	} else {
744 		sg_mark_end(sk_msg_elem(msg_pl, i));
745 	}
746 
747 	if (msg_pl->sg.end < msg_pl->sg.start) {
748 		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
749 			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
750 			 msg_pl->sg.data);
751 	}
752 
753 	i = msg_pl->sg.start;
754 	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
755 
756 	i = msg_en->sg.end;
757 	sk_msg_iter_var_prev(i);
758 	sg_mark_end(sk_msg_elem(msg_en, i));
759 
760 	i = msg_en->sg.start;
761 	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
762 
763 	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
764 		     tls_ctx->tx.rec_seq, record_type, prot);
765 
766 	tls_fill_prepend(tls_ctx,
767 			 page_address(sg_page(&msg_en->sg.data[i])) +
768 			 msg_en->sg.data[i].offset,
769 			 msg_pl->sg.size + prot->tail_size,
770 			 record_type);
771 
772 	tls_ctx->pending_open_record_frags = false;
773 
774 	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
775 			       msg_pl->sg.size + prot->tail_size, i);
776 	if (rc < 0) {
777 		if (rc != -EINPROGRESS) {
778 			tls_err_abort(sk, -EBADMSG);
779 			if (split) {
780 				tls_ctx->pending_open_record_frags = true;
781 				tls_merge_open_record(sk, rec, tmp, orig_end);
782 			}
783 		}
784 		ctx->async_capable = 1;
785 		return rc;
786 	} else if (split) {
787 		msg_pl = &tmp->msg_plaintext;
788 		msg_en = &tmp->msg_encrypted;
789 		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
790 		tls_ctx->pending_open_record_frags = true;
791 		ctx->open_rec = tmp;
792 	}
793 
794 	return tls_tx_records(sk, flags);
795 }
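
/* Editorial note: tls_push_record() assembles the AEAD inputs around the
 * open record: sg_aead_in chains the AAD buffer in front of the plaintext
 * pages, sg_aead_out chains it in front of the encrypted pages, and
 * tls_fill_prepend() writes the record header (plus, for TLS 1.2 ciphers
 * with an explicit nonce, the per-record IV) into the first encrypted
 * fragment. The TLS 1.2 AAD is seq(8) || type(1) || version(2) ||
 * length(2); for TLS 1.3 it is just the 5-byte record header itself.
 */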
796 
797 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
798 			       bool full_record, u8 record_type,
799 			       ssize_t *copied, int flags)
800 {
801 	struct tls_context *tls_ctx = tls_get_ctx(sk);
802 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
803 	struct sk_msg msg_redir = { };
804 	struct sk_psock *psock;
805 	struct sock *sk_redir;
806 	struct tls_rec *rec;
807 	bool enospc, policy;
808 	int err = 0, send;
809 	u32 delta = 0;
810 
811 	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
812 	psock = sk_psock_get(sk);
813 	if (!psock || !policy) {
814 		err = tls_push_record(sk, flags, record_type);
815 		if (err && sk->sk_err == EBADMSG) {
816 			*copied -= sk_msg_free(sk, msg);
817 			tls_free_open_rec(sk);
818 			err = -sk->sk_err;
819 		}
820 		if (psock)
821 			sk_psock_put(sk, psock);
822 		return err;
823 	}
824 more_data:
825 	enospc = sk_msg_full(msg);
826 	if (psock->eval == __SK_NONE) {
827 		delta = msg->sg.size;
828 		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
829 		delta -= msg->sg.size;
830 	}
831 	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
832 	    !enospc && !full_record) {
833 		err = -ENOSPC;
834 		goto out_err;
835 	}
836 	msg->cork_bytes = 0;
837 	send = msg->sg.size;
838 	if (msg->apply_bytes && msg->apply_bytes < send)
839 		send = msg->apply_bytes;
840 
841 	switch (psock->eval) {
842 	case __SK_PASS:
843 		err = tls_push_record(sk, flags, record_type);
844 		if (err && sk->sk_err == EBADMSG) {
845 			*copied -= sk_msg_free(sk, msg);
846 			tls_free_open_rec(sk);
847 			err = -sk->sk_err;
848 			goto out_err;
849 		}
850 		break;
851 	case __SK_REDIRECT:
852 		sk_redir = psock->sk_redir;
853 		memcpy(&msg_redir, msg, sizeof(*msg));
854 		if (msg->apply_bytes < send)
855 			msg->apply_bytes = 0;
856 		else
857 			msg->apply_bytes -= send;
858 		sk_msg_return_zero(sk, msg, send);
859 		msg->sg.size -= send;
860 		release_sock(sk);
861 		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
862 		lock_sock(sk);
863 		if (err < 0) {
864 			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
865 			msg->sg.size = 0;
866 		}
867 		if (msg->sg.size == 0)
868 			tls_free_open_rec(sk);
869 		break;
870 	case __SK_DROP:
871 	default:
872 		sk_msg_free_partial(sk, msg, send);
873 		if (msg->apply_bytes < send)
874 			msg->apply_bytes = 0;
875 		else
876 			msg->apply_bytes -= send;
877 		if (msg->sg.size == 0)
878 			tls_free_open_rec(sk);
879 		*copied -= (send + delta);
880 		err = -EACCES;
881 	}
882 
883 	if (likely(!err)) {
884 		bool reset_eval = !ctx->open_rec;
885 
886 		rec = ctx->open_rec;
887 		if (rec) {
888 			msg = &rec->msg_plaintext;
889 			if (!msg->apply_bytes)
890 				reset_eval = true;
891 		}
892 		if (reset_eval) {
893 			psock->eval = __SK_NONE;
894 			if (psock->sk_redir) {
895 				sock_put(psock->sk_redir);
896 				psock->sk_redir = NULL;
897 			}
898 		}
899 		if (rec)
900 			goto more_data;
901 	}
902  out_err:
903 	sk_psock_put(sk, psock);
904 	return err;
905 }
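
/* Editorial note: the verdict from the attached sockmap program selects
 * the transmit path above: __SK_PASS encrypts and sends the record on
 * this socket, __SK_REDIRECT hands the still-plaintext sk_msg to another
 * socket via tcp_bpf_sendmsg_redir(), and __SK_DROP frees the data and
 * fails the send with -EACCES.
 */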
906 
907 static int tls_sw_push_pending_record(struct sock *sk, int flags)
908 {
909 	struct tls_context *tls_ctx = tls_get_ctx(sk);
910 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
911 	struct tls_rec *rec = ctx->open_rec;
912 	struct sk_msg *msg_pl;
913 	size_t copied;
914 
915 	if (!rec)
916 		return 0;
917 
918 	msg_pl = &rec->msg_plaintext;
919 	copied = msg_pl->sg.size;
920 	if (!copied)
921 		return 0;
922 
923 	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
924 				   &copied, flags);
925 }
926 
927 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
928 {
929 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
930 	struct tls_context *tls_ctx = tls_get_ctx(sk);
931 	struct tls_prot_info *prot = &tls_ctx->prot_info;
932 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
933 	bool async_capable = ctx->async_capable;
934 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
935 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
936 	bool eor = !(msg->msg_flags & MSG_MORE);
937 	size_t try_to_copy;
938 	ssize_t copied = 0;
939 	struct sk_msg *msg_pl, *msg_en;
940 	struct tls_rec *rec;
941 	int required_size;
942 	int num_async = 0;
943 	bool full_record;
944 	int record_room;
945 	int num_zc = 0;
946 	int orig_size;
947 	int ret = 0;
948 	int pending;
949 
950 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
951 			       MSG_CMSG_COMPAT))
952 		return -EOPNOTSUPP;
953 
954 	mutex_lock(&tls_ctx->tx_lock);
955 	lock_sock(sk);
956 
957 	if (unlikely(msg->msg_controllen)) {
958 		ret = tls_proccess_cmsg(sk, msg, &record_type);
959 		if (ret) {
960 			if (ret == -EINPROGRESS)
961 				num_async++;
962 			else if (ret != -EAGAIN)
963 				goto send_end;
964 		}
965 	}
966 
967 	while (msg_data_left(msg)) {
968 		if (sk->sk_err) {
969 			ret = -sk->sk_err;
970 			goto send_end;
971 		}
972 
973 		if (ctx->open_rec)
974 			rec = ctx->open_rec;
975 		else
976 			rec = ctx->open_rec = tls_get_rec(sk);
977 		if (!rec) {
978 			ret = -ENOMEM;
979 			goto send_end;
980 		}
981 
982 		msg_pl = &rec->msg_plaintext;
983 		msg_en = &rec->msg_encrypted;
984 
985 		orig_size = msg_pl->sg.size;
986 		full_record = false;
987 		try_to_copy = msg_data_left(msg);
988 		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
989 		if (try_to_copy >= record_room) {
990 			try_to_copy = record_room;
991 			full_record = true;
992 		}
993 
994 		required_size = msg_pl->sg.size + try_to_copy +
995 				prot->overhead_size;
996 
997 		if (!sk_stream_memory_free(sk))
998 			goto wait_for_sndbuf;
999 
1000 alloc_encrypted:
1001 		ret = tls_alloc_encrypted_msg(sk, required_size);
1002 		if (ret) {
1003 			if (ret != -ENOSPC)
1004 				goto wait_for_memory;
1005 
1006 			/* Adjust try_to_copy according to the amount that was
1007 			 * actually allocated. The difference is due to the
1008 			 * max sg elements limit.
1009 			 */
1010 			try_to_copy -= required_size - msg_en->sg.size;
1011 			full_record = true;
1012 		}
1013 
1014 		if (!is_kvec && (full_record || eor) && !async_capable) {
1015 			u32 first = msg_pl->sg.end;
1016 
1017 			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1018 							msg_pl, try_to_copy);
1019 			if (ret)
1020 				goto fallback_to_reg_send;
1021 
1022 			num_zc++;
1023 			copied += try_to_copy;
1024 
1025 			sk_msg_sg_copy_set(msg_pl, first);
1026 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1027 						  record_type, &copied,
1028 						  msg->msg_flags);
1029 			if (ret) {
1030 				if (ret == -EINPROGRESS)
1031 					num_async++;
1032 				else if (ret == -ENOMEM)
1033 					goto wait_for_memory;
1034 				else if (ctx->open_rec && ret == -ENOSPC)
1035 					goto rollback_iter;
1036 				else if (ret != -EAGAIN)
1037 					goto send_end;
1038 			}
1039 			continue;
1040 rollback_iter:
1041 			copied -= try_to_copy;
1042 			sk_msg_sg_copy_clear(msg_pl, first);
1043 			iov_iter_revert(&msg->msg_iter,
1044 					msg_pl->sg.size - orig_size);
1045 fallback_to_reg_send:
1046 			sk_msg_trim(sk, msg_pl, orig_size);
1047 		}
1048 
1049 		required_size = msg_pl->sg.size + try_to_copy;
1050 
1051 		ret = tls_clone_plaintext_msg(sk, required_size);
1052 		if (ret) {
1053 			if (ret != -ENOSPC)
1054 				goto send_end;
1055 
1056 			/* Adjust try_to_copy according to the amount that was
1057 			 * actually allocated. The difference is due to the
1058 			 * max sg elements limit.
1059 			 */
1060 			try_to_copy -= required_size - msg_pl->sg.size;
1061 			full_record = true;
1062 			sk_msg_trim(sk, msg_en,
1063 				    msg_pl->sg.size + prot->overhead_size);
1064 		}
1065 
1066 		if (try_to_copy) {
1067 			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1068 						       msg_pl, try_to_copy);
1069 			if (ret < 0)
1070 				goto trim_sgl;
1071 		}
1072 
1073 		/* An open record is only defined once the copy succeeds; otherwise
1074 		 * we would trim the sg but not reset the open record frags.
1075 		 */
1076 		tls_ctx->pending_open_record_frags = true;
1077 		copied += try_to_copy;
1078 		if (full_record || eor) {
1079 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1080 						  record_type, &copied,
1081 						  msg->msg_flags);
1082 			if (ret) {
1083 				if (ret == -EINPROGRESS)
1084 					num_async++;
1085 				else if (ret == -ENOMEM)
1086 					goto wait_for_memory;
1087 				else if (ret != -EAGAIN) {
1088 					if (ret == -ENOSPC)
1089 						ret = 0;
1090 					goto send_end;
1091 				}
1092 			}
1093 		}
1094 
1095 		continue;
1096 
1097 wait_for_sndbuf:
1098 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1099 wait_for_memory:
1100 		ret = sk_stream_wait_memory(sk, &timeo);
1101 		if (ret) {
1102 trim_sgl:
1103 			if (ctx->open_rec)
1104 				tls_trim_both_msgs(sk, orig_size);
1105 			goto send_end;
1106 		}
1107 
1108 		if (ctx->open_rec && msg_en->sg.size < required_size)
1109 			goto alloc_encrypted;
1110 	}
1111 
1112 	if (!num_async) {
1113 		goto send_end;
1114 	} else if (num_zc) {
1115 		/* Wait for pending encryptions to get completed */
1116 		spin_lock_bh(&ctx->encrypt_compl_lock);
1117 		ctx->async_notify = true;
1118 
1119 		pending = atomic_read(&ctx->encrypt_pending);
1120 		spin_unlock_bh(&ctx->encrypt_compl_lock);
1121 		if (pending)
1122 			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1123 		else
1124 			reinit_completion(&ctx->async_wait.completion);
1125 
1126 		/* There can be no concurrent accesses, since we have no
1127 		 * pending encrypt operations
1128 		 */
1129 		WRITE_ONCE(ctx->async_notify, false);
1130 
1131 		if (ctx->async_wait.err) {
1132 			ret = ctx->async_wait.err;
1133 			copied = 0;
1134 		}
1135 	}
1136 
1137 	/* Transmit if any encryptions have completed */
1138 	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1139 		cancel_delayed_work(&ctx->tx_work.work);
1140 		tls_tx_records(sk, msg->msg_flags);
1141 	}
1142 
1143 send_end:
1144 	ret = sk_stream_error(sk, msg->msg_flags, ret);
1145 
1146 	release_sock(sk);
1147 	mutex_unlock(&tls_ctx->tx_lock);
1148 	return copied > 0 ? copied : ret;
1149 }
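
/* Editorial example, not part of the original file: a minimal userspace
 * sketch of arming the TLS_TX path that feeds tls_sw_sendmsg(). It
 * assumes a connected TCP socket and AES-128-GCM key material negotiated
 * elsewhere (e.g. by a userspace handshake); error handling is trimmed,
 * and SOL_TLS is defined in case older libc headers lack it.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int ktls_enable_tx(int fd, const unsigned char *key,
			  const unsigned char *iv, const unsigned char *salt,
			  const unsigned char *rec_seq)
{
	struct tls12_crypto_info_aes_gcm_128 ci = {
		.info.version = TLS_1_2_VERSION,
		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
	};

	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	/* from here on, send()/sendmsg() go through tls_sw_sendmsg() */
	return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}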
1150 
1151 static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1152 			      int offset, size_t size, int flags)
1153 {
1154 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1155 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1156 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1157 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1158 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
1159 	struct sk_msg *msg_pl;
1160 	struct tls_rec *rec;
1161 	int num_async = 0;
1162 	ssize_t copied = 0;
1163 	bool full_record;
1164 	int record_room;
1165 	int ret = 0;
1166 	bool eor;
1167 
1168 	eor = !(flags & MSG_SENDPAGE_NOTLAST);
1169 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1170 
1171 	/* Call the sk_stream functions to manage the sndbuf mem. */
1172 	while (size > 0) {
1173 		size_t copy, required_size;
1174 
1175 		if (sk->sk_err) {
1176 			ret = -sk->sk_err;
1177 			goto sendpage_end;
1178 		}
1179 
1180 		if (ctx->open_rec)
1181 			rec = ctx->open_rec;
1182 		else
1183 			rec = ctx->open_rec = tls_get_rec(sk);
1184 		if (!rec) {
1185 			ret = -ENOMEM;
1186 			goto sendpage_end;
1187 		}
1188 
1189 		msg_pl = &rec->msg_plaintext;
1190 
1191 		full_record = false;
1192 		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
1193 		copy = size;
1194 		if (copy >= record_room) {
1195 			copy = record_room;
1196 			full_record = true;
1197 		}
1198 
1199 		required_size = msg_pl->sg.size + copy + prot->overhead_size;
1200 
1201 		if (!sk_stream_memory_free(sk))
1202 			goto wait_for_sndbuf;
1203 alloc_payload:
1204 		ret = tls_alloc_encrypted_msg(sk, required_size);
1205 		if (ret) {
1206 			if (ret != -ENOSPC)
1207 				goto wait_for_memory;
1208 
1209 			/* Adjust copy according to the amount that was
1210 			 * actually allocated. The difference is due to the
1211 			 * max sg elements limit.
1212 			 */
1213 			copy -= required_size - msg_pl->sg.size;
1214 			full_record = true;
1215 		}
1216 
1217 		sk_msg_page_add(msg_pl, page, copy, offset);
1218 		sk_mem_charge(sk, copy);
1219 
1220 		offset += copy;
1221 		size -= copy;
1222 		copied += copy;
1223 
1224 		tls_ctx->pending_open_record_frags = true;
1225 		if (full_record || eor || sk_msg_full(msg_pl)) {
1226 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1227 						  record_type, &copied, flags);
1228 			if (ret) {
1229 				if (ret == -EINPROGRESS)
1230 					num_async++;
1231 				else if (ret == -ENOMEM)
1232 					goto wait_for_memory;
1233 				else if (ret != -EAGAIN) {
1234 					if (ret == -ENOSPC)
1235 						ret = 0;
1236 					goto sendpage_end;
1237 				}
1238 			}
1239 		}
1240 		continue;
1241 wait_for_sndbuf:
1242 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1243 wait_for_memory:
1244 		ret = sk_stream_wait_memory(sk, &timeo);
1245 		if (ret) {
1246 			if (ctx->open_rec)
1247 				tls_trim_both_msgs(sk, msg_pl->sg.size);
1248 			goto sendpage_end;
1249 		}
1250 
1251 		if (ctx->open_rec)
1252 			goto alloc_payload;
1253 	}
1254 
1255 	if (num_async) {
1256 		/* Transmit if any encryptions have completed */
1257 		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1258 			cancel_delayed_work(&ctx->tx_work.work);
1259 			tls_tx_records(sk, flags);
1260 		}
1261 	}
1262 sendpage_end:
1263 	ret = sk_stream_error(sk, flags, ret);
1264 	return copied > 0 ? copied : ret;
1265 }
1266 
1267 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1268 			   int offset, size_t size, int flags)
1269 {
1270 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1271 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
1272 		      MSG_NO_SHARED_FRAGS))
1273 		return -EOPNOTSUPP;
1274 
1275 	return tls_sw_do_sendpage(sk, page, offset, size, flags);
1276 }
1277 
1278 int tls_sw_sendpage(struct sock *sk, struct page *page,
1279 		    int offset, size_t size, int flags)
1280 {
1281 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1282 	int ret;
1283 
1284 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1285 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1286 		return -EOPNOTSUPP;
1287 
1288 	mutex_lock(&tls_ctx->tx_lock);
1289 	lock_sock(sk);
1290 	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1291 	release_sock(sk);
1292 	mutex_unlock(&tls_ctx->tx_lock);
1293 	return ret;
1294 }
1295 
1296 static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
1297 				     bool nonblock, long timeo, int *err)
1298 {
1299 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1300 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1301 	struct sk_buff *skb;
1302 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1303 
1304 	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
1305 		if (sk->sk_err) {
1306 			*err = sock_error(sk);
1307 			return NULL;
1308 		}
1309 
1310 		if (!skb_queue_empty(&sk->sk_receive_queue)) {
1311 			__strp_unpause(&ctx->strp);
1312 			if (ctx->recv_pkt)
1313 				return ctx->recv_pkt;
1314 		}
1315 
1316 		if (sk->sk_shutdown & RCV_SHUTDOWN)
1317 			return NULL;
1318 
1319 		if (sock_flag(sk, SOCK_DONE))
1320 			return NULL;
1321 
1322 		if (nonblock || !timeo) {
1323 			*err = -EAGAIN;
1324 			return NULL;
1325 		}
1326 
1327 		add_wait_queue(sk_sleep(sk), &wait);
1328 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1329 		sk_wait_event(sk, &timeo,
1330 			      ctx->recv_pkt != skb ||
1331 			      !sk_psock_queue_empty(psock),
1332 			      &wait);
1333 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1334 		remove_wait_queue(sk_sleep(sk), &wait);
1335 
1336 		/* Handle signals */
1337 		if (signal_pending(current)) {
1338 			*err = sock_intr_errno(timeo);
1339 			return NULL;
1340 		}
1341 	}
1342 
1343 	return skb;
1344 }
1345 
1346 static int tls_setup_from_iter(struct iov_iter *from,
1347 			       int length, int *pages_used,
1348 			       struct scatterlist *to,
1349 			       int to_max_pages)
1350 {
1351 	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1352 	struct page *pages[MAX_SKB_FRAGS];
1353 	unsigned int size = 0;
1354 	ssize_t copied, use;
1355 	size_t offset;
1356 
1357 	while (length > 0) {
1358 		i = 0;
1359 		maxpages = to_max_pages - num_elem;
1360 		if (maxpages == 0) {
1361 			rc = -EFAULT;
1362 			goto out;
1363 		}
1364 		copied = iov_iter_get_pages(from, pages,
1365 					    length,
1366 					    maxpages, &offset);
1367 		if (copied <= 0) {
1368 			rc = -EFAULT;
1369 			goto out;
1370 		}
1371 
1372 		iov_iter_advance(from, copied);
1373 
1374 		length -= copied;
1375 		size += copied;
1376 		while (copied) {
1377 			use = min_t(int, copied, PAGE_SIZE - offset);
1378 
1379 			sg_set_page(&to[num_elem],
1380 				    pages[i], use, offset);
1381 			sg_unmark_end(&to[num_elem]);
1382 			/* We do not uncharge memory from this API */
1383 
1384 			offset = 0;
1385 			copied -= use;
1386 
1387 			i++;
1388 			num_elem++;
1389 		}
1390 	}
1391 	/* Mark the end in the last sg entry if newly added */
1392 	if (num_elem > *pages_used)
1393 		sg_mark_end(&to[num_elem - 1]);
1394 out:
1395 	if (rc)
1396 		iov_iter_revert(from, size);
1397 	*pages_used = num_elem;
1398 
1399 	return rc;
1400 }
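
/* Editorial note: tls_setup_from_iter() pins the caller's pages with
 * iov_iter_get_pages() so the AEAD can decrypt straight into them; the
 * references are dropped again in decrypt_internal() (sync) or
 * tls_decrypt_done() (async) once the request finishes.
 */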
1401 
1402 /* This function decrypts the input skb into either out_iov, out_sg, or
1403  * the skb buffers themselves. The darg->zc flag indicates whether
1404  * zero-copy mode should be tried. With zero-copy mode, either out_iov
1405  * or out_sg must be non-NULL. If both out_iov and out_sg are NULL,
1406  * decryption happens in the skb buffers themselves, i.e. zero-copy is
1407  * disabled and darg->zc is updated.
1408  */
1409 
1410 static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1411 			    struct iov_iter *out_iov,
1412 			    struct scatterlist *out_sg,
1413 			    struct tls_decrypt_arg *darg)
1414 {
1415 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1416 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1417 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1418 	struct strp_msg *rxm = strp_msg(skb);
1419 	struct tls_msg *tlm = tls_msg(skb);
1420 	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1421 	struct aead_request *aead_req;
1422 	struct sk_buff *unused;
1423 	u8 *aad, *iv, *mem = NULL;
1424 	struct scatterlist *sgin = NULL;
1425 	struct scatterlist *sgout = NULL;
1426 	const int data_len = rxm->full_len - prot->overhead_size +
1427 			     prot->tail_size;
1428 	int iv_offset = 0;
1429 
1430 	if (darg->zc && (out_iov || out_sg)) {
1431 		if (out_iov)
1432 			n_sgout = 1 +
1433 				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
1434 		else
1435 			n_sgout = sg_nents(out_sg);
1436 		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1437 				 rxm->full_len - prot->prepend_size);
1438 	} else {
1439 		n_sgout = 0;
1440 		darg->zc = false;
1441 		n_sgin = skb_cow_data(skb, 0, &unused);
1442 	}
1443 
1444 	if (n_sgin < 1)
1445 		return -EBADMSG;
1446 
1447 	/* Increment to accommodate AAD */
1448 	n_sgin = n_sgin + 1;
1449 
1450 	nsg = n_sgin + n_sgout;
1451 
1452 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1453 	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
1454 	mem_size = mem_size + prot->aad_size;
1455 	mem_size = mem_size + MAX_IV_SIZE;
1456 
1457 	/* Allocate a single block of memory which contains
1458 	 * aead_req || sgin[] || sgout[] || aad || iv.
1459 	 * This order achieves correct alignment for aead_req, sgin, sgout.
1460 	 */
1461 	mem = kmalloc(mem_size, sk->sk_allocation);
1462 	if (!mem)
1463 		return -ENOMEM;
1464 
1465 	/* Segment the allocated memory */
1466 	aead_req = (struct aead_request *)mem;
1467 	sgin = (struct scatterlist *)(mem + aead_size);
1468 	sgout = sgin + n_sgin;
1469 	aad = (u8 *)(sgout + n_sgout);
1470 	iv = aad + prot->aad_size;
1471 
1472 	/* For CCM-based ciphers, the first byte of nonce+iv is a constant */
1473 	switch (prot->cipher_type) {
1474 	case TLS_CIPHER_AES_CCM_128:
1475 		iv[0] = TLS_AES_CCM_IV_B0_BYTE;
1476 		iv_offset = 1;
1477 		break;
1478 	case TLS_CIPHER_SM4_CCM:
1479 		iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1480 		iv_offset = 1;
1481 		break;
1482 	}
1483 
1484 	/* Prepare IV */
1485 	if (prot->version == TLS_1_3_VERSION ||
1486 	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
1487 		memcpy(iv + iv_offset, tls_ctx->rx.iv,
1488 		       prot->iv_size + prot->salt_size);
1489 	} else {
1490 		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1491 				    iv + iv_offset + prot->salt_size,
1492 				    prot->iv_size);
1493 		if (err < 0) {
1494 			kfree(mem);
1495 			return err;
1496 		}
1497 		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
1498 	}
1499 	xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
1500 
1501 	/* Prepare AAD */
1502 	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
1503 		     prot->tail_size,
1504 		     tls_ctx->rx.rec_seq, tlm->control, prot);
1505 
1506 	/* Prepare sgin */
1507 	sg_init_table(sgin, n_sgin);
1508 	sg_set_buf(&sgin[0], aad, prot->aad_size);
1509 	err = skb_to_sgvec(skb, &sgin[1],
1510 			   rxm->offset + prot->prepend_size,
1511 			   rxm->full_len - prot->prepend_size);
1512 	if (err < 0) {
1513 		kfree(mem);
1514 		return err;
1515 	}
1516 
1517 	if (n_sgout) {
1518 		if (out_iov) {
1519 			sg_init_table(sgout, n_sgout);
1520 			sg_set_buf(&sgout[0], aad, prot->aad_size);
1521 
1522 			err = tls_setup_from_iter(out_iov, data_len,
1523 						  &pages, &sgout[1],
1524 						  (n_sgout - 1));
1525 			if (err < 0)
1526 				goto fallback_to_reg_recv;
1527 		} else if (out_sg) {
1528 			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1529 		} else {
1530 			goto fallback_to_reg_recv;
1531 		}
1532 	} else {
1533 fallback_to_reg_recv:
1534 		sgout = sgin;
1535 		pages = 0;
1536 		darg->zc = false;
1537 	}
1538 
1539 	/* Prepare and submit AEAD request */
1540 	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
1541 				data_len, aead_req, darg);
1542 	if (darg->async)
1543 		return 0;
1544 
1545 	/* Release the pages in case iov was mapped to pages */
1546 	for (; pages > 0; pages--)
1547 		put_page(sg_page(&sgout[pages]));
1548 
1549 	kfree(mem);
1550 	return err;
1551 }
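
/* Editorial note: after the single kmalloc() above, the buffer is carved
 * up as  aead_req | sgin[n_sgin] | sgout[n_sgout] | aad | iv,  with
 * sgin[0] (and sgout[0] on the out_iov path) pointing at the AAD and the
 * remaining entries at the ciphertext and destination pages. Using one
 * allocation keeps the async completion path to a single kfree().
 */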
1552 
1553 static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1554 			      struct iov_iter *dest,
1555 			      struct tls_decrypt_arg *darg)
1556 {
1557 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1558 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1559 	struct strp_msg *rxm = strp_msg(skb);
1560 	struct tls_msg *tlm = tls_msg(skb);
1561 	int pad, err;
1562 
1563 	if (tlm->decrypted) {
1564 		darg->zc = false;
1565 		darg->async = false;
1566 		return 0;
1567 	}
1568 
1569 	if (tls_ctx->rx_conf == TLS_HW) {
1570 		err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
1571 		if (err < 0)
1572 			return err;
1573 		if (err > 0) {
1574 			tlm->decrypted = 1;
1575 			darg->zc = false;
1576 			darg->async = false;
1577 			goto decrypt_done;
1578 		}
1579 	}
1580 
1581 	err = decrypt_internal(sk, skb, dest, NULL, darg);
1582 	if (err < 0)
1583 		return err;
1584 	if (darg->async)
1585 		goto decrypt_next;
1586 
1587 decrypt_done:
1588 	pad = padding_length(prot, skb);
1589 	if (pad < 0)
1590 		return pad;
1591 
1592 	rxm->full_len -= pad;
1593 	rxm->offset += prot->prepend_size;
1594 	rxm->full_len -= prot->overhead_size;
1595 	tlm->decrypted = 1;
1596 decrypt_next:
1597 	tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1598 
1599 	return 0;
1600 }
1601 
1602 int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1603 		struct scatterlist *sgout)
1604 {
1605 	struct tls_decrypt_arg darg = { .zc = true, };
1606 
1607 	return decrypt_internal(sk, skb, NULL, sgout, &darg);
1608 }
1609 
1610 static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
1611 				   u8 *control)
1612 {
1613 	int err;
1614 
1615 	if (!*control) {
1616 		*control = tlm->control;
1617 		if (!*control)
1618 			return -EBADMSG;
1619 
1620 		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1621 			       sizeof(*control), control);
1622 		if (*control != TLS_RECORD_TYPE_DATA) {
1623 			if (err || msg->msg_flags & MSG_CTRUNC)
1624 				return -EIO;
1625 		}
1626 	} else if (*control != tlm->control) {
1627 		return 0;
1628 	}
1629 
1630 	return 1;
1631 }
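
/* Editorial example, not part of the original file: the userspace view of
 * the TLS_GET_RECORD_TYPE cmsg emitted above, reusing the headers and
 * SOL_TLS fallback from the TLS_TX sketch earlier. The default of 23
 * (ContentType application_data) is an assumption for records that carry
 * no cmsg.
 */
static ssize_t ktls_recv(int fd, void *buf, size_t len,
			 unsigned char *rec_type)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t n;

	n = recvmsg(fd, &msg, 0);
	if (n < 0)
		return n;

	*rec_type = 23;		/* ContentType application_data */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_TLS &&
		    cmsg->cmsg_type == TLS_GET_RECORD_TYPE)
			*rec_type = *CMSG_DATA(cmsg);
	return n;
}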
1632 
1633 /* This function traverses the rx_list in the tls receive context and copies
1634  * the decrypted records into the buffer provided by the caller when zero
1635  * copy is not true. Further, records are removed from the rx_list if this is
1636  * not a peek case and the record has been consumed completely.
1637  */
1638 static int process_rx_list(struct tls_sw_context_rx *ctx,
1639 			   struct msghdr *msg,
1640 			   u8 *control,
1641 			   size_t skip,
1642 			   size_t len,
1643 			   bool zc,
1644 			   bool is_peek)
1645 {
1646 	struct sk_buff *skb = skb_peek(&ctx->rx_list);
1647 	struct tls_msg *tlm;
1648 	ssize_t copied = 0;
1649 	int err;
1650 
1651 	while (skip && skb) {
1652 		struct strp_msg *rxm = strp_msg(skb);
1653 		tlm = tls_msg(skb);
1654 
1655 		err = tls_record_content_type(msg, tlm, control);
1656 		if (err <= 0)
1657 			goto out;
1658 
1659 		if (skip < rxm->full_len)
1660 			break;
1661 
1662 		skip = skip - rxm->full_len;
1663 		skb = skb_peek_next(skb, &ctx->rx_list);
1664 	}
1665 
1666 	while (len && skb) {
1667 		struct sk_buff *next_skb;
1668 		struct strp_msg *rxm = strp_msg(skb);
1669 		int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1670 
1671 		tlm = tls_msg(skb);
1672 
1673 		err = tls_record_content_type(msg, tlm, control);
1674 		if (err <= 0)
1675 			goto out;
1676 
1677 		if (!zc || (rxm->full_len - skip) > len) {
1678 			err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1679 						    msg, chunk);
1680 			if (err < 0)
1681 				goto out;
1682 		}
1683 
1684 		len = len - chunk;
1685 		copied = copied + chunk;
1686 
1687 		/* Consume the data from the record if this is not a peek case */
1688 		if (!is_peek) {
1689 			rxm->offset = rxm->offset + chunk;
1690 			rxm->full_len = rxm->full_len - chunk;
1691 
1692 			/* Return if there is unconsumed data in the record */
1693 			if (rxm->full_len - skip)
1694 				break;
1695 		}
1696 
1697 		/* The remaining skip-bytes must lie in the 1st record in rx_list,
1698 		 * so from the 2nd record onwards 'skip' should be 0.
1699 		 */
1700 		skip = 0;
1701 
1702 		if (msg)
1703 			msg->msg_flags |= MSG_EOR;
1704 
1705 		next_skb = skb_peek_next(skb, &ctx->rx_list);
1706 
1707 		if (!is_peek) {
1708 			__skb_unlink(skb, &ctx->rx_list);
1709 			consume_skb(skb);
1710 		}
1711 
1712 		skb = next_skb;
1713 	}
1714 	err = 0;
1715 
1716 out:
1717 	return copied ? : err;
1718 }
1719 
1720 int tls_sw_recvmsg(struct sock *sk,
1721 		   struct msghdr *msg,
1722 		   size_t len,
1723 		   int flags,
1724 		   int *addr_len)
1725 {
1726 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1727 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1728 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1729 	struct sk_psock *psock;
1730 	unsigned char control = 0;
1731 	ssize_t decrypted = 0;
1732 	struct strp_msg *rxm;
1733 	struct tls_msg *tlm;
1734 	struct sk_buff *skb;
1735 	ssize_t copied = 0;
1736 	bool async = false;
1737 	int target, err = 0;
1738 	long timeo;
1739 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1740 	bool is_peek = flags & MSG_PEEK;
1741 	bool bpf_strp_enabled;
1742 	bool zc_capable;
1743 
1744 	if (unlikely(flags & MSG_ERRQUEUE))
1745 		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1746 
1747 	psock = sk_psock_get(sk);
1748 	lock_sock(sk);
1749 	bpf_strp_enabled = sk_psock_strp_enabled(psock);
1750 
1751 	/* If crypto failed the connection is broken */
1752 	err = ctx->async_wait.err;
1753 	if (err)
1754 		goto end;
1755 
1756 	/* Process pending decrypted records. It must be non-zero-copy */
1757 	err = process_rx_list(ctx, msg, &control, 0, len, false, is_peek);
1758 	if (err < 0)
1759 		goto end;
1760 
1761 	copied = err;
1762 	if (len <= copied)
1763 		goto end;
1764 
1765 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1766 	len = len - copied;
1767 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1768 
1769 	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
1770 		     prot->version != TLS_1_3_VERSION;
1771 	decrypted = 0;
1772 	while (len && (decrypted + copied < target || ctx->recv_pkt)) {
1773 		struct tls_decrypt_arg darg = {};
1774 		int to_decrypt, chunk;
1775 
1776 		skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
1777 		if (!skb) {
1778 			if (psock) {
1779 				chunk = sk_msg_recvmsg(sk, psock, msg, len,
1780 						       flags);
1781 				if (chunk > 0)
1782 					goto leave_on_list;
1783 			}
1784 			goto recv_end;
1785 		}
1786 
1787 		rxm = strp_msg(skb);
1788 		tlm = tls_msg(skb);
1789 
1790 		to_decrypt = rxm->full_len - prot->overhead_size;
1791 
1792 		if (zc_capable && to_decrypt <= len &&
1793 		    tlm->control == TLS_RECORD_TYPE_DATA)
1794 			darg.zc = true;
1795 
1796 		/* Do not use async mode if record is non-data */
1797 		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
1798 			darg.async = ctx->async_capable;
1799 		else
1800 			darg.async = false;
1801 
1802 		err = decrypt_skb_update(sk, skb, &msg->msg_iter, &darg);
1803 		if (err < 0) {
1804 			tls_err_abort(sk, -EBADMSG);
1805 			goto recv_end;
1806 		}
1807 
1808 		async |= darg.async;
1809 
1810 		/* If the type of records being processed is not known yet,
1811 		 * set it to the record type just dequeued. If it is already
1812 		 * known, but does not match the record type just dequeued, go
1813 		 * to the end. We always have the record type here since for
1814 		 * TLS 1.2 it is known right after the record is dequeued from
1815 		 * the stream parser, and for TLS 1.3 we disable async.
1816 		 */
1817 		err = tls_record_content_type(msg, tlm, &control);
1818 		if (err <= 0)
1819 			goto recv_end;
1820 
1821 		ctx->recv_pkt = NULL;
1822 		__strp_unpause(&ctx->strp);
1823 		__skb_queue_tail(&ctx->rx_list, skb);
1824 
1825 		if (async) {
1826 			/* TLS 1.2-only, to_decrypt must be text length */
1827 			chunk = min_t(int, to_decrypt, len);
1828 leave_on_list:
1829 			decrypted += chunk;
1830 			len -= chunk;
1831 			continue;
1832 		}
1833 		/* TLS 1.3 may have updated the length by more than overhead */
1834 		chunk = rxm->full_len;
1835 
1836 		if (!darg.zc) {
1837 			bool partially_consumed = chunk > len;
1838 
1839 			if (bpf_strp_enabled) {
1840 				/* BPF may try to queue the skb */
1841 				__skb_unlink(skb, &ctx->rx_list);
1842 				err = sk_psock_tls_strp_read(psock, skb);
1843 				if (err != __SK_PASS) {
1844 					rxm->offset = rxm->offset + rxm->full_len;
1845 					rxm->full_len = 0;
1846 					if (err == __SK_DROP)
1847 						consume_skb(skb);
1848 					continue;
1849 				}
1850 				__skb_queue_tail(&ctx->rx_list, skb);
1851 			}
1852 
1853 			if (partially_consumed)
1854 				chunk = len;
1855 
1856 			err = skb_copy_datagram_msg(skb, rxm->offset,
1857 						    msg, chunk);
1858 			if (err < 0)
1859 				goto recv_end;
1860 
1861 			if (is_peek)
1862 				goto leave_on_list;
1863 
1864 			if (partially_consumed) {
1865 				rxm->offset += chunk;
1866 				rxm->full_len -= chunk;
1867 				goto leave_on_list;
1868 			}
1869 		}
1870 
1871 		decrypted += chunk;
1872 		len -= chunk;
1873 
1874 		__skb_unlink(skb, &ctx->rx_list);
1875 		consume_skb(skb);
1876 
1877 		/* Return full control message to userspace before trying
1878 		 * to parse another message type
1879 		 */
1880 		msg->msg_flags |= MSG_EOR;
1881 		if (control != TLS_RECORD_TYPE_DATA)
1882 			break;
1883 	}
1884 
1885 recv_end:
1886 	if (async) {
1887 		int ret, pending;
1888 
1889 		/* Wait for all previously submitted records to be decrypted */
1890 		spin_lock_bh(&ctx->decrypt_compl_lock);
1891 		reinit_completion(&ctx->async_wait.completion);
1892 		pending = atomic_read(&ctx->decrypt_pending);
1893 		spin_unlock_bh(&ctx->decrypt_compl_lock);
1894 		if (pending) {
1895 			ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1896 			if (ret) {
1897 				if (err >= 0 || err == -EINPROGRESS)
1898 					err = ret;
1899 				decrypted = 0;
1900 				goto end;
1901 			}
1902 		}
1903 
1904 		/* Drain records from the rx_list & copy if required */
1905 		if (is_peek || is_kvec)
1906 			err = process_rx_list(ctx, msg, &control, copied,
1907 					      decrypted, false, is_peek);
1908 		else
1909 			err = process_rx_list(ctx, msg, &control, 0,
1910 					      decrypted, true, is_peek);
1911 		decrypted = max(err, 0);
1912 	}
1913 
1914 	copied += decrypted;
1915 
1916 end:
1917 	release_sock(sk);
1918 	if (psock)
1919 		sk_psock_put(sk, psock);
1920 	return copied ? : err;
1921 }
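/* Userspace view of the control-record path above (illustrative sketch,
 * not part of this file; handle_record() is a made-up helper): a non-data
 * record type is delivered as a SOL_TLS / TLS_GET_RECORD_TYPE cmsg and
 * the read is terminated with MSG_EOR.
 *
 *	char buf[256], cbuf[CMSG_SPACE(1)];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	ssize_t n = recvmsg(fd, &m, 0);
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *
 *	if (n > 0 && c && c->cmsg_level == SOL_TLS &&
 *	    c->cmsg_type == TLS_GET_RECORD_TYPE)
 *		handle_record(*CMSG_DATA(c), buf, n);
 */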
1922 
1923 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1924 			   struct pipe_inode_info *pipe,
1925 			   size_t len, unsigned int flags)
1926 {
1927 	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
1928 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1929 	struct strp_msg *rxm = NULL;
1930 	struct sock *sk = sock->sk;
1931 	struct tls_msg *tlm;
1932 	struct sk_buff *skb;
1933 	ssize_t copied = 0;
1934 	bool from_queue;
1935 	int err = 0;
1936 	long timeo;
1937 	int chunk;
1938 
1939 	lock_sock(sk);
1940 
1941 	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
1942 
1943 	from_queue = !skb_queue_empty(&ctx->rx_list);
1944 	if (from_queue) {
1945 		skb = __skb_dequeue(&ctx->rx_list);
1946 	} else {
1947 		struct tls_decrypt_arg darg = {};
1948 
1949 		skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo,
1950 				    &err);
1951 		if (!skb)
1952 			goto splice_read_end;
1953 
1954 		err = decrypt_skb_update(sk, skb, NULL, &darg);
1955 		if (err < 0) {
1956 			tls_err_abort(sk, -EBADMSG);
1957 			goto splice_read_end;
1958 		}
1959 	}
1960 
1961 	rxm = strp_msg(skb);
1962 	tlm = tls_msg(skb);
1963 
1964 	/* splice does not support reading control messages */
1965 	if (tlm->control != TLS_RECORD_TYPE_DATA) {
1966 		err = -EINVAL;
1967 		goto splice_read_end;
1968 	}
1969 
1970 	chunk = min_t(unsigned int, rxm->full_len, len);
1971 	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
1972 	if (copied < 0)
1973 		goto splice_read_end;
1974 
1975 	if (!from_queue) {
1976 		ctx->recv_pkt = NULL;
1977 		__strp_unpause(&ctx->strp);
1978 	}
1979 	if (chunk < rxm->full_len) {
1980 		__skb_queue_head(&ctx->rx_list, skb);
1981 		rxm->offset += chunk;
1982 		rxm->full_len -= chunk;
1983 	} else {
1984 		consume_skb(skb);
1985 	}
1986 
1987 splice_read_end:
1988 	release_sock(sk);
1989 	return copied ? : err;
1990 }
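/* Note: splice() on a kTLS socket therefore only moves
 * TLS_RECORD_TYPE_DATA payload. Control records fail the call with
 * -EINVAL; they can only be delivered through recvmsg() and its cmsg
 * interface.
 */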
1991 
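/* ->sock_is_readable() hook: the socket is readable if the psock ingress
 * queue, the parked recv_pkt, or the partially consumed rx_list has
 * anything left to deliver.
 */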
1992 bool tls_sw_sock_is_readable(struct sock *sk)
1993 {
1994 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1995 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1996 	bool ingress_empty = true;
1997 	struct sk_psock *psock;
1998 
1999 	rcu_read_lock();
2000 	psock = sk_psock(sk);
2001 	if (psock)
2002 		ingress_empty = list_empty(&psock->ingress_msg);
2003 	rcu_read_unlock();
2004 
2005 	return !ingress_empty || ctx->recv_pkt ||
2006 		!skb_queue_empty(&ctx->rx_list);
2007 }
2008 
2009 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2010 {
2011 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2012 	struct tls_prot_info *prot = &tls_ctx->prot_info;
2013 	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2014 	struct strp_msg *rxm = strp_msg(skb);
2015 	struct tls_msg *tlm = tls_msg(skb);
2016 	size_t cipher_overhead;
2017 	size_t data_len = 0;
2018 	int ret;
2019 
2020 	/* Verify that we have a full TLS header, or wait for more data */
2021 	if (rxm->offset + prot->prepend_size > skb->len)
2022 		return 0;
2023 
2024 	/* Sanity-check size of on-stack buffer. */
2025 	if (WARN_ON(prot->prepend_size > sizeof(header))) {
2026 		ret = -EINVAL;
2027 		goto read_failure;
2028 	}
2029 
2030 	/* Linearize header to local buffer */
2031 	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
2032 	if (ret < 0)
2033 		goto read_failure;
2034 
2035 	tlm->decrypted = 0;
2036 	tlm->control = header[0];
2037 
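	/* Wire-format record header parsed here (RFC 5246 / RFC 8446):
	 *   header[0]     ContentType (saved in tlm->control above)
	 *   header[1..2]  legacy version, always 0x03 0x03 on the wire
	 *   header[3..4]  payload length, big endian (data_len)
	 * For TLS 1.2 AES-GCM, the explicit per-record IV follows.
	 */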
2038 	data_len = ((header[4] & 0xFF) | ((header[3] & 0xFF) << 8));
2039 
2040 	cipher_overhead = prot->tag_size;
2041 	if (prot->version != TLS_1_3_VERSION &&
2042 	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2043 		cipher_overhead += prot->iv_size;
2044 
2045 	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2046 	    prot->tail_size) {
2047 		ret = -EMSGSIZE;
2048 		goto read_failure;
2049 	}
2050 	if (data_len < cipher_overhead) {
2051 		ret = -EBADMSG;
2052 		goto read_failure;
2053 	}
2054 
2055 	/* Note that both TLS 1.3 and TLS 1.2 use the TLS 1.2 version here */
2056 	if (header[1] != TLS_1_2_VERSION_MINOR ||
2057 	    header[2] != TLS_1_2_VERSION_MAJOR) {
2058 		ret = -EINVAL;
2059 		goto read_failure;
2060 	}
2061 
2062 	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2063 				     TCP_SKB_CB(skb)->seq + rxm->offset);
2064 	return data_len + TLS_HEADER_SIZE;
2065 
2066 read_failure:
2067 	tls_err_abort(strp->sk, ret);
2068 
2069 	return ret;
2070 }
2071 
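/* strparser rcv_msg callback: park the completed record in recv_pkt and
 * pause the parser; the readers unpause it once the record is consumed.
 */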
2072 static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2073 {
2074 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2075 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2076 
2077 	ctx->recv_pkt = skb;
2078 	strp_pause(strp);
2079 
2080 	ctx->saved_data_ready(strp->sk);
2081 }
2082 
2083 static void tls_data_ready(struct sock *sk)
2084 {
2085 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2086 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2087 	struct sk_psock *psock;
2088 
2089 	strp_data_ready(&ctx->strp);
2090 
2091 	psock = sk_psock_get(sk);
2092 	if (psock) {
2093 		if (!list_empty(&psock->ingress_msg))
2094 			ctx->saved_data_ready(sk);
2095 		sk_psock_put(sk, psock);
2096 	}
2097 }
2098 
2099 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2100 {
2101 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2102 
2103 	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2104 	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2105 	cancel_delayed_work_sync(&ctx->tx_work.work);
2106 }
2107 
2108 void tls_sw_release_resources_tx(struct sock *sk)
2109 {
2110 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2111 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2112 	struct tls_rec *rec, *tmp;
2113 	int pending;
2114 
2115 	/* Wait for any pending async encryptions to complete */
2116 	spin_lock_bh(&ctx->encrypt_compl_lock);
2117 	ctx->async_notify = true;
2118 	pending = atomic_read(&ctx->encrypt_pending);
2119 	spin_unlock_bh(&ctx->encrypt_compl_lock);
2120 
2121 	if (pending)
2122 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2123 
2124 	tls_tx_records(sk, -1);
2125 
2126 	/* Free up unsent records in tx_list. First free the partially
2127 	 * sent record, if any, at the head of tx_list.
2128 	 */
2129 	if (tls_ctx->partially_sent_record) {
2130 		tls_free_partial_record(sk, tls_ctx);
2131 		rec = list_first_entry(&ctx->tx_list,
2132 				       struct tls_rec, list);
2133 		list_del(&rec->list);
2134 		sk_msg_free(sk, &rec->msg_plaintext);
2135 		kfree(rec);
2136 	}
2137 
2138 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2139 		list_del(&rec->list);
2140 		sk_msg_free(sk, &rec->msg_encrypted);
2141 		sk_msg_free(sk, &rec->msg_plaintext);
2142 		kfree(rec);
2143 	}
2144 
2145 	crypto_free_aead(ctx->aead_send);
2146 	tls_free_open_rec(sk);
2147 }
2148 
2149 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2150 {
2151 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2152 
2153 	kfree(ctx);
2154 }
2155 
2156 void tls_sw_release_resources_rx(struct sock *sk)
2157 {
2158 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2159 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2160 
2161 	kfree(tls_ctx->rx.rec_seq);
2162 	kfree(tls_ctx->rx.iv);
2163 
2164 	if (ctx->aead_recv) {
2165 		kfree_skb(ctx->recv_pkt);
2166 		ctx->recv_pkt = NULL;
2167 		__skb_queue_purge(&ctx->rx_list);
2168 		crypto_free_aead(ctx->aead_recv);
2169 		strp_stop(&ctx->strp);
2170 		/* If tls_sw_strparser_arm() was not called (cleanup paths)
2171 		 * we still want to strp_stop(), but sk->sk_data_ready was
2172 		 * never swapped.
2173 		 */
2174 		if (ctx->saved_data_ready) {
2175 			write_lock_bh(&sk->sk_callback_lock);
2176 			sk->sk_data_ready = ctx->saved_data_ready;
2177 			write_unlock_bh(&sk->sk_callback_lock);
2178 		}
2179 	}
2180 }
2181 
2182 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2183 {
2184 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2185 
2186 	strp_done(&ctx->strp);
2187 }
2188 
2189 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2190 {
2191 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2192 
2193 	kfree(ctx);
2194 }
2195 
2196 void tls_sw_free_resources_rx(struct sock *sk)
2197 {
2198 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2199 
2200 	tls_sw_release_resources_rx(sk);
2201 	tls_sw_free_ctx_rx(tls_ctx);
2202 }
2203 
2204 /* The work handler to transmit the encrypted records in tx_list */
2205 static void tx_work_handler(struct work_struct *work)
2206 {
2207 	struct delayed_work *delayed_work = to_delayed_work(work);
2208 	struct tx_work *tx_work = container_of(delayed_work,
2209 					       struct tx_work, work);
2210 	struct sock *sk = tx_work->sk;
2211 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2212 	struct tls_sw_context_tx *ctx;
2213 
2214 	if (unlikely(!tls_ctx))
2215 		return;
2216 
2217 	ctx = tls_sw_ctx_tx(tls_ctx);
2218 	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2219 		return;
2220 
2221 	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2222 		return;
2223 	mutex_lock(&tls_ctx->tx_lock);
2224 	lock_sock(sk);
2225 	tls_tx_records(sk, -1);
2226 	release_sock(sk);
2227 	mutex_unlock(&tls_ctx->tx_lock);
2228 }
2229 
2230 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2231 {
2232 	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2233 
2234 	/* Schedule the transmission if tx list is ready */
2235 	if (is_tx_ready(tx_ctx) &&
2236 	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2237 		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2238 }
2239 
2240 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2241 {
2242 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2243 
2244 	write_lock_bh(&sk->sk_callback_lock);
2245 	rx_ctx->saved_data_ready = sk->sk_data_ready;
2246 	sk->sk_data_ready = tls_data_ready;
2247 	write_unlock_bh(&sk->sk_callback_lock);
2248 
2249 	strp_check_rcv(&rx_ctx->strp);
2250 }
2251 
2252 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2253 {
2254 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2255 	struct tls_prot_info *prot = &tls_ctx->prot_info;
2256 	struct tls_crypto_info *crypto_info;
2257 	struct tls_sw_context_tx *sw_ctx_tx = NULL;
2258 	struct tls_sw_context_rx *sw_ctx_rx = NULL;
2259 	struct cipher_context *cctx;
2260 	struct crypto_aead **aead;
2261 	struct strp_callbacks cb;
2262 	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2263 	struct crypto_tfm *tfm;
2264 	char *iv, *rec_seq, *key, *salt, *cipher_name;
2265 	size_t keysize;
2266 	int rc = 0;
2267 
2268 	if (!ctx) {
2269 		rc = -EINVAL;
2270 		goto out;
2271 	}
2272 
2273 	if (tx) {
2274 		if (!ctx->priv_ctx_tx) {
2275 			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2276 			if (!sw_ctx_tx) {
2277 				rc = -ENOMEM;
2278 				goto out;
2279 			}
2280 			ctx->priv_ctx_tx = sw_ctx_tx;
2281 		} else {
2282 			sw_ctx_tx =
2283 				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2284 		}
2285 	} else {
2286 		if (!ctx->priv_ctx_rx) {
2287 			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2288 			if (!sw_ctx_rx) {
2289 				rc = -ENOMEM;
2290 				goto out;
2291 			}
2292 			ctx->priv_ctx_rx = sw_ctx_rx;
2293 		} else {
2294 			sw_ctx_rx =
2295 				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2296 		}
2297 	}
2298 
2299 	if (tx) {
2300 		crypto_init_wait(&sw_ctx_tx->async_wait);
2301 		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
2302 		crypto_info = &ctx->crypto_send.info;
2303 		cctx = &ctx->tx;
2304 		aead = &sw_ctx_tx->aead_send;
2305 		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2306 		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2307 		sw_ctx_tx->tx_work.sk = sk;
2308 	} else {
2309 		crypto_init_wait(&sw_ctx_rx->async_wait);
2310 		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
2311 		crypto_info = &ctx->crypto_recv.info;
2312 		cctx = &ctx->rx;
2313 		skb_queue_head_init(&sw_ctx_rx->rx_list);
2314 		aead = &sw_ctx_rx->aead_recv;
2315 	}
2316 
2317 	switch (crypto_info->cipher_type) {
2318 	case TLS_CIPHER_AES_GCM_128: {
2319 		struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2320 
2321 		gcm_128_info = (void *)crypto_info;
2322 		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2323 		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2324 		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2325 		iv = gcm_128_info->iv;
2326 		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2327 		rec_seq = gcm_128_info->rec_seq;
2328 		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2329 		key = gcm_128_info->key;
2330 		salt = gcm_128_info->salt;
2331 		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2332 		cipher_name = "gcm(aes)";
2333 		break;
2334 	}
2335 	case TLS_CIPHER_AES_GCM_256: {
2336 		struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2337 
2338 		gcm_256_info = (void *)crypto_info;
2339 		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2340 		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2341 		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2342 		iv = gcm_256_info->iv;
2343 		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2344 		rec_seq = gcm_256_info->rec_seq;
2345 		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2346 		key = gcm_256_info->key;
2347 		salt = gcm_256_info->salt;
2348 		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2349 		cipher_name = "gcm(aes)";
2350 		break;
2351 	}
2352 	case TLS_CIPHER_AES_CCM_128: {
2353 		struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2354 
2355 		ccm_128_info = (void *)crypto_info;
2356 		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2357 		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2358 		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2359 		iv = ccm_128_info->iv;
2360 		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2361 		rec_seq = ccm_128_info->rec_seq;
2362 		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2363 		key = ccm_128_info->key;
2364 		salt = ccm_128_info->salt;
2365 		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2366 		cipher_name = "ccm(aes)";
2367 		break;
2368 	}
2369 	case TLS_CIPHER_CHACHA20_POLY1305: {
2370 		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
2371 
2372 		chacha20_poly1305_info = (void *)crypto_info;
2373 		nonce_size = 0;
2374 		tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
2375 		iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
2376 		iv = chacha20_poly1305_info->iv;
2377 		rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
2378 		rec_seq = chacha20_poly1305_info->rec_seq;
2379 		keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
2380 		key = chacha20_poly1305_info->key;
2381 		salt = chacha20_poly1305_info->salt;
2382 		salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
2383 		cipher_name = "rfc7539(chacha20,poly1305)";
2384 		break;
2385 	}
2386 	case TLS_CIPHER_SM4_GCM: {
2387 		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
2388 
2389 		sm4_gcm_info = (void *)crypto_info;
2390 		nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2391 		tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
2392 		iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2393 		iv = sm4_gcm_info->iv;
2394 		rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
2395 		rec_seq = sm4_gcm_info->rec_seq;
2396 		keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
2397 		key = sm4_gcm_info->key;
2398 		salt = sm4_gcm_info->salt;
2399 		salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
2400 		cipher_name = "gcm(sm4)";
2401 		break;
2402 	}
2403 	case TLS_CIPHER_SM4_CCM: {
2404 		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
2405 
2406 		sm4_ccm_info = (void *)crypto_info;
2407 		nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2408 		tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
2409 		iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2410 		iv = sm4_ccm_info->iv;
2411 		rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
2412 		rec_seq = sm4_ccm_info->rec_seq;
2413 		keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
2414 		key = sm4_ccm_info->key;
2415 		salt = sm4_ccm_info->salt;
2416 		salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
2417 		cipher_name = "ccm(sm4)";
2418 		break;
2419 	}
2420 	default:
2421 		rc = -EINVAL;
2422 		goto free_priv;
2423 	}
2424 
2425 	/* Sanity-check the sizes for stack allocations. */
2426 	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2427 	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE) {
2428 		rc = -EINVAL;
2429 		goto free_priv;
2430 	}
2431 
2432 	if (crypto_info->version == TLS_1_3_VERSION) {
2433 		nonce_size = 0;
2434 		prot->aad_size = TLS_HEADER_SIZE;
2435 		prot->tail_size = 1;
2436 	} else {
2437 		prot->aad_size = TLS_AAD_SPACE_SIZE;
2438 		prot->tail_size = 0;
2439 	}
2440 
2441 	prot->version = crypto_info->version;
2442 	prot->cipher_type = crypto_info->cipher_type;
2443 	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2444 	prot->tag_size = tag_size;
2445 	prot->overhead_size = prot->prepend_size +
2446 			      prot->tag_size + prot->tail_size;
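	/* Worked example, using the constants selected above: TLS 1.2
	 * AES-GCM-128 has nonce_size = 8, so prepend_size = 5 + 8 = 13,
	 * tag_size = 16 and tail_size = 0, giving overhead_size = 29.
	 * TLS 1.3 zeroes nonce_size but adds a one-byte inner content
	 * type: 5 + 16 + 1 = 22.
	 */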
2447 	prot->iv_size = iv_size;
2448 	prot->salt_size = salt_size;
2449 	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2450 	if (!cctx->iv) {
2451 		rc = -ENOMEM;
2452 		goto free_priv;
2453 	}
2454 	/* Note: the 128-bit and 256-bit AES-GCM salts are the same size */
2455 	prot->rec_seq_size = rec_seq_size;
2456 	memcpy(cctx->iv, salt, salt_size);
2457 	memcpy(cctx->iv + salt_size, iv, iv_size);
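	/* cctx->iv now holds the nonce material laid out as
	 *   [ salt (implicit IV) | iv (explicit IV) ]
	 * TLS 1.2 AES-GCM sends the explicit part with each record, while
	 * TLS 1.3 and CHACHA20-POLY1305 derive the per-record nonce by
	 * XORing the record sequence number into this buffer.
	 */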
2458 	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2459 	if (!cctx->rec_seq) {
2460 		rc = -ENOMEM;
2461 		goto free_iv;
2462 	}
2463 
2464 	if (!*aead) {
2465 		*aead = crypto_alloc_aead(cipher_name, 0, 0);
2466 		if (IS_ERR(*aead)) {
2467 			rc = PTR_ERR(*aead);
2468 			*aead = NULL;
2469 			goto free_rec_seq;
2470 		}
2471 	}
2472 
2473 	ctx->push_pending_record = tls_sw_push_pending_record;
2474 
2475 	rc = crypto_aead_setkey(*aead, key, keysize);
2476 
2477 	if (rc)
2478 		goto free_aead;
2479 
2480 	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2481 	if (rc)
2482 		goto free_aead;
2483 
2484 	if (sw_ctx_rx) {
2485 		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2486 
2487 		if (crypto_info->version == TLS_1_3_VERSION)
2488 			sw_ctx_rx->async_capable = 0;
2489 		else
2490 			sw_ctx_rx->async_capable =
2491 				!!(tfm->__crt_alg->cra_flags &
2492 				   CRYPTO_ALG_ASYNC);
2493 
2494 		/* Set up strparser */
2495 		memset(&cb, 0, sizeof(cb));
2496 		cb.rcv_msg = tls_queue;
2497 		cb.parse_msg = tls_read_size;
2498 
2499 		strp_init(&sw_ctx_rx->strp, sk, &cb);
2500 	}
2501 
2502 	goto out;
2503 
2504 free_aead:
2505 	crypto_free_aead(*aead);
2506 	*aead = NULL;
2507 free_rec_seq:
2508 	kfree(cctx->rec_seq);
2509 	cctx->rec_seq = NULL;
2510 free_iv:
2511 	kfree(cctx->iv);
2512 	cctx->iv = NULL;
2513 free_priv:
2514 	if (tx) {
2515 		kfree(ctx->priv_ctx_tx);
2516 		ctx->priv_ctx_tx = NULL;
2517 	} else {
2518 		kfree(ctx->priv_ctx_rx);
2519 		ctx->priv_ctx_rx = NULL;
2520 	}
2521 out:
2522 	return rc;
2523 }
2524
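/* Typical configuration path that reaches tls_set_sw_offload() from
 * do_tls_setsockopt_conf() (illustrative userspace sketch; error handling
 * omitted, key material normally taken from a completed handshake):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *		// .key/.iv/.salt/.rec_seq filled in from the handshake
 *	};
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));	// tx == 1
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));	// tx == 0
 */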