xref: /openbmc/linux/net/tls/tls_sw.c (revision 18da174d)
1 /*
2  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3  * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4  * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5  * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6  * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7  * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
8  *
9  * This software is available to you under a choice of one of two
10  * licenses.  You may choose to be licensed under the terms of the GNU
11  * General Public License (GPL) Version 2, available from the file
12  * COPYING in the main directory of this source tree, or the
13  * OpenIB.org BSD license below:
14  *
15  *     Redistribution and use in source and binary forms, with or
16  *     without modification, are permitted provided that the following
17  *     conditions are met:
18  *
19  *      - Redistributions of source code must retain the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer.
22  *
23  *      - Redistributions in binary form must reproduce the above
24  *        copyright notice, this list of conditions and the following
25  *        disclaimer in the documentation and/or other materials
26  *        provided with the distribution.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35  * SOFTWARE.
36  */
37 
38 #include <linux/bug.h>
39 #include <linux/sched/signal.h>
40 #include <linux/module.h>
41 #include <linux/kernel.h>
42 #include <linux/splice.h>
43 #include <crypto/aead.h>
44 
45 #include <net/strparser.h>
46 #include <net/tls.h>
47 #include <trace/events/sock.h>
48 
49 #include "tls.h"
50 
51 struct tls_decrypt_arg {
52 	struct_group(inargs,
53 	bool zc;
54 	bool async;
55 	u8 tail;
56 	);
57 
58 	struct sk_buff *skb;
59 };
60 
61 struct tls_decrypt_ctx {
62 	struct sock *sk;
63 	u8 iv[MAX_IV_SIZE];
64 	u8 aad[TLS_MAX_AAD_SIZE];
65 	u8 tail;
66 	struct scatterlist sg[];
67 };
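/* tls_decrypt_ctx is not allocated on its own: tls_decrypt_sg() carves it
 * out of a single kmalloc() placed directly behind the aead_request,
 * roughly (see the "Allocate a single block" comment further down):
 *
 *   [ struct aead_request | crypto req priv | pad to __alignof__(dctx) |
 *     struct tls_decrypt_ctx | sg[n_sgin + n_sgout] ]
 *
 * tls_decrypt_done() recomputes the same offset from the aead_request
 * pointer to recover the context on async completion.
 */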
68 
69 noinline void tls_err_abort(struct sock *sk, int err)
70 {
71 	WARN_ON_ONCE(err >= 0);
72 	/* sk->sk_err should contain a positive error code. */
73 	WRITE_ONCE(sk->sk_err, -err);
74 	/* Paired with smp_rmb() in tcp_poll() */
75 	smp_wmb();
76 	sk_error_report(sk);
77 }
78 
79 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
80                      unsigned int recursion_level)
81 {
82         int start = skb_headlen(skb);
83         int i, chunk = start - offset;
84         struct sk_buff *frag_iter;
85         int elt = 0;
86 
87         if (unlikely(recursion_level >= 24))
88                 return -EMSGSIZE;
89 
90         if (chunk > 0) {
91                 if (chunk > len)
92                         chunk = len;
93                 elt++;
94                 len -= chunk;
95                 if (len == 0)
96                         return elt;
97                 offset += chunk;
98         }
99 
100         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
101                 int end;
102 
103                 WARN_ON(start > offset + len);
104 
105                 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
106                 chunk = end - offset;
107                 if (chunk > 0) {
108                         if (chunk > len)
109                                 chunk = len;
110                         elt++;
111                         len -= chunk;
112                         if (len == 0)
113                                 return elt;
114                         offset += chunk;
115                 }
116                 start = end;
117         }
118 
119         if (unlikely(skb_has_frag_list(skb))) {
120                 skb_walk_frags(skb, frag_iter) {
121                         int end, ret;
122 
123                         WARN_ON(start > offset + len);
124 
125                         end = start + frag_iter->len;
126                         chunk = end - offset;
127                         if (chunk > 0) {
128                                 if (chunk > len)
129                                         chunk = len;
130                                 ret = __skb_nsg(frag_iter, offset - start, chunk,
131                                                 recursion_level + 1);
132                                 if (unlikely(ret < 0))
133                                         return ret;
134                                 elt += ret;
135                                 len -= chunk;
136                                 if (len == 0)
137                                         return elt;
138                                 offset += chunk;
139                         }
140                         start = end;
141                 }
142         }
143         BUG_ON(len);
144         return elt;
145 }
146 
147 /* Return the number of scatterlist elements required to completely map the
148  * skb, or -EMSGSIZE if the recursion depth is exceeded.
149  */
150 static int skb_nsg(struct sk_buff *skb, int offset, int len)
151 {
152         return __skb_nsg(skb, offset, len, 0);
153 }
154 
155 static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
156 			      struct tls_decrypt_arg *darg)
157 {
158 	struct strp_msg *rxm = strp_msg(skb);
159 	struct tls_msg *tlm = tls_msg(skb);
160 	int sub = 0;
161 
162 	/* Determine zero-padding length */
163 	if (prot->version == TLS_1_3_VERSION) {
164 		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
165 		char content_type = darg->zc ? darg->tail : 0;
166 		int err;
167 
168 		while (content_type == 0) {
169 			if (offset < prot->prepend_size)
170 				return -EBADMSG;
171 			err = skb_copy_bits(skb, rxm->offset + offset,
172 					    &content_type, 1);
173 			if (err)
174 				return err;
175 			if (content_type)
176 				break;
177 			sub++;
178 			offset--;
179 		}
180 		tlm->control = content_type;
181 	}
182 	return sub;
183 }
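/* Example for the scan above: a TLS 1.3 record whose plaintext (excluding
 * the auth tag) ends in ... 0x17 0x00 0x00 carries two bytes of zero
 * padding, so tls_padding_length() returns 2 and sets tlm->control to
 * 0x17 (application data), matching the RFC 8446 TLSInnerPlaintext
 * layout: content || content_type || zeros.
 */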
184 
185 static void tls_decrypt_done(void *data, int err)
186 {
187 	struct aead_request *aead_req = data;
188 	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
189 	struct scatterlist *sgout = aead_req->dst;
190 	struct scatterlist *sgin = aead_req->src;
191 	struct tls_sw_context_rx *ctx;
192 	struct tls_decrypt_ctx *dctx;
193 	struct tls_context *tls_ctx;
194 	struct scatterlist *sg;
195 	unsigned int pages;
196 	struct sock *sk;
197 	int aead_size;
198 
199 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
200 	aead_size = ALIGN(aead_size, __alignof__(*dctx));
201 	dctx = (void *)((u8 *)aead_req + aead_size);
202 
203 	sk = dctx->sk;
204 	tls_ctx = tls_get_ctx(sk);
205 	ctx = tls_sw_ctx_rx(tls_ctx);
206 
207 	/* Propagate the error if there was one */
208 	if (err) {
209 		if (err == -EBADMSG)
210 			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
211 		ctx->async_wait.err = err;
212 		tls_err_abort(sk, err);
213 	}
214 
215 	/* Free the destination pages if skb was not decrypted in place */
216 	if (sgout != sgin) {
217 		/* Skip the first S/G entry as it points to AAD */
218 		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
219 			if (!sg)
220 				break;
221 			put_page(sg_page(sg));
222 		}
223 	}
224 
225 	kfree(aead_req);
226 
227 	spin_lock_bh(&ctx->decrypt_compl_lock);
228 	if (!atomic_dec_return(&ctx->decrypt_pending))
229 		complete(&ctx->async_wait.completion);
230 	spin_unlock_bh(&ctx->decrypt_compl_lock);
231 }
232 
233 static int tls_do_decryption(struct sock *sk,
234 			     struct scatterlist *sgin,
235 			     struct scatterlist *sgout,
236 			     char *iv_recv,
237 			     size_t data_len,
238 			     struct aead_request *aead_req,
239 			     struct tls_decrypt_arg *darg)
240 {
241 	struct tls_context *tls_ctx = tls_get_ctx(sk);
242 	struct tls_prot_info *prot = &tls_ctx->prot_info;
243 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
244 	int ret;
245 
246 	aead_request_set_tfm(aead_req, ctx->aead_recv);
247 	aead_request_set_ad(aead_req, prot->aad_size);
248 	aead_request_set_crypt(aead_req, sgin, sgout,
249 			       data_len + prot->tag_size,
250 			       (u8 *)iv_recv);
251 
252 	if (darg->async) {
253 		aead_request_set_callback(aead_req,
254 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
255 					  tls_decrypt_done, aead_req);
256 		atomic_inc(&ctx->decrypt_pending);
257 	} else {
258 		aead_request_set_callback(aead_req,
259 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
260 					  crypto_req_done, &ctx->async_wait);
261 	}
262 
263 	ret = crypto_aead_decrypt(aead_req);
264 	if (ret == -EINPROGRESS) {
265 		if (darg->async)
266 			return 0;
267 
268 		ret = crypto_wait_req(ret, &ctx->async_wait);
269 	}
270 	darg->async = false;
271 
272 	return ret;
273 }
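/* Note on the -EINPROGRESS handling above: an async transform queuing the
 * request hands ownership to tls_decrypt_done() when darg->async is set;
 * otherwise crypto_wait_req() parks on ctx->async_wait until the
 * completion fires and returns the real verdict synchronously.
 */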
274 
275 static void tls_trim_both_msgs(struct sock *sk, int target_size)
276 {
277 	struct tls_context *tls_ctx = tls_get_ctx(sk);
278 	struct tls_prot_info *prot = &tls_ctx->prot_info;
279 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
280 	struct tls_rec *rec = ctx->open_rec;
281 
282 	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
283 	if (target_size > 0)
284 		target_size += prot->overhead_size;
285 	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
286 }
287 
288 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
289 {
290 	struct tls_context *tls_ctx = tls_get_ctx(sk);
291 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
292 	struct tls_rec *rec = ctx->open_rec;
293 	struct sk_msg *msg_en = &rec->msg_encrypted;
294 
295 	return sk_msg_alloc(sk, msg_en, len, 0);
296 }
297 
298 static int tls_clone_plaintext_msg(struct sock *sk, int required)
299 {
300 	struct tls_context *tls_ctx = tls_get_ctx(sk);
301 	struct tls_prot_info *prot = &tls_ctx->prot_info;
302 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
303 	struct tls_rec *rec = ctx->open_rec;
304 	struct sk_msg *msg_pl = &rec->msg_plaintext;
305 	struct sk_msg *msg_en = &rec->msg_encrypted;
306 	int skip, len;
307 
308 	/* We add page references worth len bytes from the encrypted sg
309 	 * at the end of the plaintext sg. msg_en is guaranteed to have
310 	 * enough room for this (ensured by the caller).
311 	 */
312 	len = required - msg_pl->sg.size;
313 
314 	/* Skip initial bytes in msg_en's data to be able to use the
315 	 * same offset for both the plain and the encrypted data.
316 	 */
317 	skip = prot->prepend_size + msg_pl->sg.size;
318 
319 	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
320 }
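/* After the clone, plaintext byte i and ciphertext byte i live at the same
 * page offset: msg_en reserves prot->prepend_size bytes of headroom for
 * the record header, so the AEAD can encrypt the payload effectively in
 * place while writing the header and tag around it.
 */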
321 
322 static struct tls_rec *tls_get_rec(struct sock *sk)
323 {
324 	struct tls_context *tls_ctx = tls_get_ctx(sk);
325 	struct tls_prot_info *prot = &tls_ctx->prot_info;
326 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
327 	struct sk_msg *msg_pl, *msg_en;
328 	struct tls_rec *rec;
329 	int mem_size;
330 
331 	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
332 
333 	rec = kzalloc(mem_size, sk->sk_allocation);
334 	if (!rec)
335 		return NULL;
336 
337 	msg_pl = &rec->msg_plaintext;
338 	msg_en = &rec->msg_encrypted;
339 
340 	sk_msg_init(msg_pl);
341 	sk_msg_init(msg_en);
342 
343 	sg_init_table(rec->sg_aead_in, 2);
344 	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
345 	sg_unmark_end(&rec->sg_aead_in[1]);
346 
347 	sg_init_table(rec->sg_aead_out, 2);
348 	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
349 	sg_unmark_end(&rec->sg_aead_out[1]);
350 
351 	rec->sk = sk;
352 
353 	return rec;
354 }
355 
356 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
357 {
358 	sk_msg_free(sk, &rec->msg_encrypted);
359 	sk_msg_free(sk, &rec->msg_plaintext);
360 	kfree(rec);
361 }
362 
363 static void tls_free_open_rec(struct sock *sk)
364 {
365 	struct tls_context *tls_ctx = tls_get_ctx(sk);
366 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
367 	struct tls_rec *rec = ctx->open_rec;
368 
369 	if (rec) {
370 		tls_free_rec(sk, rec);
371 		ctx->open_rec = NULL;
372 	}
373 }
374 
375 int tls_tx_records(struct sock *sk, int flags)
376 {
377 	struct tls_context *tls_ctx = tls_get_ctx(sk);
378 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
379 	struct tls_rec *rec, *tmp;
380 	struct sk_msg *msg_en;
381 	int tx_flags, rc = 0;
382 
383 	if (tls_is_partially_sent_record(tls_ctx)) {
384 		rec = list_first_entry(&ctx->tx_list,
385 				       struct tls_rec, list);
386 
387 		if (flags == -1)
388 			tx_flags = rec->tx_flags;
389 		else
390 			tx_flags = flags;
391 
392 		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
393 		if (rc)
394 			goto tx_err;
395 
396 		/* Full record has been transmitted.
397 		 * Remove the head of tx_list
398 		 */
399 		list_del(&rec->list);
400 		sk_msg_free(sk, &rec->msg_plaintext);
401 		kfree(rec);
402 	}
403 
404 	/* Tx all ready records */
405 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
406 		if (READ_ONCE(rec->tx_ready)) {
407 			if (flags == -1)
408 				tx_flags = rec->tx_flags;
409 			else
410 				tx_flags = flags;
411 
412 			msg_en = &rec->msg_encrypted;
413 			rc = tls_push_sg(sk, tls_ctx,
414 					 &msg_en->sg.data[msg_en->sg.curr],
415 					 0, tx_flags);
416 			if (rc)
417 				goto tx_err;
418 
419 			list_del(&rec->list);
420 			sk_msg_free(sk, &rec->msg_plaintext);
421 			kfree(rec);
422 		} else {
423 			break;
424 		}
425 	}
426 
427 tx_err:
428 	if (rc < 0 && rc != -EAGAIN)
429 		tls_err_abort(sk, -EBADMSG);
430 
431 	return rc;
432 }
433 
434 static void tls_encrypt_done(void *data, int err)
435 {
436 	struct tls_sw_context_tx *ctx;
437 	struct tls_context *tls_ctx;
438 	struct tls_prot_info *prot;
439 	struct tls_rec *rec = data;
440 	struct scatterlist *sge;
441 	struct sk_msg *msg_en;
442 	bool ready = false;
443 	struct sock *sk;
444 	int pending;
445 
446 	msg_en = &rec->msg_encrypted;
447 
448 	sk = rec->sk;
449 	tls_ctx = tls_get_ctx(sk);
450 	prot = &tls_ctx->prot_info;
451 	ctx = tls_sw_ctx_tx(tls_ctx);
452 
453 	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
454 	sge->offset -= prot->prepend_size;
455 	sge->length += prot->prepend_size;
456 
457 	/* Check if an error was previously set on the socket */
458 	if (err || sk->sk_err) {
459 		rec = NULL;
460 
461 		/* If err is already set on socket, return the same code */
462 		if (sk->sk_err) {
463 			ctx->async_wait.err = -sk->sk_err;
464 		} else {
465 			ctx->async_wait.err = err;
466 			tls_err_abort(sk, err);
467 		}
468 	}
469 
470 	if (rec) {
471 		struct tls_rec *first_rec;
472 
473 		/* Mark the record as ready for transmission */
474 		smp_store_mb(rec->tx_ready, true);
475 
476 		/* If the received record is at the head of tx_list, schedule tx */
477 		first_rec = list_first_entry(&ctx->tx_list,
478 					     struct tls_rec, list);
479 		if (rec == first_rec)
480 			ready = true;
481 	}
482 
483 	spin_lock_bh(&ctx->encrypt_compl_lock);
484 	pending = atomic_dec_return(&ctx->encrypt_pending);
485 
486 	if (!pending && ctx->async_notify)
487 		complete(&ctx->async_wait.completion);
488 	spin_unlock_bh(&ctx->encrypt_compl_lock);
489 
490 	if (!ready)
491 		return;
492 
493 	/* Schedule the transmission */
494 	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
495 		schedule_delayed_work(&ctx->tx_work.work, 1);
496 }
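/* Ordering note: TLS records carry implicit sequence numbers, so completed
 * records must hit the wire in submission order. That is why tx is only
 * scheduled above when the record that just finished encrypting sits at
 * the head of tx_list, and why tls_tx_records() stops at the first entry
 * that is not yet tx_ready.
 */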
497 
498 static int tls_do_encryption(struct sock *sk,
499 			     struct tls_context *tls_ctx,
500 			     struct tls_sw_context_tx *ctx,
501 			     struct aead_request *aead_req,
502 			     size_t data_len, u32 start)
503 {
504 	struct tls_prot_info *prot = &tls_ctx->prot_info;
505 	struct tls_rec *rec = ctx->open_rec;
506 	struct sk_msg *msg_en = &rec->msg_encrypted;
507 	struct scatterlist *sge = sk_msg_elem(msg_en, start);
508 	int rc, iv_offset = 0;
509 
510 	/* For CCM-based ciphers, the first byte of the IV is a constant */
511 	switch (prot->cipher_type) {
512 	case TLS_CIPHER_AES_CCM_128:
513 		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
514 		iv_offset = 1;
515 		break;
516 	case TLS_CIPHER_SM4_CCM:
517 		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
518 		iv_offset = 1;
519 		break;
520 	}
521 
522 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
523 	       prot->iv_size + prot->salt_size);
524 
525 	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
526 			    tls_ctx->tx.rec_seq);
527 
528 	sge->offset += prot->prepend_size;
529 	sge->length -= prot->prepend_size;
530 
531 	msg_en->sg.curr = start;
532 
533 	aead_request_set_tfm(aead_req, ctx->aead_send);
534 	aead_request_set_ad(aead_req, prot->aad_size);
535 	aead_request_set_crypt(aead_req, rec->sg_aead_in,
536 			       rec->sg_aead_out,
537 			       data_len, rec->iv_data);
538 
539 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
540 				  tls_encrypt_done, rec);
541 
542 	/* Add the record to tx_list */
543 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
544 	atomic_inc(&ctx->encrypt_pending);
545 
546 	rc = crypto_aead_encrypt(aead_req);
547 	if (!rc || rc != -EINPROGRESS) {
548 		atomic_dec(&ctx->encrypt_pending);
549 		sge->offset -= prot->prepend_size;
550 		sge->length += prot->prepend_size;
551 	}
552 
553 	if (!rc) {
554 		WRITE_ONCE(rec->tx_ready, true);
555 	} else if (rc != -EINPROGRESS) {
556 		list_del(&rec->list);
557 		return rc;
558 	}
559 
560 	/* Unhook the record from the context if encryption did not fail */
561 	ctx->open_rec = NULL;
562 	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
563 	return rc;
564 }
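/* Per-record nonce construction used above: the static salt+IV negotiated
 * at handshake time is copied into rec->iv_data, and tls_xor_iv_with_seq()
 * then XORs the 64-bit record sequence number into it for TLS 1.3 and
 * ChaCha20-Poly1305 (RFC 8446 5.3), while leaving it as copied for the
 * other TLS 1.2 suites. CCM ciphers additionally fix the first IV byte to
 * the constant flags byte the mode expects.
 */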
565 
566 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
567 				 struct tls_rec **to, struct sk_msg *msg_opl,
568 				 struct sk_msg *msg_oen, u32 split_point,
569 				 u32 tx_overhead_size, u32 *orig_end)
570 {
571 	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
572 	struct scatterlist *sge, *osge, *nsge;
573 	u32 orig_size = msg_opl->sg.size;
574 	struct scatterlist tmp = { };
575 	struct sk_msg *msg_npl;
576 	struct tls_rec *new;
577 	int ret;
578 
579 	new = tls_get_rec(sk);
580 	if (!new)
581 		return -ENOMEM;
582 	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
583 			   tx_overhead_size, 0);
584 	if (ret < 0) {
585 		tls_free_rec(sk, new);
586 		return ret;
587 	}
588 
589 	*orig_end = msg_opl->sg.end;
590 	i = msg_opl->sg.start;
591 	sge = sk_msg_elem(msg_opl, i);
592 	while (apply && sge->length) {
593 		if (sge->length > apply) {
594 			u32 len = sge->length - apply;
595 
596 			get_page(sg_page(sge));
597 			sg_set_page(&tmp, sg_page(sge), len,
598 				    sge->offset + apply);
599 			sge->length = apply;
600 			bytes += apply;
601 			apply = 0;
602 		} else {
603 			apply -= sge->length;
604 			bytes += sge->length;
605 		}
606 
607 		sk_msg_iter_var_next(i);
608 		if (i == msg_opl->sg.end)
609 			break;
610 		sge = sk_msg_elem(msg_opl, i);
611 	}
612 
613 	msg_opl->sg.end = i;
614 	msg_opl->sg.curr = i;
615 	msg_opl->sg.copybreak = 0;
616 	msg_opl->apply_bytes = 0;
617 	msg_opl->sg.size = bytes;
618 
619 	msg_npl = &new->msg_plaintext;
620 	msg_npl->apply_bytes = apply;
621 	msg_npl->sg.size = orig_size - bytes;
622 
623 	j = msg_npl->sg.start;
624 	nsge = sk_msg_elem(msg_npl, j);
625 	if (tmp.length) {
626 		memcpy(nsge, &tmp, sizeof(*nsge));
627 		sk_msg_iter_var_next(j);
628 		nsge = sk_msg_elem(msg_npl, j);
629 	}
630 
631 	osge = sk_msg_elem(msg_opl, i);
632 	while (osge->length) {
633 		memcpy(nsge, osge, sizeof(*nsge));
634 		sg_unmark_end(nsge);
635 		sk_msg_iter_var_next(i);
636 		sk_msg_iter_var_next(j);
637 		if (i == *orig_end)
638 			break;
639 		osge = sk_msg_elem(msg_opl, i);
640 		nsge = sk_msg_elem(msg_npl, j);
641 	}
642 
643 	msg_npl->sg.end = j;
644 	msg_npl->sg.curr = j;
645 	msg_npl->sg.copybreak = 0;
646 
647 	*to = new;
648 	return 0;
649 }
650 
651 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
652 				  struct tls_rec *from, u32 orig_end)
653 {
654 	struct sk_msg *msg_npl = &from->msg_plaintext;
655 	struct sk_msg *msg_opl = &to->msg_plaintext;
656 	struct scatterlist *osge, *nsge;
657 	u32 i, j;
658 
659 	i = msg_opl->sg.end;
660 	sk_msg_iter_var_prev(i);
661 	j = msg_npl->sg.start;
662 
663 	osge = sk_msg_elem(msg_opl, i);
664 	nsge = sk_msg_elem(msg_npl, j);
665 
666 	if (sg_page(osge) == sg_page(nsge) &&
667 	    osge->offset + osge->length == nsge->offset) {
668 		osge->length += nsge->length;
669 		put_page(sg_page(nsge));
670 	}
671 
672 	msg_opl->sg.end = orig_end;
673 	msg_opl->sg.curr = orig_end;
674 	msg_opl->sg.copybreak = 0;
675 	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
676 	msg_opl->sg.size += msg_npl->sg.size;
677 
678 	sk_msg_free(sk, &to->msg_encrypted);
679 	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
680 
681 	kfree(from);
682 }
683 
684 static int tls_push_record(struct sock *sk, int flags,
685 			   unsigned char record_type)
686 {
687 	struct tls_context *tls_ctx = tls_get_ctx(sk);
688 	struct tls_prot_info *prot = &tls_ctx->prot_info;
689 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
690 	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
691 	u32 i, split_point, orig_end;
692 	struct sk_msg *msg_pl, *msg_en;
693 	struct aead_request *req;
694 	bool split;
695 	int rc;
696 
697 	if (!rec)
698 		return 0;
699 
700 	msg_pl = &rec->msg_plaintext;
701 	msg_en = &rec->msg_encrypted;
702 
703 	split_point = msg_pl->apply_bytes;
704 	split = split_point && split_point < msg_pl->sg.size;
705 	if (unlikely((!split &&
706 		      msg_pl->sg.size +
707 		      prot->overhead_size > msg_en->sg.size) ||
708 		     (split &&
709 		      split_point +
710 		      prot->overhead_size > msg_en->sg.size))) {
711 		split = true;
712 		split_point = msg_en->sg.size;
713 	}
714 	if (split) {
715 		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
716 					   split_point, prot->overhead_size,
717 					   &orig_end);
718 		if (rc < 0)
719 			return rc;
720 		/* This can happen if the above tls_split_open_record()
721 		 * allocates a single large encryption buffer instead of two
722 		 * smaller ones. In this case adjust the pointers and
723 		 * continue without splitting.
724 		 */
725 		if (!msg_pl->sg.size) {
726 			tls_merge_open_record(sk, rec, tmp, orig_end);
727 			msg_pl = &rec->msg_plaintext;
728 			msg_en = &rec->msg_encrypted;
729 			split = false;
730 		}
731 		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
732 			    prot->overhead_size);
733 	}
734 
735 	rec->tx_flags = flags;
736 	req = &rec->aead_req;
737 
738 	i = msg_pl->sg.end;
739 	sk_msg_iter_var_prev(i);
740 
741 	rec->content_type = record_type;
742 	if (prot->version == TLS_1_3_VERSION) {
743 		/* Add content type to end of message.  No padding added */
744 		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
745 		sg_mark_end(&rec->sg_content_type);
746 		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
747 			 &rec->sg_content_type);
748 	} else {
749 		sg_mark_end(sk_msg_elem(msg_pl, i));
750 	}
751 
752 	if (msg_pl->sg.end < msg_pl->sg.start) {
753 		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
754 			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
755 			 msg_pl->sg.data);
756 	}
757 
758 	i = msg_pl->sg.start;
759 	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
760 
761 	i = msg_en->sg.end;
762 	sk_msg_iter_var_prev(i);
763 	sg_mark_end(sk_msg_elem(msg_en, i));
764 
765 	i = msg_en->sg.start;
766 	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
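	/* The chains built above hand the AEAD one logical S/G list per
	 * direction, e.g. for TLS 1.3:
	 *   sg_aead_in:  [aad_space] -> [plaintext pages] -> [content_type]
	 *   sg_aead_out: [aad_space] -> [record pages, incl. header/tag room]
	 */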
767 
768 	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
769 		     tls_ctx->tx.rec_seq, record_type, prot);
770 
771 	tls_fill_prepend(tls_ctx,
772 			 page_address(sg_page(&msg_en->sg.data[i])) +
773 			 msg_en->sg.data[i].offset,
774 			 msg_pl->sg.size + prot->tail_size,
775 			 record_type);
776 
777 	tls_ctx->pending_open_record_frags = false;
778 
779 	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
780 			       msg_pl->sg.size + prot->tail_size, i);
781 	if (rc < 0) {
782 		if (rc != -EINPROGRESS) {
783 			tls_err_abort(sk, -EBADMSG);
784 			if (split) {
785 				tls_ctx->pending_open_record_frags = true;
786 				tls_merge_open_record(sk, rec, tmp, orig_end);
787 			}
788 		}
789 		ctx->async_capable = 1;
790 		return rc;
791 	} else if (split) {
792 		msg_pl = &tmp->msg_plaintext;
793 		msg_en = &tmp->msg_encrypted;
794 		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
795 		tls_ctx->pending_open_record_frags = true;
796 		ctx->open_rec = tmp;
797 	}
798 
799 	return tls_tx_records(sk, flags);
800 }
801 
802 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
803 			       bool full_record, u8 record_type,
804 			       ssize_t *copied, int flags)
805 {
806 	struct tls_context *tls_ctx = tls_get_ctx(sk);
807 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
808 	struct sk_msg msg_redir = { };
809 	struct sk_psock *psock;
810 	struct sock *sk_redir;
811 	struct tls_rec *rec;
812 	bool enospc, policy, redir_ingress;
813 	int err = 0, send;
814 	u32 delta = 0;
815 
816 	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
817 	psock = sk_psock_get(sk);
818 	if (!psock || !policy) {
819 		err = tls_push_record(sk, flags, record_type);
820 		if (err && sk->sk_err == EBADMSG) {
821 			*copied -= sk_msg_free(sk, msg);
822 			tls_free_open_rec(sk);
823 			err = -sk->sk_err;
824 		}
825 		if (psock)
826 			sk_psock_put(sk, psock);
827 		return err;
828 	}
829 more_data:
830 	enospc = sk_msg_full(msg);
831 	if (psock->eval == __SK_NONE) {
832 		delta = msg->sg.size;
833 		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
834 		delta -= msg->sg.size;
835 	}
836 	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
837 	    !enospc && !full_record) {
838 		err = -ENOSPC;
839 		goto out_err;
840 	}
841 	msg->cork_bytes = 0;
842 	send = msg->sg.size;
843 	if (msg->apply_bytes && msg->apply_bytes < send)
844 		send = msg->apply_bytes;
845 
846 	switch (psock->eval) {
847 	case __SK_PASS:
848 		err = tls_push_record(sk, flags, record_type);
849 		if (err && sk->sk_err == EBADMSG) {
850 			*copied -= sk_msg_free(sk, msg);
851 			tls_free_open_rec(sk);
852 			err = -sk->sk_err;
853 			goto out_err;
854 		}
855 		break;
856 	case __SK_REDIRECT:
857 		redir_ingress = psock->redir_ingress;
858 		sk_redir = psock->sk_redir;
859 		memcpy(&msg_redir, msg, sizeof(*msg));
860 		if (msg->apply_bytes < send)
861 			msg->apply_bytes = 0;
862 		else
863 			msg->apply_bytes -= send;
864 		sk_msg_return_zero(sk, msg, send);
865 		msg->sg.size -= send;
866 		release_sock(sk);
867 		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
868 					    &msg_redir, send, flags);
869 		lock_sock(sk);
870 		if (err < 0) {
871 			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
872 			msg->sg.size = 0;
873 		}
874 		if (msg->sg.size == 0)
875 			tls_free_open_rec(sk);
876 		break;
877 	case __SK_DROP:
878 	default:
879 		sk_msg_free_partial(sk, msg, send);
880 		if (msg->apply_bytes < send)
881 			msg->apply_bytes = 0;
882 		else
883 			msg->apply_bytes -= send;
884 		if (msg->sg.size == 0)
885 			tls_free_open_rec(sk);
886 		*copied -= (send + delta);
887 		err = -EACCES;
888 	}
889 
890 	if (likely(!err)) {
891 		bool reset_eval = !ctx->open_rec;
892 
893 		rec = ctx->open_rec;
894 		if (rec) {
895 			msg = &rec->msg_plaintext;
896 			if (!msg->apply_bytes)
897 				reset_eval = true;
898 		}
899 		if (reset_eval) {
900 			psock->eval = __SK_NONE;
901 			if (psock->sk_redir) {
902 				sock_put(psock->sk_redir);
903 				psock->sk_redir = NULL;
904 			}
905 		}
906 		if (rec)
907 			goto more_data;
908 	}
909  out_err:
910 	sk_psock_put(sk, psock);
911 	return err;
912 }
913 
914 static int tls_sw_push_pending_record(struct sock *sk, int flags)
915 {
916 	struct tls_context *tls_ctx = tls_get_ctx(sk);
917 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
918 	struct tls_rec *rec = ctx->open_rec;
919 	struct sk_msg *msg_pl;
920 	size_t copied;
921 
922 	if (!rec)
923 		return 0;
924 
925 	msg_pl = &rec->msg_plaintext;
926 	copied = msg_pl->sg.size;
927 	if (!copied)
928 		return 0;
929 
930 	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
931 				   &copied, flags);
932 }
933 
934 static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
935 				 struct sk_msg *msg_pl, size_t try_to_copy,
936 				 ssize_t *copied)
937 {
938 	struct page *page = NULL, **pages = &page;
939 
940 	do {
941 		ssize_t part;
942 		size_t off;
943 
944 		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
945 					      try_to_copy, 1, 0, &off);
946 		if (part <= 0)
947 			return part ?: -EIO;
948 
949 		if (WARN_ON_ONCE(!sendpage_ok(page))) {
950 			iov_iter_revert(&msg->msg_iter, part);
951 			return -EIO;
952 		}
953 
954 		sk_msg_page_add(msg_pl, page, part, off);
955 		sk_mem_charge(sk, part);
956 		*copied += part;
957 		try_to_copy -= part;
958 	} while (try_to_copy && !sk_msg_full(msg_pl));
959 
960 	return 0;
961 }
962 
963 static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
964 				 size_t size)
965 {
966 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
967 	struct tls_context *tls_ctx = tls_get_ctx(sk);
968 	struct tls_prot_info *prot = &tls_ctx->prot_info;
969 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
970 	bool async_capable = ctx->async_capable;
971 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
972 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
973 	bool eor = !(msg->msg_flags & MSG_MORE);
974 	size_t try_to_copy;
975 	ssize_t copied = 0;
976 	struct sk_msg *msg_pl, *msg_en;
977 	struct tls_rec *rec;
978 	int required_size;
979 	int num_async = 0;
980 	bool full_record;
981 	int record_room;
982 	int num_zc = 0;
983 	int orig_size;
984 	int ret = 0;
985 	int pending;
986 
987 	if (unlikely(msg->msg_controllen)) {
988 		ret = tls_process_cmsg(sk, msg, &record_type);
989 		if (ret) {
990 			if (ret == -EINPROGRESS)
991 				num_async++;
992 			else if (ret != -EAGAIN)
993 				goto send_end;
994 		}
995 	}
996 
997 	while (msg_data_left(msg)) {
998 		if (sk->sk_err) {
999 			ret = -sk->sk_err;
1000 			goto send_end;
1001 		}
1002 
1003 		if (ctx->open_rec)
1004 			rec = ctx->open_rec;
1005 		else
1006 			rec = ctx->open_rec = tls_get_rec(sk);
1007 		if (!rec) {
1008 			ret = -ENOMEM;
1009 			goto send_end;
1010 		}
1011 
1012 		msg_pl = &rec->msg_plaintext;
1013 		msg_en = &rec->msg_encrypted;
1014 
1015 		orig_size = msg_pl->sg.size;
1016 		full_record = false;
1017 		try_to_copy = msg_data_left(msg);
1018 		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
1019 		if (try_to_copy >= record_room) {
1020 			try_to_copy = record_room;
1021 			full_record = true;
1022 		}
1023 
1024 		required_size = msg_pl->sg.size + try_to_copy +
1025 				prot->overhead_size;
1026 
1027 		if (!sk_stream_memory_free(sk))
1028 			goto wait_for_sndbuf;
1029 
1030 alloc_encrypted:
1031 		ret = tls_alloc_encrypted_msg(sk, required_size);
1032 		if (ret) {
1033 			if (ret != -ENOSPC)
1034 				goto wait_for_memory;
1035 
1036 			/* Adjust try_to_copy according to the amount that was
1037 			 * actually allocated. The difference is due to the
1038 			 * max sg elements limit.
1039 			 */
1040 			try_to_copy -= required_size - msg_en->sg.size;
1041 			full_record = true;
1042 		}
1043 
1044 		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
1045 			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
1046 						    try_to_copy, &copied);
1047 			if (ret < 0)
1048 				goto send_end;
1049 			tls_ctx->pending_open_record_frags = true;
1050 			if (full_record || eor || sk_msg_full(msg_pl))
1051 				goto copied;
1052 			continue;
1053 		}
1054 
1055 		if (!is_kvec && (full_record || eor) && !async_capable) {
1056 			u32 first = msg_pl->sg.end;
1057 
1058 			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1059 							msg_pl, try_to_copy);
1060 			if (ret)
1061 				goto fallback_to_reg_send;
1062 
1063 			num_zc++;
1064 			copied += try_to_copy;
1065 
1066 			sk_msg_sg_copy_set(msg_pl, first);
1067 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1068 						  record_type, &copied,
1069 						  msg->msg_flags);
1070 			if (ret) {
1071 				if (ret == -EINPROGRESS)
1072 					num_async++;
1073 				else if (ret == -ENOMEM)
1074 					goto wait_for_memory;
1075 				else if (ctx->open_rec && ret == -ENOSPC)
1076 					goto rollback_iter;
1077 				else if (ret != -EAGAIN)
1078 					goto send_end;
1079 			}
1080 			continue;
1081 rollback_iter:
1082 			copied -= try_to_copy;
1083 			sk_msg_sg_copy_clear(msg_pl, first);
1084 			iov_iter_revert(&msg->msg_iter,
1085 					msg_pl->sg.size - orig_size);
1086 fallback_to_reg_send:
1087 			sk_msg_trim(sk, msg_pl, orig_size);
1088 		}
1089 
1090 		required_size = msg_pl->sg.size + try_to_copy;
1091 
1092 		ret = tls_clone_plaintext_msg(sk, required_size);
1093 		if (ret) {
1094 			if (ret != -ENOSPC)
1095 				goto send_end;
1096 
1097 			/* Adjust try_to_copy according to the amount that was
1098 			 * actually allocated. The difference is due to the
1099 			 * max sg elements limit.
1100 			 */
1101 			try_to_copy -= required_size - msg_pl->sg.size;
1102 			full_record = true;
1103 			sk_msg_trim(sk, msg_en,
1104 				    msg_pl->sg.size + prot->overhead_size);
1105 		}
1106 
1107 		if (try_to_copy) {
1108 			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1109 						       msg_pl, try_to_copy);
1110 			if (ret < 0)
1111 				goto trim_sgl;
1112 		}
1113 
1114 		/* Set pending frags only if the copy succeeded, otherwise we
1115 		 * would trim the sg but not reset the open record frags.
1116 		 */
1117 		tls_ctx->pending_open_record_frags = true;
1118 		copied += try_to_copy;
1119 copied:
1120 		if (full_record || eor) {
1121 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1122 						  record_type, &copied,
1123 						  msg->msg_flags);
1124 			if (ret) {
1125 				if (ret == -EINPROGRESS)
1126 					num_async++;
1127 				else if (ret == -ENOMEM)
1128 					goto wait_for_memory;
1129 				else if (ret != -EAGAIN) {
1130 					if (ret == -ENOSPC)
1131 						ret = 0;
1132 					goto send_end;
1133 				}
1134 			}
1135 		}
1136 
1137 		continue;
1138 
1139 wait_for_sndbuf:
1140 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1141 wait_for_memory:
1142 		ret = sk_stream_wait_memory(sk, &timeo);
1143 		if (ret) {
1144 trim_sgl:
1145 			if (ctx->open_rec)
1146 				tls_trim_both_msgs(sk, orig_size);
1147 			goto send_end;
1148 		}
1149 
1150 		if (ctx->open_rec && msg_en->sg.size < required_size)
1151 			goto alloc_encrypted;
1152 	}
1153 
1154 	if (!num_async) {
1155 		goto send_end;
1156 	} else if (num_zc) {
1157 		/* Wait for pending encryptions to get completed */
1158 		spin_lock_bh(&ctx->encrypt_compl_lock);
1159 		ctx->async_notify = true;
1160 
1161 		pending = atomic_read(&ctx->encrypt_pending);
1162 		spin_unlock_bh(&ctx->encrypt_compl_lock);
1163 		if (pending)
1164 			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1165 		else
1166 			reinit_completion(&ctx->async_wait.completion);
1167 
1168 		/* There can be no concurrent accesses, since we have no
1169 		 * pending encrypt operations
1170 		 */
1171 		WRITE_ONCE(ctx->async_notify, false);
1172 
1173 		if (ctx->async_wait.err) {
1174 			ret = ctx->async_wait.err;
1175 			copied = 0;
1176 		}
1177 	}
1178 
1179 	/* Transmit if any encryptions have completed */
1180 	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1181 		cancel_delayed_work(&ctx->tx_work.work);
1182 		tls_tx_records(sk, msg->msg_flags);
1183 	}
1184 
1185 send_end:
1186 	ret = sk_stream_error(sk, msg->msg_flags, ret);
1187 	return copied > 0 ? copied : ret;
1188 }
1189 
1190 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1191 {
1192 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1193 	int ret;
1194 
1195 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1196 			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES |
1197 			       MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1198 		return -EOPNOTSUPP;
1199 
1200 	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
1201 	if (ret)
1202 		return ret;
1203 	lock_sock(sk);
1204 	ret = tls_sw_sendmsg_locked(sk, msg, size);
1205 	release_sock(sk);
1206 	mutex_unlock(&tls_ctx->tx_lock);
1207 	return ret;
1208 }
1209 
1210 /*
1211  * Handle unexpected EOF during splice without SPLICE_F_MORE set.
1212  */
1213 void tls_sw_splice_eof(struct socket *sock)
1214 {
1215 	struct sock *sk = sock->sk;
1216 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1217 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1218 	struct tls_rec *rec;
1219 	struct sk_msg *msg_pl;
1220 	ssize_t copied = 0;
1221 	bool retrying = false;
1222 	int ret = 0;
1223 	int pending;
1224 
1225 	if (!ctx->open_rec)
1226 		return;
1227 
1228 	mutex_lock(&tls_ctx->tx_lock);
1229 	lock_sock(sk);
1230 
1231 retry:
1232 	rec = ctx->open_rec;
1233 	if (!rec)
1234 		goto unlock;
1235 
1236 	msg_pl = &rec->msg_plaintext;
1237 
1238 	/* Check the BPF advisor and perform transmission. */
1239 	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
1240 				  &copied, 0);
1241 	switch (ret) {
1242 	case 0:
1243 	case -EAGAIN:
1244 		if (retrying)
1245 			goto unlock;
1246 		retrying = true;
1247 		goto retry;
1248 	case -EINPROGRESS:
1249 		break;
1250 	default:
1251 		goto unlock;
1252 	}
1253 
1254 	/* Wait for pending encryptions to get completed */
1255 	spin_lock_bh(&ctx->encrypt_compl_lock);
1256 	ctx->async_notify = true;
1257 
1258 	pending = atomic_read(&ctx->encrypt_pending);
1259 	spin_unlock_bh(&ctx->encrypt_compl_lock);
1260 	if (pending)
1261 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1262 	else
1263 		reinit_completion(&ctx->async_wait.completion);
1264 
1265 	/* There can be no concurrent accesses, since we have no pending
1266 	 * encrypt operations
1267 	 */
1268 	WRITE_ONCE(ctx->async_notify, false);
1269 
1270 	if (ctx->async_wait.err)
1271 		goto unlock;
1272 
1273 	/* Transmit if any encryptions have completed */
1274 	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1275 		cancel_delayed_work(&ctx->tx_work.work);
1276 		tls_tx_records(sk, 0);
1277 	}
1278 
1279 unlock:
1280 	release_sock(sk);
1281 	mutex_unlock(&tls_ctx->tx_lock);
1282 }
1283 
1284 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1285 			   int offset, size_t size, int flags)
1286 {
1287 	struct bio_vec bvec;
1288 	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
1289 
1290 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1291 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
1292 		      MSG_NO_SHARED_FRAGS))
1293 		return -EOPNOTSUPP;
1294 	if (flags & MSG_SENDPAGE_NOTLAST)
1295 		msg.msg_flags |= MSG_MORE;
1296 
1297 	bvec_set_page(&bvec, page, size, offset);
1298 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
1299 	return tls_sw_sendmsg_locked(sk, &msg, size);
1300 }
1301 
1302 int tls_sw_sendpage(struct sock *sk, struct page *page,
1303 		    int offset, size_t size, int flags)
1304 {
1305 	struct bio_vec bvec;
1306 	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
1307 
1308 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1309 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1310 		return -EOPNOTSUPP;
1311 	if (flags & MSG_SENDPAGE_NOTLAST)
1312 		msg.msg_flags |= MSG_MORE;
1313 
1314 	bvec_set_page(&bvec, page, size, offset);
1315 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
1316 	return tls_sw_sendmsg(sk, &msg, size);
1317 }
1318 
1319 static int
1320 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
1321 		bool released)
1322 {
1323 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1324 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1325 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1326 	long timeo;
1327 
1328 	timeo = sock_rcvtimeo(sk, nonblock);
1329 
1330 	while (!tls_strp_msg_ready(ctx)) {
1331 		if (!sk_psock_queue_empty(psock))
1332 			return 0;
1333 
1334 		if (sk->sk_err)
1335 			return sock_error(sk);
1336 
1337 		if (!skb_queue_empty(&sk->sk_receive_queue)) {
1338 			tls_strp_check_rcv(&ctx->strp);
1339 			if (tls_strp_msg_ready(ctx))
1340 				break;
1341 		}
1342 
1343 		if (sk->sk_shutdown & RCV_SHUTDOWN)
1344 			return 0;
1345 
1346 		if (sock_flag(sk, SOCK_DONE))
1347 			return 0;
1348 
1349 		if (!timeo)
1350 			return -EAGAIN;
1351 
1352 		released = true;
1353 		add_wait_queue(sk_sleep(sk), &wait);
1354 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1355 		sk_wait_event(sk, &timeo,
1356 			      tls_strp_msg_ready(ctx) ||
1357 			      !sk_psock_queue_empty(psock),
1358 			      &wait);
1359 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1360 		remove_wait_queue(sk_sleep(sk), &wait);
1361 
1362 		/* Handle signals */
1363 		if (signal_pending(current))
1364 			return sock_intr_errno(timeo);
1365 	}
1366 
1367 	tls_strp_msg_load(&ctx->strp, released);
1368 
1369 	return 1;
1370 }
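/* Returns 1 with the next record parsed and loaded, 0 when there is no
 * record but the caller may still make progress (psock data queued, or
 * the connection is done / shut down for reads), and a negative errno on
 * error, timeout or signal.
 */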
1371 
1372 static int tls_setup_from_iter(struct iov_iter *from,
1373 			       int length, int *pages_used,
1374 			       struct scatterlist *to,
1375 			       int to_max_pages)
1376 {
1377 	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1378 	struct page *pages[MAX_SKB_FRAGS];
1379 	unsigned int size = 0;
1380 	ssize_t copied, use;
1381 	size_t offset;
1382 
1383 	while (length > 0) {
1384 		i = 0;
1385 		maxpages = to_max_pages - num_elem;
1386 		if (maxpages == 0) {
1387 			rc = -EFAULT;
1388 			goto out;
1389 		}
1390 		copied = iov_iter_get_pages2(from, pages,
1391 					    length,
1392 					    maxpages, &offset);
1393 		if (copied <= 0) {
1394 			rc = -EFAULT;
1395 			goto out;
1396 		}
1397 
1398 		length -= copied;
1399 		size += copied;
1400 		while (copied) {
1401 			use = min_t(int, copied, PAGE_SIZE - offset);
1402 
1403 			sg_set_page(&to[num_elem],
1404 				    pages[i], use, offset);
1405 			sg_unmark_end(&to[num_elem]);
1406 			/* We do not uncharge memory from this API */
1407 
1408 			offset = 0;
1409 			copied -= use;
1410 
1411 			i++;
1412 			num_elem++;
1413 		}
1414 	}
1415 	/* Mark the end in the last sg entry if newly added */
1416 	if (num_elem > *pages_used)
1417 		sg_mark_end(&to[num_elem - 1]);
1418 out:
1419 	if (rc)
1420 		iov_iter_revert(from, size);
1421 	*pages_used = num_elem;
1422 
1423 	return rc;
1424 }
1425 
1426 static struct sk_buff *
1427 tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
1428 		     unsigned int full_len)
1429 {
1430 	struct strp_msg *clr_rxm;
1431 	struct sk_buff *clr_skb;
1432 	int err;
1433 
1434 	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
1435 				       &err, sk->sk_allocation);
1436 	if (!clr_skb)
1437 		return NULL;
1438 
1439 	skb_copy_header(clr_skb, skb);
1440 	clr_skb->len = full_len;
1441 	clr_skb->data_len = full_len;
1442 
1443 	clr_rxm = strp_msg(clr_skb);
1444 	clr_rxm->offset = 0;
1445 
1446 	return clr_skb;
1447 }
1448 
1449 /* Decrypt handlers
1450  *
1451  * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
1452  * They must transform the darg in/out argument as follows:
1453  *       |          Input            |         Output
1454  * -------------------------------------------------------------------
1455  *    zc | Zero-copy decrypt allowed | Zero-copy performed
1456  * async | Async decrypt allowed     | Async crypto used / in progress
1457  *   skb |            *              | Output skb
1458  *
1459  * If ZC decryption was performed, darg.skb will point to the input skb.
1460  */
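/* A minimal caller-side sketch of this contract (mirroring what
 * tls_sw_recvmsg() does further down):
 *
 *	struct tls_decrypt_arg darg;
 *
 *	memset(&darg.inargs, 0, sizeof(darg.inargs));
 *	darg.zc = true;		- zero-copy decrypt allowed
 *	darg.async = true;	- async crypto allowed
 *	err = tls_rx_one_record(sk, msg, &darg);
 *
 * On success darg.skb holds the output record, and darg.zc / darg.async
 * report what the handlers actually did.
 */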
1461 
1462 /* This function decrypts the input skb either into out_iov, into out_sg,
1463  * or into a freshly allocated skb's buffers. The input parameter
1464  * 'darg->zc' indicates whether zero-copy mode should be tried. In
1465  * zero-copy mode, either out_iov or out_sg must be non-NULL. If both
1466  * out_iov and out_sg are NULL, the decryption happens into skb buffers,
1467  * i.e. zero-copy gets disabled and 'darg->zc' is updated.
1468  */
1469 static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
1470 			  struct scatterlist *out_sg,
1471 			  struct tls_decrypt_arg *darg)
1472 {
1473 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1474 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1475 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1476 	int n_sgin, n_sgout, aead_size, err, pages = 0;
1477 	struct sk_buff *skb = tls_strp_msg(ctx);
1478 	const struct strp_msg *rxm = strp_msg(skb);
1479 	const struct tls_msg *tlm = tls_msg(skb);
1480 	struct aead_request *aead_req;
1481 	struct scatterlist *sgin = NULL;
1482 	struct scatterlist *sgout = NULL;
1483 	const int data_len = rxm->full_len - prot->overhead_size;
1484 	int tail_pages = !!prot->tail_size;
1485 	struct tls_decrypt_ctx *dctx;
1486 	struct sk_buff *clear_skb;
1487 	int iv_offset = 0;
1488 	u8 *mem;
1489 
1490 	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1491 			 rxm->full_len - prot->prepend_size);
1492 	if (n_sgin < 1)
1493 		return n_sgin ?: -EBADMSG;
1494 
1495 	if (darg->zc && (out_iov || out_sg)) {
1496 		clear_skb = NULL;
1497 
1498 		if (out_iov)
1499 			n_sgout = 1 + tail_pages +
1500 				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
1501 		else
1502 			n_sgout = sg_nents(out_sg);
1503 	} else {
1504 		darg->zc = false;
1505 
1506 		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
1507 		if (!clear_skb)
1508 			return -ENOMEM;
1509 
1510 		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
1511 	}
1512 
1513 	/* Increment to accommodate AAD */
1514 	n_sgin = n_sgin + 1;
1515 
1516 	/* Allocate a single block of memory which contains
1517 	 *   aead_req || tls_decrypt_ctx.
1518 	 * Both structs are variable length.
1519 	 */
1520 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1521 	aead_size = ALIGN(aead_size, __alignof__(*dctx));
1522 	mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
1523 		      sk->sk_allocation);
1524 	if (!mem) {
1525 		err = -ENOMEM;
1526 		goto exit_free_skb;
1527 	}
1528 
1529 	/* Segment the allocated memory */
1530 	aead_req = (struct aead_request *)mem;
1531 	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
1532 	dctx->sk = sk;
1533 	sgin = &dctx->sg[0];
1534 	sgout = &dctx->sg[n_sgin];
1535 
1536 	/* For CCM based ciphers, first byte of nonce+iv is a constant */
1537 	/* For CCM-based ciphers, the first byte of the nonce+iv is a constant */
1538 	case TLS_CIPHER_AES_CCM_128:
1539 		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
1540 		iv_offset = 1;
1541 		break;
1542 	case TLS_CIPHER_SM4_CCM:
1543 		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1544 		iv_offset = 1;
1545 		break;
1546 	}
1547 
1548 	/* Prepare IV */
1549 	if (prot->version == TLS_1_3_VERSION ||
1550 	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
1551 		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
1552 		       prot->iv_size + prot->salt_size);
1553 	} else {
1554 		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1555 				    &dctx->iv[iv_offset] + prot->salt_size,
1556 				    prot->iv_size);
1557 		if (err < 0)
1558 			goto exit_free;
1559 		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
1560 	}
1561 	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
1562 
1563 	/* Prepare AAD */
1564 	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
1565 		     prot->tail_size,
1566 		     tls_ctx->rx.rec_seq, tlm->control, prot);
1567 
1568 	/* Prepare sgin */
1569 	sg_init_table(sgin, n_sgin);
1570 	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
1571 	err = skb_to_sgvec(skb, &sgin[1],
1572 			   rxm->offset + prot->prepend_size,
1573 			   rxm->full_len - prot->prepend_size);
1574 	if (err < 0)
1575 		goto exit_free;
1576 
1577 	if (clear_skb) {
1578 		sg_init_table(sgout, n_sgout);
1579 		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1580 
1581 		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
1582 				   data_len + prot->tail_size);
1583 		if (err < 0)
1584 			goto exit_free;
1585 	} else if (out_iov) {
1586 		sg_init_table(sgout, n_sgout);
1587 		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1588 
1589 		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
1590 					  (n_sgout - 1 - tail_pages));
1591 		if (err < 0)
1592 			goto exit_free_pages;
1593 
1594 		if (prot->tail_size) {
1595 			sg_unmark_end(&sgout[pages]);
1596 			sg_set_buf(&sgout[pages + 1], &dctx->tail,
1597 				   prot->tail_size);
1598 			sg_mark_end(&sgout[pages + 1]);
1599 		}
1600 	} else if (out_sg) {
1601 		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1602 	}
1603 
1604 	/* Prepare and submit AEAD request */
1605 	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
1606 				data_len + prot->tail_size, aead_req, darg);
1607 	if (err)
1608 		goto exit_free_pages;
1609 
1610 	darg->skb = clear_skb ?: tls_strp_msg(ctx);
1611 	clear_skb = NULL;
1612 
1613 	if (unlikely(darg->async)) {
1614 		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
1615 		if (err)
1616 			__skb_queue_tail(&ctx->async_hold, darg->skb);
1617 		return err;
1618 	}
1619 
1620 	if (prot->tail_size)
1621 		darg->tail = dctx->tail;
1622 
1623 exit_free_pages:
1624 	/* Release the pages in case iov was mapped to pages */
1625 	for (; pages > 0; pages--)
1626 		put_page(sg_page(&sgout[pages]));
1627 exit_free:
1628 	kfree(mem);
1629 exit_free_skb:
1630 	consume_skb(clear_skb);
1631 	return err;
1632 }
1633 
1634 static int
1635 tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
1636 	       struct msghdr *msg, struct tls_decrypt_arg *darg)
1637 {
1638 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1639 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1640 	struct strp_msg *rxm;
1641 	int pad, err;
1642 
1643 	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
1644 	if (err < 0) {
1645 		if (err == -EBADMSG)
1646 			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
1647 		return err;
1648 	}
1649 	/* keep going even for ->async, the code below is TLS 1.3 */
1650 
1651 	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
1652 	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
1653 		     darg->tail != TLS_RECORD_TYPE_DATA)) {
1654 		darg->zc = false;
1655 		if (!darg->tail)
1656 			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
1657 		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
1658 		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
1659 	}
1660 
1661 	pad = tls_padding_length(prot, darg->skb, darg);
1662 	if (pad < 0) {
1663 		if (darg->skb != tls_strp_msg(ctx))
1664 			consume_skb(darg->skb);
1665 		return pad;
1666 	}
1667 
1668 	rxm = strp_msg(darg->skb);
1669 	rxm->full_len -= pad;
1670 
1671 	return 0;
1672 }
1673 
1674 static int
1675 tls_decrypt_device(struct sock *sk, struct msghdr *msg,
1676 		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
1677 {
1678 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1679 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1680 	struct strp_msg *rxm;
1681 	int pad, err;
1682 
1683 	if (tls_ctx->rx_conf != TLS_HW)
1684 		return 0;
1685 
1686 	err = tls_device_decrypted(sk, tls_ctx);
1687 	if (err <= 0)
1688 		return err;
1689 
1690 	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
1691 	if (pad < 0)
1692 		return pad;
1693 
1694 	darg->async = false;
1695 	darg->skb = tls_strp_msg(ctx);
1696 	/* ->zc downgrade check, in case TLS 1.3 gets here */
1697 	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
1698 		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);
1699 
1700 	rxm = strp_msg(darg->skb);
1701 	rxm->full_len -= pad;
1702 
1703 	if (!darg->zc) {
1704 		/* Non-ZC case needs a real skb */
1705 		darg->skb = tls_strp_msg_detach(ctx);
1706 		if (!darg->skb)
1707 			return -ENOMEM;
1708 	} else {
1709 		unsigned int off, len;
1710 
1711 		/* In the ZC case nobody cares about the output skb.
1712 		 * Just copy the data here. Note the skb is not fully trimmed.
1713 		 */
1714 		off = rxm->offset + prot->prepend_size;
1715 		len = rxm->full_len - prot->overhead_size;
1716 
1717 		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
1718 		if (err)
1719 			return err;
1720 	}
1721 	return 1;
1722 }
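/* Return convention for tls_decrypt_device(): 0 means the device did not
 * handle this record (fall through to tls_decrypt_sw()), 1 means the
 * record is fully decrypted already, and a negative value is an error.
 * tls_rx_one_record() below relies on exactly this tristate.
 */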
1723 
1724 static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
1725 			     struct tls_decrypt_arg *darg)
1726 {
1727 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1728 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1729 	struct strp_msg *rxm;
1730 	int err;
1731 
1732 	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
1733 	if (!err)
1734 		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
1735 	if (err < 0)
1736 		return err;
1737 
1738 	rxm = strp_msg(darg->skb);
1739 	rxm->offset += prot->prepend_size;
1740 	rxm->full_len -= prot->overhead_size;
1741 	tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1742 
1743 	return 0;
1744 }
1745 
1746 int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
1747 {
1748 	struct tls_decrypt_arg darg = { .zc = true, };
1749 
1750 	return tls_decrypt_sg(sk, NULL, sgout, &darg);
1751 }
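/* decrypt_skb() is the entry point for callers that already hold a
 * destination scatterlist (the device-offload re-encrypt fallback, for
 * instance): with .zc = true and out_sg set, tls_decrypt_sg() writes
 * straight into sgout and runs synchronously.
 */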
1752 
1753 static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
1754 				   u8 *control)
1755 {
1756 	int err;
1757 
1758 	if (!*control) {
1759 		*control = tlm->control;
1760 		if (!*control)
1761 			return -EBADMSG;
1762 
1763 		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1764 			       sizeof(*control), control);
1765 		if (*control != TLS_RECORD_TYPE_DATA) {
1766 			if (err || msg->msg_flags & MSG_CTRUNC)
1767 				return -EIO;
1768 		}
1769 	} else if (*control != tlm->control) {
1770 		return 0;
1771 	}
1772 
1773 	return 1;
1774 }
1775 
1776 static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
1777 {
1778 	tls_strp_msg_done(&ctx->strp);
1779 }
1780 
1781 /* This function traverses the rx_list in the tls receive context and
1782  * copies the decrypted records into the buffer provided by the caller
1783  * when zero-copy is not in use. Records are also removed from the
1784  * rx_list if this is not a peek case and they have been fully consumed.
1785  */
1786 static int process_rx_list(struct tls_sw_context_rx *ctx,
1787 			   struct msghdr *msg,
1788 			   u8 *control,
1789 			   size_t skip,
1790 			   size_t len,
1791 			   bool is_peek)
1792 {
1793 	struct sk_buff *skb = skb_peek(&ctx->rx_list);
1794 	struct tls_msg *tlm;
1795 	ssize_t copied = 0;
1796 	int err;
1797 
1798 	while (skip && skb) {
1799 		struct strp_msg *rxm = strp_msg(skb);
1800 		tlm = tls_msg(skb);
1801 
1802 		err = tls_record_content_type(msg, tlm, control);
1803 		if (err <= 0)
1804 			goto out;
1805 
1806 		if (skip < rxm->full_len)
1807 			break;
1808 
1809 		skip = skip - rxm->full_len;
1810 		skb = skb_peek_next(skb, &ctx->rx_list);
1811 	}
1812 
1813 	while (len && skb) {
1814 		struct sk_buff *next_skb;
1815 		struct strp_msg *rxm = strp_msg(skb);
1816 		int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1817 
1818 		tlm = tls_msg(skb);
1819 
1820 		err = tls_record_content_type(msg, tlm, control);
1821 		if (err <= 0)
1822 			goto out;
1823 
1824 		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1825 					    msg, chunk);
1826 		if (err < 0)
1827 			goto out;
1828 
1829 		len = len - chunk;
1830 		copied = copied + chunk;
1831 
1832 		/* Consume the data from the record if this is a non-peek case */
1833 		if (!is_peek) {
1834 			rxm->offset = rxm->offset + chunk;
1835 			rxm->full_len = rxm->full_len - chunk;
1836 
1837 			/* Return if there is unconsumed data in the record */
1838 			if (rxm->full_len - skip)
1839 				break;
1840 		}
1841 
1842 		/* The remaining skip bytes must lie in the 1st record in
1843 		 * rx_list, so from the 2nd record onwards 'skip' must be 0.
1844 		 */
1845 		skip = 0;
1846 
1847 		if (msg)
1848 			msg->msg_flags |= MSG_EOR;
1849 
1850 		next_skb = skb_peek_next(skb, &ctx->rx_list);
1851 
1852 		if (!is_peek) {
1853 			__skb_unlink(skb, &ctx->rx_list);
1854 			consume_skb(skb);
1855 		}
1856 
1857 		skb = next_skb;
1858 	}
1859 	err = 0;
1860 
1861 out:
1862 	return copied ? : err;
1863 }
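/* Worked example: with skip == 100, len == 200 and two queued records of
 * 60 and 300 bytes, the first loop walks past the whole 60-byte record
 * (skip becomes 40) and the second loop copies 200 bytes starting 40
 * bytes into the 300-byte record; in the non-peek case that record stays
 * on rx_list with its offset/full_len adjusted for the consumed chunk.
 */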
1864 
1865 static bool
1866 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
1867 		       size_t len_left, size_t decrypted, ssize_t done,
1868 		       size_t *flushed_at)
1869 {
1870 	size_t max_rec;
1871 
1872 	if (len_left <= decrypted)
1873 		return false;
1874 
1875 	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
1876 	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
1877 		return false;
1878 
1879 	*flushed_at = done;
1880 	return sk_flush_backlog(sk);
1881 }
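/* The thresholds above bound the cost of flushing: roughly, the backlog is
 * flushed after every ~128K of data handed to the caller, or earlier once
 * the TCP receive queue holds less than one max-sized record, so the
 * strparser does not stall waiting for backlogged segments.
 */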
1882 
1883 static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
1884 			      bool nonblock)
1885 {
1886 	long timeo;
1887 	int err;
1888 
1889 	lock_sock(sk);
1890 
1891 	timeo = sock_rcvtimeo(sk, nonblock);
1892 
1893 	while (unlikely(ctx->reader_present)) {
1894 		DEFINE_WAIT_FUNC(wait, woken_wake_function);
1895 
1896 		ctx->reader_contended = 1;
1897 
1898 		add_wait_queue(&ctx->wq, &wait);
1899 		sk_wait_event(sk, &timeo,
1900 			      !READ_ONCE(ctx->reader_present), &wait);
1901 		remove_wait_queue(&ctx->wq, &wait);
1902 
1903 		if (timeo <= 0) {
1904 			err = -EAGAIN;
1905 			goto err_unlock;
1906 		}
1907 		if (signal_pending(current)) {
1908 			err = sock_intr_errno(timeo);
1909 			goto err_unlock;
1910 		}
1911 	}
1912 
1913 	WRITE_ONCE(ctx->reader_present, 1);
1914 
1915 	return 0;
1916 
1917 err_unlock:
1918 	release_sock(sk);
1919 	return err;
1920 }
1921 
1922 static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
1923 {
1924 	if (unlikely(ctx->reader_contended)) {
1925 		if (wq_has_sleeper(&ctx->wq))
1926 			wake_up(&ctx->wq);
1927 		else
1928 			ctx->reader_contended = 0;
1929 
1930 		WARN_ON_ONCE(!ctx->reader_present);
1931 	}
1932 
1933 	WRITE_ONCE(ctx->reader_present, 0);
1934 	release_sock(sk);
1935 }
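/* Usage pattern for the reader lock, as followed by tls_sw_recvmsg() and
 * tls_sw_splice_read() below (a sketch, not additional kernel code):
 *
 *	err = tls_rx_reader_lock(sk, ctx, nonblock);
 *	if (err < 0)
 *		return err;
 *	... consume records; the socket lock may be dropped meanwhile ...
 *	tls_rx_reader_unlock(sk, ctx);
 *
 * ctx->reader_present is what serializes readers across sections where the
 * socket lock itself is temporarily released (e.g. while waiting for data).
 */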
1936 
1937 int tls_sw_recvmsg(struct sock *sk,
1938 		   struct msghdr *msg,
1939 		   size_t len,
1940 		   int flags,
1941 		   int *addr_len)
1942 {
1943 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1944 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1945 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1946 	ssize_t decrypted = 0, async_copy_bytes = 0;
1947 	struct sk_psock *psock;
1948 	unsigned char control = 0;
1949 	size_t flushed_at = 0;
1950 	struct strp_msg *rxm;
1951 	struct tls_msg *tlm;
1952 	ssize_t copied = 0;
1953 	bool async = false;
1954 	int target, err;
1955 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1956 	bool is_peek = flags & MSG_PEEK;
1957 	bool released = true;
1958 	bool bpf_strp_enabled;
1959 	bool zc_capable;
1960 
1961 	if (unlikely(flags & MSG_ERRQUEUE))
1962 		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1963 
1964 	psock = sk_psock_get(sk);
1965 	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
1966 	if (err < 0)
1967 		return err;
1968 	bpf_strp_enabled = sk_psock_strp_enabled(psock);
1969 
1970 	/* If crypto failed the connection is broken */
1971 	err = ctx->async_wait.err;
1972 	if (err)
1973 		goto end;
1974 
1975 	/* Process pending decrypted records. They must be non-zero-copy */
1976 	err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
1977 	if (err < 0)
1978 		goto end;
1979 
1980 	copied = err;
1981 	if (len <= copied)
1982 		goto end;
1983 
1984 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1985 	len = len - copied;
1986 
1987 	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
1988 		ctx->zc_capable;
1989 	decrypted = 0;
1990 	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
1991 		struct tls_decrypt_arg darg;
1992 		int to_decrypt, chunk;
1993 
1994 		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
1995 				      released);
1996 		if (err <= 0) {
1997 			if (psock) {
1998 				chunk = sk_msg_recvmsg(sk, psock, msg, len,
1999 						       flags);
2000 				if (chunk > 0) {
2001 					decrypted += chunk;
2002 					len -= chunk;
2003 					continue;
2004 				}
2005 			}
2006 			goto recv_end;
2007 		}
2008 
2009 		memset(&darg.inargs, 0, sizeof(darg.inargs));
2010 
2011 		rxm = strp_msg(tls_strp_msg(ctx));
2012 		tlm = tls_msg(tls_strp_msg(ctx));
2013 
2014 		to_decrypt = rxm->full_len - prot->overhead_size;
2015 
2016 		if (zc_capable && to_decrypt <= len &&
2017 		    tlm->control == TLS_RECORD_TYPE_DATA)
2018 			darg.zc = true;
2019 
2020 		/* Do not use async mode if record is non-data */
2021 		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
2022 			darg.async = ctx->async_capable;
2023 		else
2024 			darg.async = false;
2025 
2026 		err = tls_rx_one_record(sk, msg, &darg);
2027 		if (err < 0) {
2028 			tls_err_abort(sk, -EBADMSG);
2029 			goto recv_end;
2030 		}
2031 
2032 		async |= darg.async;
2033 
2034 		/* If the type of records being processed is not known yet,
2035 		 * set it to the record type just dequeued. If it is already
2036 		 * known but does not match the record type just dequeued, go
2037 		 * to end. We always get the record type here since for TLS 1.2
2038 		 * it is known as soon as a record is dequeued from the stream
2039 		 * parser, and for TLS 1.3 async decrypt is disabled.
2040 		 */
2041 		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
2042 		if (err <= 0) {
2043 			DEBUG_NET_WARN_ON_ONCE(darg.zc);
2044 			tls_rx_rec_done(ctx);
2045 put_on_rx_list_err:
2046 			__skb_queue_tail(&ctx->rx_list, darg.skb);
2047 			goto recv_end;
2048 		}
2049 
2050 		/* periodically flush backlog, and feed strparser */
2051 		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
2052 						  decrypted + copied,
2053 						  &flushed_at);
2054 
2055 		/* TLS 1.3 may have updated the length by more than overhead */
2056 		rxm = strp_msg(darg.skb);
2057 		chunk = rxm->full_len;
2058 		tls_rx_rec_done(ctx);
2059 
2060 		if (!darg.zc) {
2061 			bool partially_consumed = chunk > len;
2062 			struct sk_buff *skb = darg.skb;
2063 
2064 			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
2065 
2066 			if (async) {
2067 				/* TLS 1.2-only, to_decrypt must be text len */
2068 				chunk = min_t(int, to_decrypt, len);
2069 				async_copy_bytes += chunk;
2070 put_on_rx_list:
2071 				decrypted += chunk;
2072 				len -= chunk;
2073 				__skb_queue_tail(&ctx->rx_list, skb);
2074 				continue;
2075 			}
2076 
2077 			if (bpf_strp_enabled) {
2078 				released = true;
2079 				err = sk_psock_tls_strp_read(psock, skb);
2080 				if (err != __SK_PASS) {
2081 					rxm->offset = rxm->offset + rxm->full_len;
2082 					rxm->full_len = 0;
2083 					if (err == __SK_DROP)
2084 						consume_skb(skb);
2085 					continue;
2086 				}
2087 			}
2088 
2089 			if (partially_consumed)
2090 				chunk = len;
2091 
2092 			err = skb_copy_datagram_msg(skb, rxm->offset,
2093 						    msg, chunk);
2094 			if (err < 0)
2095 				goto put_on_rx_list_err;
2096 
2097 			if (is_peek)
2098 				goto put_on_rx_list;
2099 
2100 			if (partially_consumed) {
2101 				rxm->offset += chunk;
2102 				rxm->full_len -= chunk;
2103 				goto put_on_rx_list;
2104 			}
2105 
2106 			consume_skb(skb);
2107 		}
2108 
2109 		decrypted += chunk;
2110 		len -= chunk;
2111 
2112 		/* Return full control message to userspace before trying
2113 		 * to parse another message type
2114 		 */
2115 		msg->msg_flags |= MSG_EOR;
2116 		if (control != TLS_RECORD_TYPE_DATA)
2117 			break;
2118 	}
2119 
2120 recv_end:
2121 	if (async) {
2122 		int ret, pending;
2123 
2124 		/* Wait for all previously submitted records to be decrypted */
2125 		spin_lock_bh(&ctx->decrypt_compl_lock);
2126 		reinit_completion(&ctx->async_wait.completion);
2127 		pending = atomic_read(&ctx->decrypt_pending);
2128 		spin_unlock_bh(&ctx->decrypt_compl_lock);
2129 		ret = 0;
2130 		if (pending)
2131 			ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2132 		__skb_queue_purge(&ctx->async_hold);
2133 
2134 		if (ret) {
2135 			if (err >= 0 || err == -EINPROGRESS)
2136 				err = ret;
2137 			decrypted = 0;
2138 			goto end;
2139 		}
2140 
2141 		/* Drain records from the rx_list & copy if required */
2142 		if (is_peek || is_kvec)
2143 			err = process_rx_list(ctx, msg, &control, copied,
2144 					      decrypted, is_peek);
2145 		else
2146 			err = process_rx_list(ctx, msg, &control, 0,
2147 					      async_copy_bytes, is_peek);
2148 		decrypted += max(err, 0);
2149 	}
2150 
2151 	copied += decrypted;
2152 
2153 end:
2154 	tls_rx_reader_unlock(sk, ctx);
2155 	if (psock)
2156 		sk_psock_put(sk, psock);
2157 	return copied ? : err;
2158 }
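/* Usage note (a sketch, userspace socket 'fd' assumed): the 'target'
 * computed above makes MSG_WAITALL block until the full 'len' is decrypted,
 * while a plain read may return once SO_RCVLOWAT bytes are available:
 *
 *	int lowat = 4096;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *	n = recv(fd, buf, sizeof(buf), 0);		// may stop at >= 4096
 *	n = recv(fd, buf, sizeof(buf), MSG_WAITALL);	// waits for sizeof(buf)
 */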
2159 
2160 ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
2161 			   struct pipe_inode_info *pipe,
2162 			   size_t len, unsigned int flags)
2163 {
2164 	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
2165 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2166 	struct strp_msg *rxm = NULL;
2167 	struct sock *sk = sock->sk;
2168 	struct tls_msg *tlm;
2169 	struct sk_buff *skb;
2170 	ssize_t copied = 0;
2171 	int chunk;
2172 	int err;
2173 
2174 	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
2175 	if (err < 0)
2176 		return err;
2177 
2178 	if (!skb_queue_empty(&ctx->rx_list)) {
2179 		skb = __skb_dequeue(&ctx->rx_list);
2180 	} else {
2181 		struct tls_decrypt_arg darg;
2182 
2183 		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
2184 				      true);
2185 		if (err <= 0)
2186 			goto splice_read_end;
2187 
2188 		memset(&darg.inargs, 0, sizeof(darg.inargs));
2189 
2190 		err = tls_rx_one_record(sk, NULL, &darg);
2191 		if (err < 0) {
2192 			tls_err_abort(sk, -EBADMSG);
2193 			goto splice_read_end;
2194 		}
2195 
2196 		tls_rx_rec_done(ctx);
2197 		skb = darg.skb;
2198 	}
2199 
2200 	rxm = strp_msg(skb);
2201 	tlm = tls_msg(skb);
2202 
2203 	/* splice does not support reading control messages */
2204 	if (tlm->control != TLS_RECORD_TYPE_DATA) {
2205 		err = -EINVAL;
2206 		goto splice_requeue;
2207 	}
2208 
2209 	chunk = min_t(unsigned int, rxm->full_len, len);
2210 	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2211 	if (copied < 0)
2212 		goto splice_requeue;
2213 
2214 	if (chunk < rxm->full_len) {
2215 		rxm->offset += chunk;
2216 		rxm->full_len -= chunk;
2217 		goto splice_requeue;
2218 	}
2219 
2220 	consume_skb(skb);
2221 
2222 splice_read_end:
2223 	tls_rx_reader_unlock(sk, ctx);
2224 	return copied ? : err;
2225 
2226 splice_requeue:
2227 	__skb_queue_head(&ctx->rx_list, skb);
2228 	goto splice_read_end;
2229 }
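/* Minimal userspace sketch (assumes an established kTLS socket 'fd'):
 *
 *	int pfd[2];
 *	ssize_t n;
 *
 *	if (pipe(pfd) == 0)
 *		n = splice(fd, NULL, pfd[1], NULL, 4096, 0);
 *
 * Only TLS_RECORD_TYPE_DATA records can be spliced; a control record makes
 * the call fail with EINVAL and the record is requeued for recvmsg().
 */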
2230 
2231 bool tls_sw_sock_is_readable(struct sock *sk)
2232 {
2233 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2234 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2235 	bool ingress_empty = true;
2236 	struct sk_psock *psock;
2237 
2238 	rcu_read_lock();
2239 	psock = sk_psock(sk);
2240 	if (psock)
2241 		ingress_empty = list_empty(&psock->ingress_msg);
2242 	rcu_read_unlock();
2243 
2244 	return !ingress_empty || tls_strp_msg_ready(ctx) ||
2245 		!skb_queue_empty(&ctx->rx_list);
2246 }
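/* The three readability sources checked above are: queued psock ingress
 * messages, a record parsed but not yet decrypted, and already-decrypted
 * records on rx_list.  Any of them makes poll()/select() report POLLIN,
 * e.g. (userspace sketch, 'fd' assumed):
 *
 *	struct pollfd p = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&p, 1, -1) > 0 && (p.revents & POLLIN))
 *		n = recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
 */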
2247 
2248 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
2249 {
2250 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2251 	struct tls_prot_info *prot = &tls_ctx->prot_info;
2252 	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2253 	size_t cipher_overhead;
2254 	size_t data_len = 0;
2255 	int ret;
2256 
2257 	/* Verify that we have a full TLS header, or wait for more data */
2258 	if (strp->stm.offset + prot->prepend_size > skb->len)
2259 		return 0;
2260 
2261 	/* Sanity-check size of on-stack buffer. */
2262 	if (WARN_ON(prot->prepend_size > sizeof(header))) {
2263 		ret = -EINVAL;
2264 		goto read_failure;
2265 	}
2266 
2267 	/* Linearize header to local buffer */
2268 	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
2269 	if (ret < 0)
2270 		goto read_failure;
2271 
2272 	strp->mark = header[0];
2273 
2274 	data_len = ((header[4] & 0xFF) | (header[3] << 8));
2275 
2276 	cipher_overhead = prot->tag_size;
2277 	if (prot->version != TLS_1_3_VERSION &&
2278 	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2279 		cipher_overhead += prot->iv_size;
2280 
2281 	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2282 	    prot->tail_size) {
2283 		ret = -EMSGSIZE;
2284 		goto read_failure;
2285 	}
2286 	if (data_len < cipher_overhead) {
2287 		ret = -EBADMSG;
2288 		goto read_failure;
2289 	}
2290 
2291 	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2292 	if (header[1] != TLS_1_2_VERSION_MINOR ||
2293 	    header[2] != TLS_1_2_VERSION_MAJOR) {
2294 		ret = -EINVAL;
2295 		goto read_failure;
2296 	}
2297 
2298 	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2299 				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
2300 	return data_len + TLS_HEADER_SIZE;
2301 
2302 read_failure:
2303 	tls_err_abort(strp->sk, ret);
2304 
2305 	return ret;
2306 }
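/* Worked example for the header parsing above: given the record header
 * bytes 17 03 03 00 2a, strp->mark becomes 0x17 (application data),
 * bytes 1-2 hold the legacy record version 0x0303, and
 * data_len = (0x2a | 0x00 << 8) = 42, so the function returns
 * TLS_HEADER_SIZE + 42 = 47 bytes as the full record length.
 */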
2307 
2308 void tls_rx_msg_ready(struct tls_strparser *strp)
2309 {
2310 	struct tls_sw_context_rx *ctx;
2311 
2312 	ctx = container_of(strp, struct tls_sw_context_rx, strp);
2313 	ctx->saved_data_ready(strp->sk);
2314 }
2315 
2316 static void tls_data_ready(struct sock *sk)
2317 {
2318 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2319 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2320 	struct sk_psock *psock;
2321 	gfp_t alloc_save;
2322 
2323 	trace_sk_data_ready(sk);
2324 
2325 	alloc_save = sk->sk_allocation;
2326 	sk->sk_allocation = GFP_ATOMIC;
2327 	tls_strp_data_ready(&ctx->strp);
2328 	sk->sk_allocation = alloc_save;
2329 
2330 	psock = sk_psock_get(sk);
2331 	if (psock) {
2332 		if (!list_empty(&psock->ingress_msg))
2333 			ctx->saved_data_ready(sk);
2334 		sk_psock_put(sk, psock);
2335 	}
2336 }
2337 
2338 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2339 {
2340 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2341 
2342 	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2343 	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2344 	cancel_delayed_work_sync(&ctx->tx_work.work);
2345 }
2346 
2347 void tls_sw_release_resources_tx(struct sock *sk)
2348 {
2349 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2350 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2351 	struct tls_rec *rec, *tmp;
2352 	int pending;
2353 
2354 	/* Wait for any pending async encryptions to complete */
2355 	spin_lock_bh(&ctx->encrypt_compl_lock);
2356 	ctx->async_notify = true;
2357 	pending = atomic_read(&ctx->encrypt_pending);
2358 	spin_unlock_bh(&ctx->encrypt_compl_lock);
2359 
2360 	if (pending)
2361 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2362 
2363 	tls_tx_records(sk, -1);
2364 
2365 	/* Free up unsent records in tx_list. First, free the partially
2366 	 * sent record, if any, at the head of tx_list.
2367 	 */
2368 	if (tls_ctx->partially_sent_record) {
2369 		tls_free_partial_record(sk, tls_ctx);
2370 		rec = list_first_entry(&ctx->tx_list,
2371 				       struct tls_rec, list);
2372 		list_del(&rec->list);
2373 		sk_msg_free(sk, &rec->msg_plaintext);
2374 		kfree(rec);
2375 	}
2376 
2377 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2378 		list_del(&rec->list);
2379 		sk_msg_free(sk, &rec->msg_encrypted);
2380 		sk_msg_free(sk, &rec->msg_plaintext);
2381 		kfree(rec);
2382 	}
2383 
2384 	crypto_free_aead(ctx->aead_send);
2385 	tls_free_open_rec(sk);
2386 }
2387 
2388 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2389 {
2390 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2391 
2392 	kfree(ctx);
2393 }
2394 
2395 void tls_sw_release_resources_rx(struct sock *sk)
2396 {
2397 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2398 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2399 
2400 	kfree(tls_ctx->rx.rec_seq);
2401 	kfree(tls_ctx->rx.iv);
2402 
2403 	if (ctx->aead_recv) {
2404 		__skb_queue_purge(&ctx->rx_list);
2405 		crypto_free_aead(ctx->aead_recv);
2406 		tls_strp_stop(&ctx->strp);
2407 		/* If tls_sw_strparser_arm() was not called (cleanup paths),
2408 		 * we still want to call tls_strp_stop(), but sk->sk_data_ready
2409 		 * was never swapped.
2410 		 */
2411 		if (ctx->saved_data_ready) {
2412 			write_lock_bh(&sk->sk_callback_lock);
2413 			sk->sk_data_ready = ctx->saved_data_ready;
2414 			write_unlock_bh(&sk->sk_callback_lock);
2415 		}
2416 	}
2417 }
2418 
2419 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2420 {
2421 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2422 
2423 	tls_strp_done(&ctx->strp);
2424 }
2425 
2426 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2427 {
2428 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2429 
2430 	kfree(ctx);
2431 }
2432 
2433 void tls_sw_free_resources_rx(struct sock *sk)
2434 {
2435 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2436 
2437 	tls_sw_release_resources_rx(sk);
2438 	tls_sw_free_ctx_rx(tls_ctx);
2439 }
2440 
2441 /* The work handler that transmits the encrypted records in tx_list */
2442 static void tx_work_handler(struct work_struct *work)
2443 {
2444 	struct delayed_work *delayed_work = to_delayed_work(work);
2445 	struct tx_work *tx_work = container_of(delayed_work,
2446 					       struct tx_work, work);
2447 	struct sock *sk = tx_work->sk;
2448 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2449 	struct tls_sw_context_tx *ctx;
2450 
2451 	if (unlikely(!tls_ctx))
2452 		return;
2453 
2454 	ctx = tls_sw_ctx_tx(tls_ctx);
2455 	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2456 		return;
2457 
2458 	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2459 		return;
2460 
2461 	if (mutex_trylock(&tls_ctx->tx_lock)) {
2462 		lock_sock(sk);
2463 		tls_tx_records(sk, -1);
2464 		release_sock(sk);
2465 		mutex_unlock(&tls_ctx->tx_lock);
2466 	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
2467 		/* Someone is holding the tx_lock, they will likely run Tx
2468 		 * and cancel the work on their way out of the lock section.
2469 		 * Schedule a long delay just in case.
2470 		 */
2471 		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
2472 	}
2473 }
2474 
2475 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
2476 {
2477 	struct tls_rec *rec;
2478 
2479 	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
2480 	if (!rec)
2481 		return false;
2482 
2483 	return READ_ONCE(rec->tx_ready);
2484 }
2485 
2486 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2487 {
2488 	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2489 
2490 	/* Schedule the transmission if the tx list is ready */
2491 	if (tls_is_tx_ready(tx_ctx) &&
2492 	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2493 		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2494 }
2495 
2496 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2497 {
2498 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2499 
2500 	write_lock_bh(&sk->sk_callback_lock);
2501 	rx_ctx->saved_data_ready = sk->sk_data_ready;
2502 	sk->sk_data_ready = tls_data_ready;
2503 	write_unlock_bh(&sk->sk_callback_lock);
2504 }
2505 
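/* TLS 1.3 records may carry padding, so the plaintext length is not known
 * until decryption has completed; decrypting straight into user memory
 * (zero copy) is therefore only safe when the peer sends no padding
 * (rx_no_pad) or when the connection is not TLS 1.3.
 */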
2506 void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
2507 {
2508 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2509 
2510 	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
2511 		tls_ctx->prot_info.version != TLS_1_3_VERSION;
2512 }
2513 
2514 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2515 {
2516 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2517 	struct tls_prot_info *prot = &tls_ctx->prot_info;
2518 	struct tls_crypto_info *crypto_info;
2519 	struct tls_sw_context_tx *sw_ctx_tx = NULL;
2520 	struct tls_sw_context_rx *sw_ctx_rx = NULL;
2521 	struct cipher_context *cctx;
2522 	struct crypto_aead **aead;
2523 	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2524 	struct crypto_tfm *tfm;
2525 	char *iv, *rec_seq, *key, *salt, *cipher_name;
2526 	size_t keysize;
2527 	int rc = 0;
2528 
2529 	if (!ctx) {
2530 		rc = -EINVAL;
2531 		goto out;
2532 	}
2533 
2534 	if (tx) {
2535 		if (!ctx->priv_ctx_tx) {
2536 			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2537 			if (!sw_ctx_tx) {
2538 				rc = -ENOMEM;
2539 				goto out;
2540 			}
2541 			ctx->priv_ctx_tx = sw_ctx_tx;
2542 		} else {
2543 			sw_ctx_tx =
2544 				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2545 		}
2546 	} else {
2547 		if (!ctx->priv_ctx_rx) {
2548 			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2549 			if (!sw_ctx_rx) {
2550 				rc = -ENOMEM;
2551 				goto out;
2552 			}
2553 			ctx->priv_ctx_rx = sw_ctx_rx;
2554 		} else {
2555 			sw_ctx_rx =
2556 				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2557 		}
2558 	}
2559 
2560 	if (tx) {
2561 		crypto_init_wait(&sw_ctx_tx->async_wait);
2562 		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
2563 		crypto_info = &ctx->crypto_send.info;
2564 		cctx = &ctx->tx;
2565 		aead = &sw_ctx_tx->aead_send;
2566 		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2567 		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2568 		sw_ctx_tx->tx_work.sk = sk;
2569 	} else {
2570 		crypto_init_wait(&sw_ctx_rx->async_wait);
2571 		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
2572 		init_waitqueue_head(&sw_ctx_rx->wq);
2573 		crypto_info = &ctx->crypto_recv.info;
2574 		cctx = &ctx->rx;
2575 		skb_queue_head_init(&sw_ctx_rx->rx_list);
2576 		skb_queue_head_init(&sw_ctx_rx->async_hold);
2577 		aead = &sw_ctx_rx->aead_recv;
2578 	}
2579 
2580 	switch (crypto_info->cipher_type) {
2581 	case TLS_CIPHER_AES_GCM_128: {
2582 		struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2583 
2584 		gcm_128_info = (void *)crypto_info;
2585 		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2586 		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2587 		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2588 		iv = gcm_128_info->iv;
2589 		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2590 		rec_seq = gcm_128_info->rec_seq;
2591 		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2592 		key = gcm_128_info->key;
2593 		salt = gcm_128_info->salt;
2594 		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2595 		cipher_name = "gcm(aes)";
2596 		break;
2597 	}
2598 	case TLS_CIPHER_AES_GCM_256: {
2599 		struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2600 
2601 		gcm_256_info = (void *)crypto_info;
2602 		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2603 		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2604 		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2605 		iv = gcm_256_info->iv;
2606 		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2607 		rec_seq = gcm_256_info->rec_seq;
2608 		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2609 		key = gcm_256_info->key;
2610 		salt = gcm_256_info->salt;
2611 		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2612 		cipher_name = "gcm(aes)";
2613 		break;
2614 	}
2615 	case TLS_CIPHER_AES_CCM_128: {
2616 		struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2617 
2618 		ccm_128_info = (void *)crypto_info;
2619 		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2620 		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2621 		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2622 		iv = ccm_128_info->iv;
2623 		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2624 		rec_seq = ccm_128_info->rec_seq;
2625 		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2626 		key = ccm_128_info->key;
2627 		salt = ccm_128_info->salt;
2628 		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2629 		cipher_name = "ccm(aes)";
2630 		break;
2631 	}
2632 	case TLS_CIPHER_CHACHA20_POLY1305: {
2633 		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
2634 
2635 		chacha20_poly1305_info = (void *)crypto_info;
2636 		nonce_size = 0;
2637 		tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
2638 		iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
2639 		iv = chacha20_poly1305_info->iv;
2640 		rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
2641 		rec_seq = chacha20_poly1305_info->rec_seq;
2642 		keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
2643 		key = chacha20_poly1305_info->key;
2644 		salt = chacha20_poly1305_info->salt;
2645 		salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
2646 		cipher_name = "rfc7539(chacha20,poly1305)";
2647 		break;
2648 	}
2649 	case TLS_CIPHER_SM4_GCM: {
2650 		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
2651 
2652 		sm4_gcm_info = (void *)crypto_info;
2653 		nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2654 		tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
2655 		iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2656 		iv = sm4_gcm_info->iv;
2657 		rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
2658 		rec_seq = sm4_gcm_info->rec_seq;
2659 		keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
2660 		key = sm4_gcm_info->key;
2661 		salt = sm4_gcm_info->salt;
2662 		salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
2663 		cipher_name = "gcm(sm4)";
2664 		break;
2665 	}
2666 	case TLS_CIPHER_SM4_CCM: {
2667 		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
2668 
2669 		sm4_ccm_info = (void *)crypto_info;
2670 		nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2671 		tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
2672 		iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2673 		iv = sm4_ccm_info->iv;
2674 		rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
2675 		rec_seq = sm4_ccm_info->rec_seq;
2676 		keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
2677 		key = sm4_ccm_info->key;
2678 		salt = sm4_ccm_info->salt;
2679 		salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
2680 		cipher_name = "ccm(sm4)";
2681 		break;
2682 	}
2683 	case TLS_CIPHER_ARIA_GCM_128: {
2684 		struct tls12_crypto_info_aria_gcm_128 *aria_gcm_128_info;
2685 
2686 		aria_gcm_128_info = (void *)crypto_info;
2687 		nonce_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
2688 		tag_size = TLS_CIPHER_ARIA_GCM_128_TAG_SIZE;
2689 		iv_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
2690 		iv = aria_gcm_128_info->iv;
2691 		rec_seq_size = TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE;
2692 		rec_seq = aria_gcm_128_info->rec_seq;
2693 		keysize = TLS_CIPHER_ARIA_GCM_128_KEY_SIZE;
2694 		key = aria_gcm_128_info->key;
2695 		salt = aria_gcm_128_info->salt;
2696 		salt_size = TLS_CIPHER_ARIA_GCM_128_SALT_SIZE;
2697 		cipher_name = "gcm(aria)";
2698 		break;
2699 	}
2700 	case TLS_CIPHER_ARIA_GCM_256: {
2701 		struct tls12_crypto_info_aria_gcm_256 *gcm_256_info;
2702 
2703 		gcm_256_info = (void *)crypto_info;
2704 		nonce_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
2705 		tag_size = TLS_CIPHER_ARIA_GCM_256_TAG_SIZE;
2706 		iv_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
2707 		iv = gcm_256_info->iv;
2708 		rec_seq_size = TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE;
2709 		rec_seq = gcm_256_info->rec_seq;
2710 		keysize = TLS_CIPHER_ARIA_GCM_256_KEY_SIZE;
2711 		key = gcm_256_info->key;
2712 		salt = gcm_256_info->salt;
2713 		salt_size = TLS_CIPHER_ARIA_GCM_256_SALT_SIZE;
2714 		cipher_name = "gcm(aria)";
2715 		break;
2716 	}
2717 	default:
2718 		rc = -EINVAL;
2719 		goto free_priv;
2720 	}
2721 
2722 	if (crypto_info->version == TLS_1_3_VERSION) {
2723 		nonce_size = 0;
2724 		prot->aad_size = TLS_HEADER_SIZE;
2725 		prot->tail_size = 1;
2726 	} else {
2727 		prot->aad_size = TLS_AAD_SPACE_SIZE;
2728 		prot->tail_size = 0;
2729 	}
2730 
2731 	/* Sanity-check the sizes for stack allocations. */
2732 	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2733 	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
2734 	    prot->aad_size > TLS_MAX_AAD_SIZE) {
2735 		rc = -EINVAL;
2736 		goto free_priv;
2737 	}
2738 
2739 	prot->version = crypto_info->version;
2740 	prot->cipher_type = crypto_info->cipher_type;
2741 	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2742 	prot->tag_size = tag_size;
2743 	prot->overhead_size = prot->prepend_size +
2744 			      prot->tag_size + prot->tail_size;
2745 	prot->iv_size = iv_size;
2746 	prot->salt_size = salt_size;
2747 	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2748 	if (!cctx->iv) {
2749 		rc = -ENOMEM;
2750 		goto free_priv;
2751 	}
2752 	/* Note: 128 & 256 bit salt are the same size */
2753 	prot->rec_seq_size = rec_seq_size;
2754 	memcpy(cctx->iv, salt, salt_size);
2755 	memcpy(cctx->iv + salt_size, iv, iv_size);
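	/* Layout sketch, assuming AES-GCM-128: salt_size = 4 and iv_size = 8,
	 * so cctx->iv now holds the 4-byte implicit salt followed by the
	 * 8-byte per-record IV, i.e. the full 12-byte AEAD nonce buffer.
	 */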
2756 	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2757 	if (!cctx->rec_seq) {
2758 		rc = -ENOMEM;
2759 		goto free_iv;
2760 	}
2761 
2762 	if (!*aead) {
2763 		*aead = crypto_alloc_aead(cipher_name, 0, 0);
2764 		if (IS_ERR(*aead)) {
2765 			rc = PTR_ERR(*aead);
2766 			*aead = NULL;
2767 			goto free_rec_seq;
2768 		}
2769 	}
2770 
2771 	ctx->push_pending_record = tls_sw_push_pending_record;
2772 
2773 	rc = crypto_aead_setkey(*aead, key, keysize);
2774 
2775 	if (rc)
2776 		goto free_aead;
2777 
2778 	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2779 	if (rc)
2780 		goto free_aead;
2781 
2782 	if (sw_ctx_rx) {
2783 		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2784 
2785 		tls_update_rx_zc_capable(ctx);
2786 		sw_ctx_rx->async_capable =
2787 			crypto_info->version != TLS_1_3_VERSION &&
2788 			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
2789 
2790 		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
2791 		if (rc)
2792 			goto free_aead;
2793 	}
2794 
2795 	goto out;
2796 
2797 free_aead:
2798 	crypto_free_aead(*aead);
2799 	*aead = NULL;
2800 free_rec_seq:
2801 	kfree(cctx->rec_seq);
2802 	cctx->rec_seq = NULL;
2803 free_iv:
2804 	kfree(cctx->iv);
2805 	cctx->iv = NULL;
2806 free_priv:
2807 	if (tx) {
2808 		kfree(ctx->priv_ctx_tx);
2809 		ctx->priv_ctx_tx = NULL;
2810 	} else {
2811 		kfree(ctx->priv_ctx_rx);
2812 		ctx->priv_ctx_rx = NULL;
2813 	}
2814 out:
2815 	return rc;
2816 }
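/* Userspace configuration sketch for the path above (assumes a connected
 * TCP socket 'fd' and key material from a completed handshake; see
 * Documentation/networking/tls.rst):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *
 *	// fill ci.key, ci.iv, ci.salt and ci.rec_seq from the handshake
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 */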
2817