xref: /openbmc/linux/net/tls/tls_sw.c (revision bd4af432)
1 /*
2  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3  * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4  * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5  * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6  * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7  * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
8  *
9  * This software is available to you under a choice of one of two
10  * licenses.  You may choose to be licensed under the terms of the GNU
11  * General Public License (GPL) Version 2, available from the file
12  * COPYING in the main directory of this source tree, or the
13  * OpenIB.org BSD license below:
14  *
15  *     Redistribution and use in source and binary forms, with or
16  *     without modification, are permitted provided that the following
17  *     conditions are met:
18  *
19  *      - Redistributions of source code must retain the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer.
22  *
23  *      - Redistributions in binary form must reproduce the above
24  *        copyright notice, this list of conditions and the following
25  *        disclaimer in the documentation and/or other materials
26  *        provided with the distribution.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35  * SOFTWARE.
36  */
37 
38 #include <linux/sched/signal.h>
39 #include <linux/module.h>
40 #include <crypto/aead.h>
41 
42 #include <net/strparser.h>
43 #include <net/tls.h>
44 
45 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
46                      unsigned int recursion_level)
47 {
48         int start = skb_headlen(skb);
49         int i, chunk = start - offset;
50         struct sk_buff *frag_iter;
51         int elt = 0;
52 
53         if (unlikely(recursion_level >= 24))
54                 return -EMSGSIZE;
55 
56         if (chunk > 0) {
57                 if (chunk > len)
58                         chunk = len;
59                 elt++;
60                 len -= chunk;
61                 if (len == 0)
62                         return elt;
63                 offset += chunk;
64         }
65 
66         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
67                 int end;
68 
69                 WARN_ON(start > offset + len);
70 
71                 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
72                 chunk = end - offset;
73                 if (chunk > 0) {
74                         if (chunk > len)
75                                 chunk = len;
76                         elt++;
77                         len -= chunk;
78                         if (len == 0)
79                                 return elt;
80                         offset += chunk;
81                 }
82                 start = end;
83         }
84 
85         if (unlikely(skb_has_frag_list(skb))) {
86                 skb_walk_frags(skb, frag_iter) {
87                         int end, ret;
88 
89                         WARN_ON(start > offset + len);
90 
91                         end = start + frag_iter->len;
92                         chunk = end - offset;
93                         if (chunk > 0) {
94                                 if (chunk > len)
95                                         chunk = len;
96                                 ret = __skb_nsg(frag_iter, offset - start, chunk,
97                                                 recursion_level + 1);
98                                 if (unlikely(ret < 0))
99                                         return ret;
100                                 elt += ret;
101                                 len -= chunk;
102                                 if (len == 0)
103                                         return elt;
104                                 offset += chunk;
105                         }
106                         start = end;
107                 }
108         }
109         BUG_ON(len);
110         return elt;
111 }
112 
113 /* Return the number of scatterlist elements required to completely map the
114  * skb, or -EMSGSIZE if the recursion depth is exceeded.
115  */
116 static int skb_nsg(struct sk_buff *skb, int offset, int len)
117 {
118         return __skb_nsg(skb, offset, len, 0);
119 }
120 
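/* Example (illustrative sketch, not part of the original file): how a
 * caller might pair skb_nsg() with skb_to_sgvec() -- size the table
 * first, then map the same byte range. The helper name and the
 * GFP_ATOMIC flag are assumptions made for illustration only.
 */
static int example_skb_to_sg(struct sk_buff *skb, int offset, int len,
			     struct scatterlist **sgp)
{
	int nsg = skb_nsg(skb, offset, len);
	struct scatterlist *sg;

	if (nsg < 0)
		return nsg;	/* e.g. -EMSGSIZE on deep frag lists */

	sg = kmalloc_array(nsg, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nsg);
	/* skb_to_sgvec() fails if the range needs more than nsg entries */
	nsg = skb_to_sgvec(skb, sg, offset, len);
	if (nsg < 0) {
		kfree(sg);
		return nsg;
	}
	*sgp = sg;
	return nsg;
}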
121 static int padding_length(struct tls_sw_context_rx *ctx,
122 			  struct tls_prot_info *prot, struct sk_buff *skb)
123 {
124 	struct strp_msg *rxm = strp_msg(skb);
125 	int sub = 0;
126 
127 	/* Determine zero-padding length */
128 	if (prot->version == TLS_1_3_VERSION) {
129 		char content_type = 0;
130 		int err;
131 		int back = 17;
132 
133 		while (content_type == 0) {
134 			if (back > rxm->full_len - prot->prepend_size)
135 				return -EBADMSG;
136 			err = skb_copy_bits(skb,
137 					    rxm->offset + rxm->full_len - back,
138 					    &content_type, 1);
139 			if (err)
140 				return err;
141 			if (content_type)
142 				break;
143 			sub++;
144 			back++;
145 		}
146 		ctx->control = content_type;
147 	}
148 	return sub;
149 }
150 
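/* Worked example (illustration, not part of the original file): TLS 1.3
 * hides the record type behind the AEAD, so the inner plaintext is
 *
 *	content || content_type (1 byte) || zero padding
 *
 * and the 16-byte auth tag still trails it inside the skb when
 * padding_length() runs, which is why the scan starts at back = 17
 * (tag + 1 byte). E.g. "hi" sent as application data with three bytes of
 * padding decrypts to
 *
 *	68 69 17 00 00 00 <16-byte tag>
 *
 * so the loop steps back over the three zeros (sub = 3) and stops at
 * 0x17, the application_data content type.
 */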
151 static void tls_decrypt_done(struct crypto_async_request *req, int err)
152 {
153 	struct aead_request *aead_req = (struct aead_request *)req;
154 	struct scatterlist *sgout = aead_req->dst;
155 	struct scatterlist *sgin = aead_req->src;
156 	struct tls_sw_context_rx *ctx;
157 	struct tls_context *tls_ctx;
158 	struct tls_prot_info *prot;
159 	struct scatterlist *sg;
160 	struct sk_buff *skb;
161 	unsigned int pages;
162 	int pending;
163 
164 	skb = (struct sk_buff *)req->data;
165 	tls_ctx = tls_get_ctx(skb->sk);
166 	ctx = tls_sw_ctx_rx(tls_ctx);
167 	prot = &tls_ctx->prot_info;
168 
169 	/* Propagate the error, if any */
170 	if (err) {
171 		if (err == -EBADMSG)
172 			TLS_INC_STATS(sock_net(skb->sk),
173 				      LINUX_MIB_TLSDECRYPTERROR);
174 		ctx->async_wait.err = err;
175 		tls_err_abort(skb->sk, err);
176 	} else {
177 		struct strp_msg *rxm = strp_msg(skb);
178 		int pad;
179 
180 		pad = padding_length(ctx, prot, skb);
181 		if (pad < 0) {
182 			ctx->async_wait.err = pad;
183 			tls_err_abort(skb->sk, pad);
184 		} else {
185 			rxm->full_len -= pad;
186 			rxm->offset += prot->prepend_size;
187 			rxm->full_len -= prot->overhead_size;
188 		}
189 	}
190 
191 	/* After using skb->sk to propagate sk through crypto async callback
192 	 * we need to NULL it again.
193 	 */
194 	skb->sk = NULL;
195 
196 
197 	/* Free the destination pages if skb was not decrypted in place */
198 	if (sgout != sgin) {
199 		/* Skip the first S/G entry as it points to AAD */
200 		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
201 			if (!sg)
202 				break;
203 			put_page(sg_page(sg));
204 		}
205 	}
206 
207 	kfree(aead_req);
208 
209 	pending = atomic_dec_return(&ctx->decrypt_pending);
210 
211 	if (!pending && READ_ONCE(ctx->async_notify))
212 		complete(&ctx->async_wait.completion);
213 }
214 
215 static int tls_do_decryption(struct sock *sk,
216 			     struct sk_buff *skb,
217 			     struct scatterlist *sgin,
218 			     struct scatterlist *sgout,
219 			     char *iv_recv,
220 			     size_t data_len,
221 			     struct aead_request *aead_req,
222 			     bool async)
223 {
224 	struct tls_context *tls_ctx = tls_get_ctx(sk);
225 	struct tls_prot_info *prot = &tls_ctx->prot_info;
226 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
227 	int ret;
228 
229 	aead_request_set_tfm(aead_req, ctx->aead_recv);
230 	aead_request_set_ad(aead_req, prot->aad_size);
231 	aead_request_set_crypt(aead_req, sgin, sgout,
232 			       data_len + prot->tag_size,
233 			       (u8 *)iv_recv);
234 
235 	if (async) {
236 		/* Using skb->sk to push sk through to crypto async callback
237 		 * handler. This allows propagating errors up to the socket
238 		 * if needed. It _must_ be cleared in the async handler
239 		 * before consume_skb is called. We _know_ skb->sk is NULL
240 		 * because it is a clone from strparser.
241 		 */
242 		skb->sk = sk;
243 		aead_request_set_callback(aead_req,
244 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
245 					  tls_decrypt_done, skb);
246 		atomic_inc(&ctx->decrypt_pending);
247 	} else {
248 		aead_request_set_callback(aead_req,
249 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
250 					  crypto_req_done, &ctx->async_wait);
251 	}
252 
253 	ret = crypto_aead_decrypt(aead_req);
254 	if (ret == -EINPROGRESS) {
255 		if (async)
256 			return ret;
257 
258 		ret = crypto_wait_req(ret, &ctx->async_wait);
259 	}
260 
261 	if (async)
262 		atomic_dec(&ctx->decrypt_pending);
263 
264 	return ret;
265 }
266 
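/* Note (generic pattern, not part of the original file): the synchronous
 * branch above is the stock crypto_wait_req() idiom. As a stand-alone
 * sketch it reads:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_aead_decrypt(req), &wait);
 *
 * crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleep on the
 * completion and returns the final status reported by the driver.
 */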
267 static void tls_trim_both_msgs(struct sock *sk, int target_size)
268 {
269 	struct tls_context *tls_ctx = tls_get_ctx(sk);
270 	struct tls_prot_info *prot = &tls_ctx->prot_info;
271 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
272 	struct tls_rec *rec = ctx->open_rec;
273 
274 	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
275 	if (target_size > 0)
276 		target_size += prot->overhead_size;
277 	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
278 }
279 
280 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
281 {
282 	struct tls_context *tls_ctx = tls_get_ctx(sk);
283 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
284 	struct tls_rec *rec = ctx->open_rec;
285 	struct sk_msg *msg_en = &rec->msg_encrypted;
286 
287 	return sk_msg_alloc(sk, msg_en, len, 0);
288 }
289 
290 static int tls_clone_plaintext_msg(struct sock *sk, int required)
291 {
292 	struct tls_context *tls_ctx = tls_get_ctx(sk);
293 	struct tls_prot_info *prot = &tls_ctx->prot_info;
294 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
295 	struct tls_rec *rec = ctx->open_rec;
296 	struct sk_msg *msg_pl = &rec->msg_plaintext;
297 	struct sk_msg *msg_en = &rec->msg_encrypted;
298 	int skip, len;
299 
300 	/* We add page references worth len bytes from encrypted sg
301 	 * at the end of plaintext sg. It is guaranteed that msg_en
302 	 * has the required room (ensured by the caller).
303 	 */
304 	len = required - msg_pl->sg.size;
305 
306 	/* Skip initial bytes in msg_en's data to be able to use
307 	 * same offset of both plain and encrypted data.
308 	 */
309 	skip = prot->prepend_size + msg_pl->sg.size;
310 
311 	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
312 }
313 
314 static struct tls_rec *tls_get_rec(struct sock *sk)
315 {
316 	struct tls_context *tls_ctx = tls_get_ctx(sk);
317 	struct tls_prot_info *prot = &tls_ctx->prot_info;
318 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
319 	struct sk_msg *msg_pl, *msg_en;
320 	struct tls_rec *rec;
321 	int mem_size;
322 
323 	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
324 
325 	rec = kzalloc(mem_size, sk->sk_allocation);
326 	if (!rec)
327 		return NULL;
328 
329 	msg_pl = &rec->msg_plaintext;
330 	msg_en = &rec->msg_encrypted;
331 
332 	sk_msg_init(msg_pl);
333 	sk_msg_init(msg_en);
334 
335 	sg_init_table(rec->sg_aead_in, 2);
336 	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
337 	sg_unmark_end(&rec->sg_aead_in[1]);
338 
339 	sg_init_table(rec->sg_aead_out, 2);
340 	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
341 	sg_unmark_end(&rec->sg_aead_out[1]);
342 
343 	return rec;
344 }
345 
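/* Note (illustration, not part of the original file): the two
 * sg_aead_{in,out} tables built in tls_get_rec() are chain heads:
 *
 *	sg_aead_in[0]  -> rec->aad_space (the AAD the AEAD authenticates)
 *	sg_aead_in[1]  -> later chained to msg_plaintext's scatterlist
 *
 * (same shape for sg_aead_out / msg_encrypted). sg_unmark_end() on
 * entry [1] keeps the table open so tls_push_record() can sg_chain()
 * the payload in without copying it.
 */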
346 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
347 {
348 	sk_msg_free(sk, &rec->msg_encrypted);
349 	sk_msg_free(sk, &rec->msg_plaintext);
350 	kfree(rec);
351 }
352 
353 static void tls_free_open_rec(struct sock *sk)
354 {
355 	struct tls_context *tls_ctx = tls_get_ctx(sk);
356 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
357 	struct tls_rec *rec = ctx->open_rec;
358 
359 	if (rec) {
360 		tls_free_rec(sk, rec);
361 		ctx->open_rec = NULL;
362 	}
363 }
364 
365 int tls_tx_records(struct sock *sk, int flags)
366 {
367 	struct tls_context *tls_ctx = tls_get_ctx(sk);
368 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
369 	struct tls_rec *rec, *tmp;
370 	struct sk_msg *msg_en;
371 	int tx_flags, rc = 0;
372 
373 	if (tls_is_partially_sent_record(tls_ctx)) {
374 		rec = list_first_entry(&ctx->tx_list,
375 				       struct tls_rec, list);
376 
377 		if (flags == -1)
378 			tx_flags = rec->tx_flags;
379 		else
380 			tx_flags = flags;
381 
382 		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
383 		if (rc)
384 			goto tx_err;
385 
386 		/* Full record has been transmitted.
387 		 * Remove the head of tx_list
388 		 */
389 		list_del(&rec->list);
390 		sk_msg_free(sk, &rec->msg_plaintext);
391 		kfree(rec);
392 	}
393 
394 	/* Tx all ready records */
395 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
396 		if (READ_ONCE(rec->tx_ready)) {
397 			if (flags == -1)
398 				tx_flags = rec->tx_flags;
399 			else
400 				tx_flags = flags;
401 
402 			msg_en = &rec->msg_encrypted;
403 			rc = tls_push_sg(sk, tls_ctx,
404 					 &msg_en->sg.data[msg_en->sg.curr],
405 					 0, tx_flags);
406 			if (rc)
407 				goto tx_err;
408 
409 			list_del(&rec->list);
410 			sk_msg_free(sk, &rec->msg_plaintext);
411 			kfree(rec);
412 		} else {
413 			break;
414 		}
415 	}
416 
417 tx_err:
418 	if (rc < 0 && rc != -EAGAIN)
419 		tls_err_abort(sk, EBADMSG);
420 
421 	return rc;
422 }
423 
424 static void tls_encrypt_done(struct crypto_async_request *req, int err)
425 {
426 	struct aead_request *aead_req = (struct aead_request *)req;
427 	struct sock *sk = req->data;
428 	struct tls_context *tls_ctx = tls_get_ctx(sk);
429 	struct tls_prot_info *prot = &tls_ctx->prot_info;
430 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
431 	struct scatterlist *sge;
432 	struct sk_msg *msg_en;
433 	struct tls_rec *rec;
434 	bool ready = false;
435 	int pending;
436 
437 	rec = container_of(aead_req, struct tls_rec, aead_req);
438 	msg_en = &rec->msg_encrypted;
439 
440 	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
441 	sge->offset -= prot->prepend_size;
442 	sge->length += prot->prepend_size;
443 
444 	/* Check if error is previously set on socket */
445 	if (err || sk->sk_err) {
446 		rec = NULL;
447 
448 		/* If err is already set on socket, return the same code */
449 		if (sk->sk_err) {
450 			ctx->async_wait.err = sk->sk_err;
451 		} else {
452 			ctx->async_wait.err = err;
453 			tls_err_abort(sk, err);
454 		}
455 	}
456 
457 	if (rec) {
458 		struct tls_rec *first_rec;
459 
460 		/* Mark the record as ready for transmission */
461 		smp_store_mb(rec->tx_ready, true);
462 
463 		/* If received record is at head of tx_list, schedule tx */
464 		first_rec = list_first_entry(&ctx->tx_list,
465 					     struct tls_rec, list);
466 		if (rec == first_rec)
467 			ready = true;
468 	}
469 
470 	pending = atomic_dec_return(&ctx->encrypt_pending);
471 
472 	if (!pending && READ_ONCE(ctx->async_notify))
473 		complete(&ctx->async_wait.completion);
474 
475 	if (!ready)
476 		return;
477 
478 	/* Schedule the transmission */
479 	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
480 		schedule_delayed_work(&ctx->tx_work.work, 1);
481 }
482 
483 static int tls_do_encryption(struct sock *sk,
484 			     struct tls_context *tls_ctx,
485 			     struct tls_sw_context_tx *ctx,
486 			     struct aead_request *aead_req,
487 			     size_t data_len, u32 start)
488 {
489 	struct tls_prot_info *prot = &tls_ctx->prot_info;
490 	struct tls_rec *rec = ctx->open_rec;
491 	struct sk_msg *msg_en = &rec->msg_encrypted;
492 	struct scatterlist *sge = sk_msg_elem(msg_en, start);
493 	int rc, iv_offset = 0;
494 
495 	/* For CCM based ciphers, first byte of IV is a constant */
496 	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
497 		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
498 		iv_offset = 1;
499 	}
500 
501 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
502 	       prot->iv_size + prot->salt_size);
503 
504 	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);
505 
506 	sge->offset += prot->prepend_size;
507 	sge->length -= prot->prepend_size;
508 
509 	msg_en->sg.curr = start;
510 
511 	aead_request_set_tfm(aead_req, ctx->aead_send);
512 	aead_request_set_ad(aead_req, prot->aad_size);
513 	aead_request_set_crypt(aead_req, rec->sg_aead_in,
514 			       rec->sg_aead_out,
515 			       data_len, rec->iv_data);
516 
517 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
518 				  tls_encrypt_done, sk);
519 
520 	/* Add the record in tx_list */
521 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
522 	atomic_inc(&ctx->encrypt_pending);
523 
524 	rc = crypto_aead_encrypt(aead_req);
525 	if (rc != -EINPROGRESS) {
526 		atomic_dec(&ctx->encrypt_pending);
527 		sge->offset -= prot->prepend_size;
528 		sge->length += prot->prepend_size;
529 	}
530 
531 	if (!rc) {
532 		WRITE_ONCE(rec->tx_ready, true);
533 	} else if (rc != -EINPROGRESS) {
534 		list_del(&rec->list);
535 		return rc;
536 	}
537 
538 	/* Unhook the record from the context if encryption did not fail */
539 	ctx->open_rec = NULL;
540 	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
541 	return rc;
542 }
543 
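/* Sketch (not part of the original file): for TLS 1.3,
 * xor_iv_with_seq() implements the RFC 8446 s5.3 per-record nonce: the
 * 8-byte record sequence number, left-padded to the 12-byte IV width,
 * is XORed into the static IV. An equivalent open-coded form:
 */
static void example_tls13_nonce(u8 iv[12], const u8 seq[8])
{
	int i;

	/* seq is padded with four zero bytes on the left, so only
	 * iv[4..11] change; TLS 1.2 AES-GCM skips this step and carries
	 * the sequence number as an explicit nonce instead.
	 */
	for (i = 0; i < 8; i++)
		iv[4 + i] ^= seq[i];
}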
544 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
545 				 struct tls_rec **to, struct sk_msg *msg_opl,
546 				 struct sk_msg *msg_oen, u32 split_point,
547 				 u32 tx_overhead_size, u32 *orig_end)
548 {
549 	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
550 	struct scatterlist *sge, *osge, *nsge;
551 	u32 orig_size = msg_opl->sg.size;
552 	struct scatterlist tmp = { };
553 	struct sk_msg *msg_npl;
554 	struct tls_rec *new;
555 	int ret;
556 
557 	new = tls_get_rec(sk);
558 	if (!new)
559 		return -ENOMEM;
560 	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
561 			   tx_overhead_size, 0);
562 	if (ret < 0) {
563 		tls_free_rec(sk, new);
564 		return ret;
565 	}
566 
567 	*orig_end = msg_opl->sg.end;
568 	i = msg_opl->sg.start;
569 	sge = sk_msg_elem(msg_opl, i);
570 	while (apply && sge->length) {
571 		if (sge->length > apply) {
572 			u32 len = sge->length - apply;
573 
574 			get_page(sg_page(sge));
575 			sg_set_page(&tmp, sg_page(sge), len,
576 				    sge->offset + apply);
577 			sge->length = apply;
578 			bytes += apply;
579 			apply = 0;
580 		} else {
581 			apply -= sge->length;
582 			bytes += sge->length;
583 		}
584 
585 		sk_msg_iter_var_next(i);
586 		if (i == msg_opl->sg.end)
587 			break;
588 		sge = sk_msg_elem(msg_opl, i);
589 	}
590 
591 	msg_opl->sg.end = i;
592 	msg_opl->sg.curr = i;
593 	msg_opl->sg.copybreak = 0;
594 	msg_opl->apply_bytes = 0;
595 	msg_opl->sg.size = bytes;
596 
597 	msg_npl = &new->msg_plaintext;
598 	msg_npl->apply_bytes = apply;
599 	msg_npl->sg.size = orig_size - bytes;
600 
601 	j = msg_npl->sg.start;
602 	nsge = sk_msg_elem(msg_npl, j);
603 	if (tmp.length) {
604 		memcpy(nsge, &tmp, sizeof(*nsge));
605 		sk_msg_iter_var_next(j);
606 		nsge = sk_msg_elem(msg_npl, j);
607 	}
608 
609 	osge = sk_msg_elem(msg_opl, i);
610 	while (osge->length) {
611 		memcpy(nsge, osge, sizeof(*nsge));
612 		sg_unmark_end(nsge);
613 		sk_msg_iter_var_next(i);
614 		sk_msg_iter_var_next(j);
615 		if (i == *orig_end)
616 			break;
617 		osge = sk_msg_elem(msg_opl, i);
618 		nsge = sk_msg_elem(msg_npl, j);
619 	}
620 
621 	msg_npl->sg.end = j;
622 	msg_npl->sg.curr = j;
623 	msg_npl->sg.copybreak = 0;
624 
625 	*to = new;
626 	return 0;
627 }
628 
629 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
630 				  struct tls_rec *from, u32 orig_end)
631 {
632 	struct sk_msg *msg_npl = &from->msg_plaintext;
633 	struct sk_msg *msg_opl = &to->msg_plaintext;
634 	struct scatterlist *osge, *nsge;
635 	u32 i, j;
636 
637 	i = msg_opl->sg.end;
638 	sk_msg_iter_var_prev(i);
639 	j = msg_npl->sg.start;
640 
641 	osge = sk_msg_elem(msg_opl, i);
642 	nsge = sk_msg_elem(msg_npl, j);
643 
644 	if (sg_page(osge) == sg_page(nsge) &&
645 	    osge->offset + osge->length == nsge->offset) {
646 		osge->length += nsge->length;
647 		put_page(sg_page(nsge));
648 	}
649 
650 	msg_opl->sg.end = orig_end;
651 	msg_opl->sg.curr = orig_end;
652 	msg_opl->sg.copybreak = 0;
653 	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
654 	msg_opl->sg.size += msg_npl->sg.size;
655 
656 	sk_msg_free(sk, &to->msg_encrypted);
657 	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
658 
659 	kfree(from);
660 }
661 
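/* Note (illustration, not part of the original file):
 * tls_split_open_record()/tls_merge_open_record() serve the BPF
 * apply_bytes contract. If a verdict covers only part of the open
 * record, the record is cut at that byte: e.g. a 16 KB open record with
 * apply_bytes = 4096 splits into a 4 KB record that is pushed now and a
 * 12 KB remainder that stays open. If the push then fails, the merge
 * glues the remainder back onto the original so no bytes are lost.
 */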
662 static int tls_push_record(struct sock *sk, int flags,
663 			   unsigned char record_type)
664 {
665 	struct tls_context *tls_ctx = tls_get_ctx(sk);
666 	struct tls_prot_info *prot = &tls_ctx->prot_info;
667 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
668 	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
669 	u32 i, split_point, uninitialized_var(orig_end);
670 	struct sk_msg *msg_pl, *msg_en;
671 	struct aead_request *req;
672 	bool split;
673 	int rc;
674 
675 	if (!rec)
676 		return 0;
677 
678 	msg_pl = &rec->msg_plaintext;
679 	msg_en = &rec->msg_encrypted;
680 
681 	split_point = msg_pl->apply_bytes;
682 	split = split_point && split_point < msg_pl->sg.size;
683 	if (unlikely((!split &&
684 		      msg_pl->sg.size +
685 		      prot->overhead_size > msg_en->sg.size) ||
686 		     (split &&
687 		      split_point +
688 		      prot->overhead_size > msg_en->sg.size))) {
689 		split = true;
690 		split_point = msg_en->sg.size;
691 	}
692 	if (split) {
693 		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
694 					   split_point, prot->overhead_size,
695 					   &orig_end);
696 		if (rc < 0)
697 			return rc;
698 		/* This can happen if above tls_split_open_record allocates
699 		 * a single large encryption buffer instead of two smaller
700 		 * ones. In this case adjust pointers and continue without
701 		 * split.
702 		 */
703 		if (!msg_pl->sg.size) {
704 			tls_merge_open_record(sk, rec, tmp, orig_end);
705 			msg_pl = &rec->msg_plaintext;
706 			msg_en = &rec->msg_encrypted;
707 			split = false;
708 		}
709 		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
710 			    prot->overhead_size);
711 	}
712 
713 	rec->tx_flags = flags;
714 	req = &rec->aead_req;
715 
716 	i = msg_pl->sg.end;
717 	sk_msg_iter_var_prev(i);
718 
719 	rec->content_type = record_type;
720 	if (prot->version == TLS_1_3_VERSION) {
721 		/* Add content type to end of message.  No padding added */
722 		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
723 		sg_mark_end(&rec->sg_content_type);
724 		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
725 			 &rec->sg_content_type);
726 	} else {
727 		sg_mark_end(sk_msg_elem(msg_pl, i));
728 	}
729 
730 	if (msg_pl->sg.end < msg_pl->sg.start) {
731 		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
732 			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
733 			 msg_pl->sg.data);
734 	}
735 
736 	i = msg_pl->sg.start;
737 	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
738 
739 	i = msg_en->sg.end;
740 	sk_msg_iter_var_prev(i);
741 	sg_mark_end(sk_msg_elem(msg_en, i));
742 
743 	i = msg_en->sg.start;
744 	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
745 
746 	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
747 		     tls_ctx->tx.rec_seq, prot->rec_seq_size,
748 		     record_type, prot->version);
749 
750 	tls_fill_prepend(tls_ctx,
751 			 page_address(sg_page(&msg_en->sg.data[i])) +
752 			 msg_en->sg.data[i].offset,
753 			 msg_pl->sg.size + prot->tail_size,
754 			 record_type, prot->version);
755 
756 	tls_ctx->pending_open_record_frags = false;
757 
758 	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
759 			       msg_pl->sg.size + prot->tail_size, i);
760 	if (rc < 0) {
761 		if (rc != -EINPROGRESS) {
762 			tls_err_abort(sk, EBADMSG);
763 			if (split) {
764 				tls_ctx->pending_open_record_frags = true;
765 				tls_merge_open_record(sk, rec, tmp, orig_end);
766 			}
767 		}
768 		ctx->async_capable = 1;
769 		return rc;
770 	} else if (split) {
771 		msg_pl = &tmp->msg_plaintext;
772 		msg_en = &tmp->msg_encrypted;
773 		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
774 		tls_ctx->pending_open_record_frags = true;
775 		ctx->open_rec = tmp;
776 	}
777 
778 	return tls_tx_records(sk, flags);
779 }
780 
781 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
782 			       bool full_record, u8 record_type,
783 			       size_t *copied, int flags)
784 {
785 	struct tls_context *tls_ctx = tls_get_ctx(sk);
786 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
787 	struct sk_msg msg_redir = { };
788 	struct sk_psock *psock;
789 	struct sock *sk_redir;
790 	struct tls_rec *rec;
791 	bool enospc, policy;
792 	int err = 0, send;
793 	u32 delta = 0;
794 
795 	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
796 	psock = sk_psock_get(sk);
797 	if (!psock || !policy) {
798 		err = tls_push_record(sk, flags, record_type);
799 		if (err && err != -EINPROGRESS) {
800 			*copied -= sk_msg_free(sk, msg);
801 			tls_free_open_rec(sk);
802 		}
803 		if (psock)
804 			sk_psock_put(sk, psock);
805 		return err;
806 	}
807 more_data:
808 	enospc = sk_msg_full(msg);
809 	if (psock->eval == __SK_NONE) {
810 		delta = msg->sg.size;
811 		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
812 		delta -= msg->sg.size;
813 	}
814 	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
815 	    !enospc && !full_record) {
816 		err = -ENOSPC;
817 		goto out_err;
818 	}
819 	msg->cork_bytes = 0;
820 	send = msg->sg.size;
821 	if (msg->apply_bytes && msg->apply_bytes < send)
822 		send = msg->apply_bytes;
823 
824 	switch (psock->eval) {
825 	case __SK_PASS:
826 		err = tls_push_record(sk, flags, record_type);
827 		if (err && err != -EINPROGRESS) {
828 			*copied -= sk_msg_free(sk, msg);
829 			tls_free_open_rec(sk);
830 			goto out_err;
831 		}
832 		break;
833 	case __SK_REDIRECT:
834 		sk_redir = psock->sk_redir;
835 		memcpy(&msg_redir, msg, sizeof(*msg));
836 		if (msg->apply_bytes < send)
837 			msg->apply_bytes = 0;
838 		else
839 			msg->apply_bytes -= send;
840 		sk_msg_return_zero(sk, msg, send);
841 		msg->sg.size -= send;
842 		release_sock(sk);
843 		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
844 		lock_sock(sk);
845 		if (err < 0) {
846 			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
847 			msg->sg.size = 0;
848 		}
849 		if (msg->sg.size == 0)
850 			tls_free_open_rec(sk);
851 		break;
852 	case __SK_DROP:
853 	default:
854 		sk_msg_free_partial(sk, msg, send);
855 		if (msg->apply_bytes < send)
856 			msg->apply_bytes = 0;
857 		else
858 			msg->apply_bytes -= send;
859 		if (msg->sg.size == 0)
860 			tls_free_open_rec(sk);
861 		*copied -= (send + delta);
862 		err = -EACCES;
863 	}
864 
865 	if (likely(!err)) {
866 		bool reset_eval = !ctx->open_rec;
867 
868 		rec = ctx->open_rec;
869 		if (rec) {
870 			msg = &rec->msg_plaintext;
871 			if (!msg->apply_bytes)
872 				reset_eval = true;
873 		}
874 		if (reset_eval) {
875 			psock->eval = __SK_NONE;
876 			if (psock->sk_redir) {
877 				sock_put(psock->sk_redir);
878 				psock->sk_redir = NULL;
879 			}
880 		}
881 		if (rec)
882 			goto more_data;
883 	}
884  out_err:
885 	sk_psock_put(sk, psock);
886 	return err;
887 }
888 
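/* Summary (editorial note, not part of the original file): verdict
 * handling in bpf_exec_tx_verdict(), one line each:
 *
 *	__SK_PASS     encrypt and send on this socket (tls_push_record)
 *	__SK_REDIRECT hand the plaintext sk_msg to another socket via
 *	              tcp_bpf_sendmsg_redir(), returning its charge here
 *	__SK_DROP     free the covered bytes and fail with -EACCES
 *
 * The more_data loop re-runs while an open record remains, so a single
 * sendmsg() call can produce several verdicts.
 */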
889 static int tls_sw_push_pending_record(struct sock *sk, int flags)
890 {
891 	struct tls_context *tls_ctx = tls_get_ctx(sk);
892 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
893 	struct tls_rec *rec = ctx->open_rec;
894 	struct sk_msg *msg_pl;
895 	size_t copied;
896 
897 	if (!rec)
898 		return 0;
899 
900 	msg_pl = &rec->msg_plaintext;
901 	copied = msg_pl->sg.size;
902 	if (!copied)
903 		return 0;
904 
905 	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
906 				   &copied, flags);
907 }
908 
909 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
910 {
911 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
912 	struct tls_context *tls_ctx = tls_get_ctx(sk);
913 	struct tls_prot_info *prot = &tls_ctx->prot_info;
914 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
915 	bool async_capable = ctx->async_capable;
916 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
917 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
918 	bool eor = !(msg->msg_flags & MSG_MORE);
919 	size_t try_to_copy, copied = 0;
920 	struct sk_msg *msg_pl, *msg_en;
921 	struct tls_rec *rec;
922 	int required_size;
923 	int num_async = 0;
924 	bool full_record;
925 	int record_room;
926 	int num_zc = 0;
927 	int orig_size;
928 	int ret = 0;
929 
930 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
931 		return -EOPNOTSUPP;
932 
933 	mutex_lock(&tls_ctx->tx_lock);
934 	lock_sock(sk);
935 
936 	if (unlikely(msg->msg_controllen)) {
937 		ret = tls_proccess_cmsg(sk, msg, &record_type);
938 		if (ret) {
939 			if (ret == -EINPROGRESS)
940 				num_async++;
941 			else if (ret != -EAGAIN)
942 				goto send_end;
943 		}
944 	}
945 
946 	while (msg_data_left(msg)) {
947 		if (sk->sk_err) {
948 			ret = -sk->sk_err;
949 			goto send_end;
950 		}
951 
952 		if (ctx->open_rec)
953 			rec = ctx->open_rec;
954 		else
955 			rec = ctx->open_rec = tls_get_rec(sk);
956 		if (!rec) {
957 			ret = -ENOMEM;
958 			goto send_end;
959 		}
960 
961 		msg_pl = &rec->msg_plaintext;
962 		msg_en = &rec->msg_encrypted;
963 
964 		orig_size = msg_pl->sg.size;
965 		full_record = false;
966 		try_to_copy = msg_data_left(msg);
967 		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
968 		if (try_to_copy >= record_room) {
969 			try_to_copy = record_room;
970 			full_record = true;
971 		}
972 
973 		required_size = msg_pl->sg.size + try_to_copy +
974 				prot->overhead_size;
975 
976 		if (!sk_stream_memory_free(sk))
977 			goto wait_for_sndbuf;
978 
979 alloc_encrypted:
980 		ret = tls_alloc_encrypted_msg(sk, required_size);
981 		if (ret) {
982 			if (ret != -ENOSPC)
983 				goto wait_for_memory;
984 
985 			/* Adjust try_to_copy according to the amount that was
986 			 * actually allocated. The difference is due
987 			 * to max sg elements limit
988 			 */
989 			try_to_copy -= required_size - msg_en->sg.size;
990 			full_record = true;
991 		}
992 
993 		if (!is_kvec && (full_record || eor) && !async_capable) {
994 			u32 first = msg_pl->sg.end;
995 
996 			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
997 							msg_pl, try_to_copy);
998 			if (ret)
999 				goto fallback_to_reg_send;
1000 
1001 			num_zc++;
1002 			copied += try_to_copy;
1003 
1004 			sk_msg_sg_copy_set(msg_pl, first);
1005 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1006 						  record_type, &copied,
1007 						  msg->msg_flags);
1008 			if (ret) {
1009 				if (ret == -EINPROGRESS)
1010 					num_async++;
1011 				else if (ret == -ENOMEM)
1012 					goto wait_for_memory;
1013 				else if (ctx->open_rec && ret == -ENOSPC)
1014 					goto rollback_iter;
1015 				else if (ret != -EAGAIN)
1016 					goto send_end;
1017 			}
1018 			continue;
1019 rollback_iter:
1020 			copied -= try_to_copy;
1021 			sk_msg_sg_copy_clear(msg_pl, first);
1022 			iov_iter_revert(&msg->msg_iter,
1023 					msg_pl->sg.size - orig_size);
1024 fallback_to_reg_send:
1025 			sk_msg_trim(sk, msg_pl, orig_size);
1026 		}
1027 
1028 		required_size = msg_pl->sg.size + try_to_copy;
1029 
1030 		ret = tls_clone_plaintext_msg(sk, required_size);
1031 		if (ret) {
1032 			if (ret != -ENOSPC)
1033 				goto send_end;
1034 
1035 			/* Adjust try_to_copy according to the amount that was
1036 			 * actually allocated. The difference is due
1037 			 * to max sg elements limit
1038 			 */
1039 			try_to_copy -= required_size - msg_pl->sg.size;
1040 			full_record = true;
1041 			sk_msg_trim(sk, msg_en,
1042 				    msg_pl->sg.size + prot->overhead_size);
1043 		}
1044 
1045 		if (try_to_copy) {
1046 			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1047 						       msg_pl, try_to_copy);
1048 			if (ret < 0)
1049 				goto trim_sgl;
1050 		}
1051 
1052 		/* Only flag open record frags after a successful copy, otherwise
1053 		 * we would trim the sg but not reset the open record frags.
1054 		 */
1055 		tls_ctx->pending_open_record_frags = true;
1056 		copied += try_to_copy;
1057 		if (full_record || eor) {
1058 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1059 						  record_type, &copied,
1060 						  msg->msg_flags);
1061 			if (ret) {
1062 				if (ret == -EINPROGRESS)
1063 					num_async++;
1064 				else if (ret == -ENOMEM)
1065 					goto wait_for_memory;
1066 				else if (ret != -EAGAIN) {
1067 					if (ret == -ENOSPC)
1068 						ret = 0;
1069 					goto send_end;
1070 				}
1071 			}
1072 		}
1073 
1074 		continue;
1075 
1076 wait_for_sndbuf:
1077 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1078 wait_for_memory:
1079 		ret = sk_stream_wait_memory(sk, &timeo);
1080 		if (ret) {
1081 trim_sgl:
1082 			if (ctx->open_rec)
1083 				tls_trim_both_msgs(sk, orig_size);
1084 			goto send_end;
1085 		}
1086 
1087 		if (ctx->open_rec && msg_en->sg.size < required_size)
1088 			goto alloc_encrypted;
1089 	}
1090 
1091 	if (!num_async) {
1092 		goto send_end;
1093 	} else if (num_zc) {
1094 		/* Wait for pending encryptions to get completed */
1095 		smp_store_mb(ctx->async_notify, true);
1096 
1097 		if (atomic_read(&ctx->encrypt_pending))
1098 			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1099 		else
1100 			reinit_completion(&ctx->async_wait.completion);
1101 
1102 		WRITE_ONCE(ctx->async_notify, false);
1103 
1104 		if (ctx->async_wait.err) {
1105 			ret = ctx->async_wait.err;
1106 			copied = 0;
1107 		}
1108 	}
1109 
1110 	/* Transmit if any encryptions have completed */
1111 	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1112 		cancel_delayed_work(&ctx->tx_work.work);
1113 		tls_tx_records(sk, msg->msg_flags);
1114 	}
1115 
1116 send_end:
1117 	ret = sk_stream_error(sk, msg->msg_flags, ret);
1118 
1119 	release_sock(sk);
1120 	mutex_unlock(&tls_ctx->tx_lock);
1121 	return copied ? copied : ret;
1122 }
1123 
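/* Example (userspace view, not kernel code): the cmsg branch above
 * (tls_proccess_cmsg()) is what carries a non-data record type down
 * from user space. The TLS_SET_RECORD_TYPE API is documented in
 * Documentation/networking/tls.rst; the alert payload and helper name
 * below are made up for illustration.
 */
#include <sys/socket.h>
#include <linux/tls.h>

static ssize_t example_send_alert(int fd, const void *buf, size_t len)
{
	unsigned char record_type = 21;	/* TLS alert */
	char cbuf[CMSG_SPACE(sizeof(record_type))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_TLS;
	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
	cmsg->cmsg_len = CMSG_LEN(sizeof(record_type));
	*CMSG_DATA(cmsg) = record_type;
	msg.msg_controllen = cmsg->cmsg_len;

	return sendmsg(fd, &msg, 0);
}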
1124 static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1125 			      int offset, size_t size, int flags)
1126 {
1127 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1128 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1129 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1130 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1131 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
1132 	struct sk_msg *msg_pl;
1133 	struct tls_rec *rec;
1134 	int num_async = 0;
1135 	size_t copied = 0;
1136 	bool full_record;
1137 	int record_room;
1138 	int ret = 0;
1139 	bool eor;
1140 
1141 	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
1142 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1143 
1144 	/* Call the sk_stream functions to manage the sndbuf mem. */
1145 	while (size > 0) {
1146 		size_t copy, required_size;
1147 
1148 		if (sk->sk_err) {
1149 			ret = -sk->sk_err;
1150 			goto sendpage_end;
1151 		}
1152 
1153 		if (ctx->open_rec)
1154 			rec = ctx->open_rec;
1155 		else
1156 			rec = ctx->open_rec = tls_get_rec(sk);
1157 		if (!rec) {
1158 			ret = -ENOMEM;
1159 			goto sendpage_end;
1160 		}
1161 
1162 		msg_pl = &rec->msg_plaintext;
1163 
1164 		full_record = false;
1165 		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
1166 		copy = size;
1167 		if (copy >= record_room) {
1168 			copy = record_room;
1169 			full_record = true;
1170 		}
1171 
1172 		required_size = msg_pl->sg.size + copy + prot->overhead_size;
1173 
1174 		if (!sk_stream_memory_free(sk))
1175 			goto wait_for_sndbuf;
1176 alloc_payload:
1177 		ret = tls_alloc_encrypted_msg(sk, required_size);
1178 		if (ret) {
1179 			if (ret != -ENOSPC)
1180 				goto wait_for_memory;
1181 
1182 			/* Adjust copy according to the amount that was
1183 			 * actually allocated. The difference is due
1184 			 * to max sg elements limit
1185 			 */
1186 			copy -= required_size - msg_pl->sg.size;
1187 			full_record = true;
1188 		}
1189 
1190 		sk_msg_page_add(msg_pl, page, copy, offset);
1191 		sk_mem_charge(sk, copy);
1192 
1193 		offset += copy;
1194 		size -= copy;
1195 		copied += copy;
1196 
1197 		tls_ctx->pending_open_record_frags = true;
1198 		if (full_record || eor || sk_msg_full(msg_pl)) {
1199 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1200 						  record_type, &copied, flags);
1201 			if (ret) {
1202 				if (ret == -EINPROGRESS)
1203 					num_async++;
1204 				else if (ret == -ENOMEM)
1205 					goto wait_for_memory;
1206 				else if (ret != -EAGAIN) {
1207 					if (ret == -ENOSPC)
1208 						ret = 0;
1209 					goto sendpage_end;
1210 				}
1211 			}
1212 		}
1213 		continue;
1214 wait_for_sndbuf:
1215 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1216 wait_for_memory:
1217 		ret = sk_stream_wait_memory(sk, &timeo);
1218 		if (ret) {
1219 			if (ctx->open_rec)
1220 				tls_trim_both_msgs(sk, msg_pl->sg.size);
1221 			goto sendpage_end;
1222 		}
1223 
1224 		if (ctx->open_rec)
1225 			goto alloc_payload;
1226 	}
1227 
1228 	if (num_async) {
1229 		/* Transmit if any encryptions have completed */
1230 		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1231 			cancel_delayed_work(&ctx->tx_work.work);
1232 			tls_tx_records(sk, flags);
1233 		}
1234 	}
1235 sendpage_end:
1236 	ret = sk_stream_error(sk, flags, ret);
1237 	return copied ? copied : ret;
1238 }
1239 
1240 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1241 			   int offset, size_t size, int flags)
1242 {
1243 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1244 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
1245 		      MSG_NO_SHARED_FRAGS))
1246 		return -EOPNOTSUPP;
1247 
1248 	return tls_sw_do_sendpage(sk, page, offset, size, flags);
1249 }
1250 
1251 int tls_sw_sendpage(struct sock *sk, struct page *page,
1252 		    int offset, size_t size, int flags)
1253 {
1254 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1255 	int ret;
1256 
1257 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1258 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1259 		return -EOPNOTSUPP;
1260 
1261 	mutex_lock(&tls_ctx->tx_lock);
1262 	lock_sock(sk);
1263 	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1264 	release_sock(sk);
1265 	mutex_unlock(&tls_ctx->tx_lock);
1266 	return ret;
1267 }
1268 
1269 static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
1270 				     int flags, long timeo, int *err)
1271 {
1272 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1273 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1274 	struct sk_buff *skb;
1275 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1276 
1277 	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
1278 		if (sk->sk_err) {
1279 			*err = sock_error(sk);
1280 			return NULL;
1281 		}
1282 
1283 		if (sk->sk_shutdown & RCV_SHUTDOWN)
1284 			return NULL;
1285 
1286 		if (sock_flag(sk, SOCK_DONE))
1287 			return NULL;
1288 
1289 		if ((flags & MSG_DONTWAIT) || !timeo) {
1290 			*err = -EAGAIN;
1291 			return NULL;
1292 		}
1293 
1294 		add_wait_queue(sk_sleep(sk), &wait);
1295 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1296 		sk_wait_event(sk, &timeo,
1297 			      ctx->recv_pkt != skb ||
1298 			      !sk_psock_queue_empty(psock),
1299 			      &wait);
1300 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1301 		remove_wait_queue(sk_sleep(sk), &wait);
1302 
1303 		/* Handle signals */
1304 		if (signal_pending(current)) {
1305 			*err = sock_intr_errno(timeo);
1306 			return NULL;
1307 		}
1308 	}
1309 
1310 	return skb;
1311 }
1312 
1313 static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
1314 			       int length, int *pages_used,
1315 			       unsigned int *size_used,
1316 			       struct scatterlist *to,
1317 			       int to_max_pages)
1318 {
1319 	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1320 	struct page *pages[MAX_SKB_FRAGS];
1321 	unsigned int size = *size_used;
1322 	ssize_t copied, use;
1323 	size_t offset;
1324 
1325 	while (length > 0) {
1326 		i = 0;
1327 		maxpages = to_max_pages - num_elem;
1328 		if (maxpages == 0) {
1329 			rc = -EFAULT;
1330 			goto out;
1331 		}
1332 		copied = iov_iter_get_pages(from, pages,
1333 					    length,
1334 					    maxpages, &offset);
1335 		if (copied <= 0) {
1336 			rc = -EFAULT;
1337 			goto out;
1338 		}
1339 
1340 		iov_iter_advance(from, copied);
1341 
1342 		length -= copied;
1343 		size += copied;
1344 		while (copied) {
1345 			use = min_t(int, copied, PAGE_SIZE - offset);
1346 
1347 			sg_set_page(&to[num_elem],
1348 				    pages[i], use, offset);
1349 			sg_unmark_end(&to[num_elem]);
1350 			/* We do not uncharge memory from this API */
1351 
1352 			offset = 0;
1353 			copied -= use;
1354 
1355 			i++;
1356 			num_elem++;
1357 		}
1358 	}
1359 	/* Mark the end in the last sg entry if newly added */
1360 	if (num_elem > *pages_used)
1361 		sg_mark_end(&to[num_elem - 1]);
1362 out:
1363 	if (rc)
1364 		iov_iter_revert(from, size - *size_used);
1365 	*size_used = size;
1366 	*pages_used = num_elem;
1367 
1368 	return rc;
1369 }
1370 
1371 /* This function decrypts the input skb into either out_iov, out_sg, or
1372  * the skb's own buffers. The input parameter 'zc' indicates whether
1373  * zero-copy mode should be tried. In zero-copy mode, either out_iov or
1374  * out_sg must be non-NULL; if both are NULL, the decryption happens in
1375  * the skb buffers itself, i.e. zero-copy gets disabled and 'zc' is
1376  * updated accordingly.
1377  */
1378 
1379 static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1380 			    struct iov_iter *out_iov,
1381 			    struct scatterlist *out_sg,
1382 			    int *chunk, bool *zc, bool async)
1383 {
1384 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1385 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1386 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1387 	struct strp_msg *rxm = strp_msg(skb);
1388 	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1389 	struct aead_request *aead_req;
1390 	struct sk_buff *unused;
1391 	u8 *aad, *iv, *mem = NULL;
1392 	struct scatterlist *sgin = NULL;
1393 	struct scatterlist *sgout = NULL;
1394 	const int data_len = rxm->full_len - prot->overhead_size +
1395 			     prot->tail_size;
1396 	int iv_offset = 0;
1397 
1398 	if (*zc && (out_iov || out_sg)) {
1399 		if (out_iov)
1400 			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1401 		else
1402 			n_sgout = sg_nents(out_sg);
1403 		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1404 				 rxm->full_len - prot->prepend_size);
1405 	} else {
1406 		n_sgout = 0;
1407 		*zc = false;
1408 		n_sgin = skb_cow_data(skb, 0, &unused);
1409 	}
1410 
1411 	if (n_sgin < 1)
1412 		return -EBADMSG;
1413 
1414 	/* Increment to accommodate AAD */
1415 	n_sgin = n_sgin + 1;
1416 
1417 	nsg = n_sgin + n_sgout;
1418 
1419 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1420 	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
1421 	mem_size = mem_size + prot->aad_size;
1422 	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1423 
1424 	/* Allocate a single block of memory which contains
1425 	 * aead_req || sgin[] || sgout[] || aad || iv.
1426 	 * This order achieves correct alignment for aead_req, sgin, sgout.
1427 	 */
1428 	mem = kmalloc(mem_size, sk->sk_allocation);
1429 	if (!mem)
1430 		return -ENOMEM;
1431 
1432 	/* Segment the allocated memory */
1433 	aead_req = (struct aead_request *)mem;
1434 	sgin = (struct scatterlist *)(mem + aead_size);
1435 	sgout = sgin + n_sgin;
1436 	aad = (u8 *)(sgout + n_sgout);
1437 	iv = aad + prot->aad_size;
1438 
1439 	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
1440 	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
1441 		iv[0] = 2;
1442 		iv_offset = 1;
1443 	}
1444 
1445 	/* Prepare IV */
1446 	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1447 			    iv + iv_offset + prot->salt_size,
1448 			    prot->iv_size);
1449 	if (err < 0) {
1450 		kfree(mem);
1451 		return err;
1452 	}
1453 	if (prot->version == TLS_1_3_VERSION)
1454 		memcpy(iv + iv_offset, tls_ctx->rx.iv,
1455 		       crypto_aead_ivsize(ctx->aead_recv));
1456 	else
1457 		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
1458 
1459 	xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
1460 
1461 	/* Prepare AAD */
1462 	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
1463 		     prot->tail_size,
1464 		     tls_ctx->rx.rec_seq, prot->rec_seq_size,
1465 		     ctx->control, prot->version);
1466 
1467 	/* Prepare sgin */
1468 	sg_init_table(sgin, n_sgin);
1469 	sg_set_buf(&sgin[0], aad, prot->aad_size);
1470 	err = skb_to_sgvec(skb, &sgin[1],
1471 			   rxm->offset + prot->prepend_size,
1472 			   rxm->full_len - prot->prepend_size);
1473 	if (err < 0) {
1474 		kfree(mem);
1475 		return err;
1476 	}
1477 
1478 	if (n_sgout) {
1479 		if (out_iov) {
1480 			sg_init_table(sgout, n_sgout);
1481 			sg_set_buf(&sgout[0], aad, prot->aad_size);
1482 
1483 			*chunk = 0;
1484 			err = tls_setup_from_iter(sk, out_iov, data_len,
1485 						  &pages, chunk, &sgout[1],
1486 						  (n_sgout - 1));
1487 			if (err < 0)
1488 				goto fallback_to_reg_recv;
1489 		} else if (out_sg) {
1490 			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1491 		} else {
1492 			goto fallback_to_reg_recv;
1493 		}
1494 	} else {
1495 fallback_to_reg_recv:
1496 		sgout = sgin;
1497 		pages = 0;
1498 		*chunk = data_len;
1499 		*zc = false;
1500 	}
1501 
1502 	/* Prepare and submit AEAD request */
1503 	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
1504 				data_len, aead_req, async);
1505 	if (err == -EINPROGRESS)
1506 		return err;
1507 
1508 	/* Release the pages in case iov was mapped to pages */
1509 	for (; pages > 0; pages--)
1510 		put_page(sg_page(&sgout[pages]));
1511 
1512 	kfree(mem);
1513 	return err;
1514 }
1515 
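/* Note (illustration, not part of the original file): the single
 * allocation in decrypt_internal() is carved up like this
 *
 *	mem: [ aead_request + tfm reqsize | sgin[n_sgin] | sgout[n_sgout]
 *	       | aad | iv ]
 *
 * where, for AES-GCM-128, aad_size is 13 bytes (TLS 1.2) or 5 bytes
 * (TLS 1.3) and the IV is 12 bytes. One kmalloc() instead of five keeps
 * the per-record setup cost down on the hot receive path.
 */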
1516 static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1517 			      struct iov_iter *dest, int *chunk, bool *zc,
1518 			      bool async)
1519 {
1520 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1521 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1522 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1523 	struct strp_msg *rxm = strp_msg(skb);
1524 	int pad, err = 0;
1525 
1526 	if (!ctx->decrypted) {
1527 		if (tls_ctx->rx_conf == TLS_HW) {
1528 			err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
1529 			if (err < 0)
1530 				return err;
1531 		}
1532 
1533 		/* Still not decrypted after tls_device */
1534 		if (!ctx->decrypted) {
1535 			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
1536 					       async);
1537 			if (err < 0) {
1538 				if (err == -EINPROGRESS)
1539 					tls_advance_record_sn(sk, prot,
1540 							      &tls_ctx->rx);
1541 				else if (err == -EBADMSG)
1542 					TLS_INC_STATS(sock_net(sk),
1543 						      LINUX_MIB_TLSDECRYPTERROR);
1544 				return err;
1545 			}
1546 		} else {
1547 			*zc = false;
1548 		}
1549 
1550 		pad = padding_length(ctx, prot, skb);
1551 		if (pad < 0)
1552 			return pad;
1553 
1554 		rxm->full_len -= pad;
1555 		rxm->offset += prot->prepend_size;
1556 		rxm->full_len -= prot->overhead_size;
1557 		tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1558 		ctx->decrypted = 1;
1559 		ctx->saved_data_ready(sk);
1560 	} else {
1561 		*zc = false;
1562 	}
1563 
1564 	return err;
1565 }
1566 
1567 int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1568 		struct scatterlist *sgout)
1569 {
1570 	bool zc = true;
1571 	int chunk;
1572 
1573 	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
1574 }
1575 
1576 static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1577 			       unsigned int len)
1578 {
1579 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1580 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1581 
1582 	if (skb) {
1583 		struct strp_msg *rxm = strp_msg(skb);
1584 
1585 		if (len < rxm->full_len) {
1586 			rxm->offset += len;
1587 			rxm->full_len -= len;
1588 			return false;
1589 		}
1590 		consume_skb(skb);
1591 	}
1592 
1593 	/* Finished with message */
1594 	ctx->recv_pkt = NULL;
1595 	__strp_unpause(&ctx->strp);
1596 
1597 	return true;
1598 }
1599 
1600 /* This function traverses the rx_list in the tls receive context and
1601  * copies the decrypted records into the caller's buffer when zero copy
1602  * is not true. Further, records are removed from the rx_list if this is
1603  * not a peek case and the record has been consumed completely.
1604  */
1605 static int process_rx_list(struct tls_sw_context_rx *ctx,
1606 			   struct msghdr *msg,
1607 			   u8 *control,
1608 			   bool *cmsg,
1609 			   size_t skip,
1610 			   size_t len,
1611 			   bool zc,
1612 			   bool is_peek)
1613 {
1614 	struct sk_buff *skb = skb_peek(&ctx->rx_list);
1615 	u8 ctrl = *control;
1616 	u8 msgc = *cmsg;
1617 	struct tls_msg *tlm;
1618 	ssize_t copied = 0;
1619 
1620 	/* Set the record type in 'control' if caller didn't pass it */
1621 	if (!ctrl && skb) {
1622 		tlm = tls_msg(skb);
1623 		ctrl = tlm->control;
1624 	}
1625 
1626 	while (skip && skb) {
1627 		struct strp_msg *rxm = strp_msg(skb);
1628 		tlm = tls_msg(skb);
1629 
1630 		/* Cannot process a record of different type */
1631 		if (ctrl != tlm->control)
1632 			return 0;
1633 
1634 		if (skip < rxm->full_len)
1635 			break;
1636 
1637 		skip = skip - rxm->full_len;
1638 		skb = skb_peek_next(skb, &ctx->rx_list);
1639 	}
1640 
1641 	while (len && skb) {
1642 		struct sk_buff *next_skb;
1643 		struct strp_msg *rxm = strp_msg(skb);
1644 		int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1645 
1646 		tlm = tls_msg(skb);
1647 
1648 		/* Cannot process a record of different type */
1649 		if (ctrl != tlm->control)
1650 			return 0;
1651 
1652 		/* Set record type if not already done. For a non-data record,
1653 		 * do not proceed if record type could not be copied.
1654 		 */
1655 		if (!msgc) {
1656 			int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1657 					    sizeof(ctrl), &ctrl);
1658 			msgc = true;
1659 			if (ctrl != TLS_RECORD_TYPE_DATA) {
1660 				if (cerr || msg->msg_flags & MSG_CTRUNC)
1661 					return -EIO;
1662 
1663 				*cmsg = msgc;
1664 			}
1665 		}
1666 
1667 		if (!zc || (rxm->full_len - skip) > len) {
1668 			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1669 						    msg, chunk);
1670 			if (err < 0)
1671 				return err;
1672 		}
1673 
1674 		len = len - chunk;
1675 		copied = copied + chunk;
1676 
1677 		/* Consume the data from the record in the non-peek case */
1678 		if (!is_peek) {
1679 			rxm->offset = rxm->offset + chunk;
1680 			rxm->full_len = rxm->full_len - chunk;
1681 
1682 			/* Return if there is unconsumed data in the record */
1683 			if (rxm->full_len - skip)
1684 				break;
1685 		}
1686 
1687 		/* The remaining skip-bytes must lie in 1st record in rx_list.
1688 		 * So from the 2nd record, 'skip' should be 0.
1689 		 */
1690 		skip = 0;
1691 
1692 		if (msg)
1693 			msg->msg_flags |= MSG_EOR;
1694 
1695 		next_skb = skb_peek_next(skb, &ctx->rx_list);
1696 
1697 		if (!is_peek) {
1698 			skb_unlink(skb, &ctx->rx_list);
1699 			consume_skb(skb);
1700 		}
1701 
1702 		skb = next_skb;
1703 	}
1704 
1705 	*control = ctrl;
1706 	return copied;
1707 }
1708 
1709 int tls_sw_recvmsg(struct sock *sk,
1710 		   struct msghdr *msg,
1711 		   size_t len,
1712 		   int nonblock,
1713 		   int flags,
1714 		   int *addr_len)
1715 {
1716 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1717 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1718 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1719 	struct sk_psock *psock;
1720 	unsigned char control = 0;
1721 	ssize_t decrypted = 0;
1722 	struct strp_msg *rxm;
1723 	struct tls_msg *tlm;
1724 	struct sk_buff *skb;
1725 	ssize_t copied = 0;
1726 	bool cmsg = false;
1727 	int target, err = 0;
1728 	long timeo;
1729 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1730 	bool is_peek = flags & MSG_PEEK;
1731 	int num_async = 0;
1732 
1733 	flags |= nonblock;
1734 
1735 	if (unlikely(flags & MSG_ERRQUEUE))
1736 		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1737 
1738 	psock = sk_psock_get(sk);
1739 	lock_sock(sk);
1740 
1741 	/* Process pending decrypted records. It must be non-zero-copy */
1742 	err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
1743 			      is_peek);
1744 	if (err < 0) {
1745 		tls_err_abort(sk, err);
1746 		goto end;
1747 	} else {
1748 		copied = err;
1749 	}
1750 
1751 	if (len <= copied)
1752 		goto recv_end;
1753 
1754 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1755 	len = len - copied;
1756 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1757 
1758 	while (len && (decrypted + copied < target || ctx->recv_pkt)) {
1759 		bool retain_skb = false;
1760 		bool zc = false;
1761 		int to_decrypt;
1762 		int chunk = 0;
1763 		bool async_capable;
1764 		bool async = false;
1765 
1766 		skb = tls_wait_data(sk, psock, flags, timeo, &err);
1767 		if (!skb) {
1768 			if (psock) {
1769 				int ret = __tcp_bpf_recvmsg(sk, psock,
1770 							    msg, len, flags);
1771 
1772 				if (ret > 0) {
1773 					decrypted += ret;
1774 					len -= ret;
1775 					continue;
1776 				}
1777 			}
1778 			goto recv_end;
1779 		} else {
1780 			tlm = tls_msg(skb);
1781 			if (prot->version == TLS_1_3_VERSION)
1782 				tlm->control = 0;
1783 			else
1784 				tlm->control = ctx->control;
1785 		}
1786 
1787 		rxm = strp_msg(skb);
1788 
1789 		to_decrypt = rxm->full_len - prot->overhead_size;
1790 
1791 		if (to_decrypt <= len && !is_kvec && !is_peek &&
1792 		    ctx->control == TLS_RECORD_TYPE_DATA &&
1793 		    prot->version != TLS_1_3_VERSION)
1794 			zc = true;
1795 
1796 		/* Do not use async mode if record is non-data */
1797 		if (ctx->control == TLS_RECORD_TYPE_DATA)
1798 			async_capable = ctx->async_capable;
1799 		else
1800 			async_capable = false;
1801 
1802 		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
1803 					 &chunk, &zc, async_capable);
1804 		if (err < 0 && err != -EINPROGRESS) {
1805 			tls_err_abort(sk, EBADMSG);
1806 			goto recv_end;
1807 		}
1808 
1809 		if (err == -EINPROGRESS) {
1810 			async = true;
1811 			num_async++;
1812 		} else if (prot->version == TLS_1_3_VERSION) {
1813 			tlm->control = ctx->control;
1814 		}
1815 
1816 		/* If the type of records being processed is not known yet,
1817 		 * set it to the record type just dequeued. If it is already
1818 		 * known but does not match the record type just dequeued, go
1819 		 * to the end. The record type is always available here: for
1820 		 * TLS 1.2 it is known as soon as the record is dequeued from
1821 		 * the stream parser, and for TLS 1.3 async mode is disabled.
1822 		 */
1823 
1824 		if (!control)
1825 			control = tlm->control;
1826 		else if (control != tlm->control)
1827 			goto recv_end;
1828 
1829 		if (!cmsg) {
1830 			int cerr;
1831 
1832 			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1833 					sizeof(control), &control);
1834 			cmsg = true;
1835 			if (control != TLS_RECORD_TYPE_DATA) {
1836 				if (cerr || msg->msg_flags & MSG_CTRUNC) {
1837 					err = -EIO;
1838 					goto recv_end;
1839 				}
1840 			}
1841 		}
1842 
1843 		if (async)
1844 			goto pick_next_record;
1845 
1846 		if (!zc) {
1847 			if (rxm->full_len > len) {
1848 				retain_skb = true;
1849 				chunk = len;
1850 			} else {
1851 				chunk = rxm->full_len;
1852 			}
1853 
1854 			err = skb_copy_datagram_msg(skb, rxm->offset,
1855 						    msg, chunk);
1856 			if (err < 0)
1857 				goto recv_end;
1858 
1859 			if (!is_peek) {
1860 				rxm->offset = rxm->offset + chunk;
1861 				rxm->full_len = rxm->full_len - chunk;
1862 			}
1863 		}
1864 
1865 pick_next_record:
1866 		if (chunk > len)
1867 			chunk = len;
1868 
1869 		decrypted += chunk;
1870 		len -= chunk;
1871 
1872 		/* For async or peek case, queue the current skb */
1873 		if (async || is_peek || retain_skb) {
1874 			skb_queue_tail(&ctx->rx_list, skb);
1875 			skb = NULL;
1876 		}
1877 
1878 		if (tls_sw_advance_skb(sk, skb, chunk)) {
1879 			/* Return full control message to
1880 			 * userspace before trying to parse
1881 			 * another message type
1882 			 */
1883 			msg->msg_flags |= MSG_EOR;
1884 			if (ctx->control != TLS_RECORD_TYPE_DATA)
1885 				goto recv_end;
1886 		} else {
1887 			break;
1888 		}
1889 	}
1890 
1891 recv_end:
1892 	if (num_async) {
1893 		/* Wait for all previously submitted records to be decrypted */
1894 		smp_store_mb(ctx->async_notify, true);
1895 		if (atomic_read(&ctx->decrypt_pending)) {
1896 			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1897 			if (err) {
1898 				/* one of async decrypt failed */
1899 				tls_err_abort(sk, err);
1900 				copied = 0;
1901 				decrypted = 0;
1902 				goto end;
1903 			}
1904 		} else {
1905 			reinit_completion(&ctx->async_wait.completion);
1906 		}
1907 		WRITE_ONCE(ctx->async_notify, false);
1908 
1909 		/* Drain records from the rx_list & copy if required */
1910 		if (is_peek || is_kvec)
1911 			err = process_rx_list(ctx, msg, &control, &cmsg, copied,
1912 					      decrypted, false, is_peek);
1913 		else
1914 			err = process_rx_list(ctx, msg, &control, &cmsg, 0,
1915 					      decrypted, true, is_peek);
1916 		if (err < 0) {
1917 			tls_err_abort(sk, err);
1918 			copied = 0;
1919 			goto end;
1920 		}
1921 	}
1922 
1923 	copied += decrypted;
1924 
1925 end:
1926 	release_sock(sk);
1927 	if (psock)
1928 		sk_psock_put(sk, psock);
1929 	return copied ? : err;
1930 }
1931 
1932 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1933 			   struct pipe_inode_info *pipe,
1934 			   size_t len, unsigned int flags)
1935 {
1936 	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
1937 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1938 	struct strp_msg *rxm = NULL;
1939 	struct sock *sk = sock->sk;
1940 	struct sk_buff *skb;
1941 	ssize_t copied = 0;
1942 	int err = 0;
1943 	long timeo;
1944 	int chunk;
1945 	bool zc = false;
1946 
1947 	lock_sock(sk);
1948 
1949 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1950 
1951 	skb = tls_wait_data(sk, NULL, flags, timeo, &err);
1952 	if (!skb)
1953 		goto splice_read_end;
1954 
1955 	if (!ctx->decrypted) {
1956 		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
1957 
1958 		/* splice does not support reading control messages */
1959 		if (ctx->control != TLS_RECORD_TYPE_DATA) {
1960 			err = -EINVAL;
1961 			goto splice_read_end;
1962 		}
1963 
1964 		if (err < 0) {
1965 			tls_err_abort(sk, EBADMSG);
1966 			goto splice_read_end;
1967 		}
1968 		ctx->decrypted = 1;
1969 	}
1970 	rxm = strp_msg(skb);
1971 
1972 	chunk = min_t(unsigned int, rxm->full_len, len);
1973 	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
1974 	if (copied < 0)
1975 		goto splice_read_end;
1976 
1977 	if (likely(!(flags & MSG_PEEK)))
1978 		tls_sw_advance_skb(sk, skb, copied);
1979 
1980 splice_read_end:
1981 	release_sock(sk);
1982 	return copied ? : err;
1983 }
1984 
1985 bool tls_sw_stream_read(const struct sock *sk)
1986 {
1987 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1988 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1989 	bool ingress_empty = true;
1990 	struct sk_psock *psock;
1991 
1992 	rcu_read_lock();
1993 	psock = sk_psock(sk);
1994 	if (psock)
1995 		ingress_empty = list_empty(&psock->ingress_msg);
1996 	rcu_read_unlock();
1997 
1998 	return !ingress_empty || ctx->recv_pkt ||
1999 		!skb_queue_empty(&ctx->rx_list);
2000 }
2001 
2002 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2003 {
2004 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2005 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2006 	struct tls_prot_info *prot = &tls_ctx->prot_info;
2007 	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2008 	struct strp_msg *rxm = strp_msg(skb);
2009 	size_t cipher_overhead;
2010 	size_t data_len = 0;
2011 	int ret;
2012 
2013 	/* Verify that we have a full TLS header, or wait for more data */
2014 	if (rxm->offset + prot->prepend_size > skb->len)
2015 		return 0;
2016 
2017 	/* Sanity-check size of on-stack buffer. */
2018 	if (WARN_ON(prot->prepend_size > sizeof(header))) {
2019 		ret = -EINVAL;
2020 		goto read_failure;
2021 	}
2022 
2023 	/* Linearize header to local buffer */
2024 	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
2025 
2026 	if (ret < 0)
2027 		goto read_failure;
2028 
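	/* The 5-byte TLS record header just copied is laid out as:
	 *   header[0]    content type (stored into ctx->control below)
	 *   header[1..2] legacy protocol version
	 *   header[3..4] payload length, big-endian
	 */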
2029 	ctx->control = header[0];
2030 
2031 	data_len = ((header[4] & 0xFF) | (header[3] << 8));
2032 
2033 	cipher_overhead = prot->tag_size;
2034 	if (prot->version != TLS_1_3_VERSION)
2035 		cipher_overhead += prot->iv_size;
2036 
2037 	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2038 	    prot->tail_size) {
2039 		ret = -EMSGSIZE;
2040 		goto read_failure;
2041 	}
2042 	if (data_len < cipher_overhead) {
2043 		ret = -EBADMSG;
2044 		goto read_failure;
2045 	}
2046 
2047 	/* Note that both TLS 1.3 and TLS 1.2 use the TLS 1.2 version number here */
2048 	if (header[1] != TLS_1_2_VERSION_MINOR ||
2049 	    header[2] != TLS_1_2_VERSION_MAJOR) {
2050 		ret = -EINVAL;
2051 		goto read_failure;
2052 	}
2053 
2054 	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2055 				     TCP_SKB_CB(skb)->seq + rxm->offset);
2056 	return data_len + TLS_HEADER_SIZE;
2057 
2058 read_failure:
2059 	tls_err_abort(strp->sk, ret);
2060 
2061 	return ret;
2062 }
2063 
2064 static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2065 {
2066 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2067 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2068 
2069 	ctx->decrypted = 0;
2070 
2071 	ctx->recv_pkt = skb;
2072 	strp_pause(strp);
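	/* Only one record is in flight at a time: the parser stays paused
	 * until the receive path has consumed recv_pkt and unpauses it in
	 * tls_sw_advance_skb().
	 */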
2073 
2074 	ctx->saved_data_ready(strp->sk);
2075 }
2076 
2077 static void tls_data_ready(struct sock *sk)
2078 {
2079 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2080 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2081 	struct sk_psock *psock;
2082 
2083 	strp_data_ready(&ctx->strp);
2084 
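	/* Data can also be queued on a BPF psock's ingress list; in that
	 * case wake the reader via the original callback, since
	 * strp_data_ready() only covers the strparser path.
	 */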
2085 	psock = sk_psock_get(sk);
2086 	if (psock) {
2087 		if (!list_empty(&psock->ingress_msg))
2088 			ctx->saved_data_ready(sk);
2089 		sk_psock_put(sk, psock);
2090 	}
2091 }
2092 
2093 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2094 {
2095 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2096 
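	/* BIT_TX_CLOSING makes tx_work_handler() bail out early, and
	 * pre-setting BIT_TX_SCHEDULED keeps tls_sw_write_space() from
	 * queueing the work again, so the sync cancel below cannot race
	 * with a re-schedule.
	 */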
2097 	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2098 	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2099 	cancel_delayed_work_sync(&ctx->tx_work.work);
2100 }
2101 
2102 void tls_sw_release_resources_tx(struct sock *sk)
2103 {
2104 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2105 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2106 	struct tls_rec *rec, *tmp;
2107 
2108 	/* Wait for any pending async encryptions to complete */
2109 	smp_store_mb(ctx->async_notify, true);
2110 	if (atomic_read(&ctx->encrypt_pending))
2111 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2112 
2113 	tls_tx_records(sk, -1);
2114 
2115 	/* Free up unsent records in tx_list. First, free the partially
2116 	 * sent record, if any, at the head of tx_list.
2117 	 */
2118 	if (tls_ctx->partially_sent_record) {
2119 		tls_free_partial_record(sk, tls_ctx);
2120 		rec = list_first_entry(&ctx->tx_list,
2121 				       struct tls_rec, list);
2122 		list_del(&rec->list);
2123 		sk_msg_free(sk, &rec->msg_plaintext);
2124 		kfree(rec);
2125 	}
2126 
2127 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2128 		list_del(&rec->list);
2129 		sk_msg_free(sk, &rec->msg_encrypted);
2130 		sk_msg_free(sk, &rec->msg_plaintext);
2131 		kfree(rec);
2132 	}
2133 
2134 	crypto_free_aead(ctx->aead_send);
2135 	tls_free_open_rec(sk);
2136 }
2137 
2138 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2139 {
2140 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2141 
2142 	kfree(ctx);
2143 }
2144 
2145 void tls_sw_release_resources_rx(struct sock *sk)
2146 {
2147 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2148 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2149 
2150 	kfree(tls_ctx->rx.rec_seq);
2151 	kfree(tls_ctx->rx.iv);
2152 
2153 	if (ctx->aead_recv) {
2154 		kfree_skb(ctx->recv_pkt);
2155 		ctx->recv_pkt = NULL;
2156 		skb_queue_purge(&ctx->rx_list);
2157 		crypto_free_aead(ctx->aead_recv);
2158 		strp_stop(&ctx->strp);
2159 		/* If tls_sw_strparser_arm() was not called (cleanup paths),
2160 		 * we still want to strp_stop(), but sk->sk_data_ready was
2161 		 * never swapped.
2162 		 */
2163 		if (ctx->saved_data_ready) {
2164 			write_lock_bh(&sk->sk_callback_lock);
2165 			sk->sk_data_ready = ctx->saved_data_ready;
2166 			write_unlock_bh(&sk->sk_callback_lock);
2167 		}
2168 	}
2169 }
2170 
2171 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2172 {
2173 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2174 
2175 	strp_done(&ctx->strp);
2176 }
2177 
2178 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2179 {
2180 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2181 
2182 	kfree(ctx);
2183 }
2184 
2185 void tls_sw_free_resources_rx(struct sock *sk)
2186 {
2187 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2188 
2189 	tls_sw_release_resources_rx(sk);
2190 	tls_sw_free_ctx_rx(tls_ctx);
2191 }
2192 
2193 /* The work handler to transmit the encrypted records in tx_list */
2194 static void tx_work_handler(struct work_struct *work)
2195 {
2196 	struct delayed_work *delayed_work = to_delayed_work(work);
2197 	struct tx_work *tx_work = container_of(delayed_work,
2198 					       struct tx_work, work);
2199 	struct sock *sk = tx_work->sk;
2200 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2201 	struct tls_sw_context_tx *ctx;
2202 
2203 	if (unlikely(!tls_ctx))
2204 		return;
2205 
2206 	ctx = tls_sw_ctx_tx(tls_ctx);
2207 	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2208 		return;
2209 
2210 	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2211 		return;
2212 	mutex_lock(&tls_ctx->tx_lock);
2213 	lock_sock(sk);
2214 	tls_tx_records(sk, -1);
2215 	release_sock(sk);
2216 	mutex_unlock(&tls_ctx->tx_lock);
2217 }
2218 
2219 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2220 {
2221 	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2222 
2223 	/* Schedule the transmission if the tx list is ready */
2224 	if (is_tx_ready(tx_ctx) &&
2225 	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2226 		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2227 }
2228 
2229 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2230 {
2231 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2232 
2233 	write_lock_bh(&sk->sk_callback_lock);
2234 	rx_ctx->saved_data_ready = sk->sk_data_ready;
2235 	sk->sk_data_ready = tls_data_ready;
2236 	write_unlock_bh(&sk->sk_callback_lock);
2237 
2238 	strp_check_rcv(&rx_ctx->strp);
2239 }
2240 
2241 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2242 {
2243 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2244 	struct tls_prot_info *prot = &tls_ctx->prot_info;
2245 	struct tls_crypto_info *crypto_info;
2246 	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2247 	struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2248 	struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2249 	struct tls_sw_context_tx *sw_ctx_tx = NULL;
2250 	struct tls_sw_context_rx *sw_ctx_rx = NULL;
2251 	struct cipher_context *cctx;
2252 	struct crypto_aead **aead;
2253 	struct strp_callbacks cb;
2254 	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2255 	struct crypto_tfm *tfm;
2256 	char *iv, *rec_seq, *key, *salt, *cipher_name;
2257 	size_t keysize;
2258 	int rc = 0;
2259 
2260 	if (!ctx) {
2261 		rc = -EINVAL;
2262 		goto out;
2263 	}
2264 
2265 	if (tx) {
2266 		if (!ctx->priv_ctx_tx) {
2267 			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2268 			if (!sw_ctx_tx) {
2269 				rc = -ENOMEM;
2270 				goto out;
2271 			}
2272 			ctx->priv_ctx_tx = sw_ctx_tx;
2273 		} else {
2274 			sw_ctx_tx =
2275 				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2276 		}
2277 	} else {
2278 		if (!ctx->priv_ctx_rx) {
2279 			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2280 			if (!sw_ctx_rx) {
2281 				rc = -ENOMEM;
2282 				goto out;
2283 			}
2284 			ctx->priv_ctx_rx = sw_ctx_rx;
2285 		} else {
2286 			sw_ctx_rx =
2287 				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2288 		}
2289 	}
2290 
2291 	if (tx) {
2292 		crypto_init_wait(&sw_ctx_tx->async_wait);
2293 		crypto_info = &ctx->crypto_send.info;
2294 		cctx = &ctx->tx;
2295 		aead = &sw_ctx_tx->aead_send;
2296 		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2297 		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2298 		sw_ctx_tx->tx_work.sk = sk;
2299 	} else {
2300 		crypto_init_wait(&sw_ctx_rx->async_wait);
2301 		crypto_info = &ctx->crypto_recv.info;
2302 		cctx = &ctx->rx;
2303 		skb_queue_head_init(&sw_ctx_rx->rx_list);
2304 		aead = &sw_ctx_rx->aead_recv;
2305 	}
2306 
2307 	switch (crypto_info->cipher_type) {
2308 	case TLS_CIPHER_AES_GCM_128: {
2309 		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2310 		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2311 		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2312 		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2313 		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2314 		rec_seq =
2315 		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2316 		gcm_128_info =
2317 			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
2318 		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2319 		key = gcm_128_info->key;
2320 		salt = gcm_128_info->salt;
2321 		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2322 		cipher_name = "gcm(aes)";
2323 		break;
2324 	}
2325 	case TLS_CIPHER_AES_GCM_256: {
2326 		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2327 		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2328 		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2329 		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2330 		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2331 		rec_seq =
2332 		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2333 		gcm_256_info =
2334 			(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2335 		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2336 		key = gcm_256_info->key;
2337 		salt = gcm_256_info->salt;
2338 		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2339 		cipher_name = "gcm(aes)";
2340 		break;
2341 	}
2342 	case TLS_CIPHER_AES_CCM_128: {
2343 		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2344 		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2345 		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2346 		iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
2347 		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2348 		rec_seq =
2349 		((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
2350 		ccm_128_info =
2351 		(struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
2352 		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2353 		key = ccm_128_info->key;
2354 		salt = ccm_128_info->salt;
2355 		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2356 		cipher_name = "ccm(aes)";
2357 		break;
2358 	}
2359 	default:
2360 		rc = -EINVAL;
2361 		goto free_priv;
2362 	}
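	/* Resulting sizes in bytes for the supported ciphers:
	 *   AES-GCM-128: key 16, salt 4, iv 8, tag 16, rec_seq 8
	 *   AES-GCM-256: key 32, salt 4, iv 8, tag 16, rec_seq 8
	 *   AES-CCM-128: key 16, salt 4, iv 8, tag 16, rec_seq 8
	 */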
2363 
2364 	/* Sanity-check the sizes for stack allocations. */
2365 	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2366 	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
2367 		rc = -EINVAL;
2368 		goto free_priv;
2369 	}
2370 
2371 	if (crypto_info->version == TLS_1_3_VERSION) {
2372 		nonce_size = 0;
2373 		prot->aad_size = TLS_HEADER_SIZE;
2374 		prot->tail_size = 1;
2375 	} else {
2376 		prot->aad_size = TLS_AAD_SPACE_SIZE;
2377 		prot->tail_size = 0;
2378 	}
2379 
2380 	prot->version = crypto_info->version;
2381 	prot->cipher_type = crypto_info->cipher_type;
2382 	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2383 	prot->tag_size = tag_size;
2384 	prot->overhead_size = prot->prepend_size +
2385 			      prot->tag_size + prot->tail_size;
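	/* Worked example for AES-GCM-128: TLS 1.2 has an 8-byte explicit
	 * nonce and no tail, so overhead_size = (5 + 8) + 16 + 0 = 29;
	 * TLS 1.3 carries no explicit nonce but appends one content-type
	 * byte, so overhead_size = 5 + 16 + 1 = 22.
	 */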
2386 	prot->iv_size = iv_size;
2387 	prot->salt_size = salt_size;
2388 	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2389 	if (!cctx->iv) {
2390 		rc = -ENOMEM;
2391 		goto free_priv;
2392 	}
2393 	/* Note: the 128-bit and 256-bit ciphers use the same salt size */
2394 	prot->rec_seq_size = rec_seq_size;
2395 	memcpy(cctx->iv, salt, salt_size);
2396 	memcpy(cctx->iv + salt_size, iv, iv_size);
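	/* cctx->iv now holds the 4-byte salt followed by the 8-byte IV
	 * from userspace; the per-record nonce is built from this buffer
	 * and the record sequence number by the send and receive paths.
	 */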
2397 	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2398 	if (!cctx->rec_seq) {
2399 		rc = -ENOMEM;
2400 		goto free_iv;
2401 	}
2402 
2403 	if (!*aead) {
2404 		*aead = crypto_alloc_aead(cipher_name, 0, 0);
2405 		if (IS_ERR(*aead)) {
2406 			rc = PTR_ERR(*aead);
2407 			*aead = NULL;
2408 			goto free_rec_seq;
2409 		}
2410 	}
2411 
2412 	ctx->push_pending_record = tls_sw_push_pending_record;
2413 
2414 	rc = crypto_aead_setkey(*aead, key, keysize);
2415 
2416 	if (rc)
2417 		goto free_aead;
2418 
2419 	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2420 	if (rc)
2421 		goto free_aead;
2422 
2423 	if (sw_ctx_rx) {
2424 		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2425 
2426 		if (crypto_info->version == TLS_1_3_VERSION)
2427 			sw_ctx_rx->async_capable = 0;
2428 		else
2429 			sw_ctx_rx->async_capable =
2430 				!!(tfm->__crt_alg->cra_flags &
2431 				   CRYPTO_ALG_ASYNC);
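		/* An AEAD backed by async crypto (e.g. a hardware offload)
		 * advertises CRYPTO_ALG_ASYNC. TLS 1.3 is forced synchronous
		 * because the real record type is only known once the
		 * record has been decrypted.
		 */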
2432 
2433 		/* Set up strparser */
2434 		memset(&cb, 0, sizeof(cb));
2435 		cb.rcv_msg = tls_queue;
2436 		cb.parse_msg = tls_read_size;
2437 
2438 		strp_init(&sw_ctx_rx->strp, sk, &cb);
2439 	}
2440 
2441 	goto out;
2442 
2443 free_aead:
2444 	crypto_free_aead(*aead);
2445 	*aead = NULL;
2446 free_rec_seq:
2447 	kfree(cctx->rec_seq);
2448 	cctx->rec_seq = NULL;
2449 free_iv:
2450 	kfree(cctx->iv);
2451 	cctx->iv = NULL;
2452 free_priv:
2453 	if (tx) {
2454 		kfree(ctx->priv_ctx_tx);
2455 		ctx->priv_ctx_tx = NULL;
2456 	} else {
2457 		kfree(ctx->priv_ctx_rx);
2458 		ctx->priv_ctx_rx = NULL;
2459 	}
2460 out:
2461 	return rc;
2462 }
2463