xref: /openbmc/linux/net/tls/tls_strp.c (revision 4e40e624)
1c618db2aSJakub Kicinski // SPDX-License-Identifier: GPL-2.0-only
284c61fe1SJakub Kicinski /* Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */
3c618db2aSJakub Kicinski 
4c618db2aSJakub Kicinski #include <linux/skbuff.h>
584c61fe1SJakub Kicinski #include <linux/workqueue.h>
684c61fe1SJakub Kicinski #include <net/strparser.h>
784c61fe1SJakub Kicinski #include <net/tcp.h>
884c61fe1SJakub Kicinski #include <net/sock.h>
984c61fe1SJakub Kicinski #include <net/tls.h>
10c618db2aSJakub Kicinski 
11c618db2aSJakub Kicinski #include "tls.h"
12c618db2aSJakub Kicinski 
1384c61fe1SJakub Kicinski static struct workqueue_struct *tls_strp_wq;
14d4e5db64SJakub Kicinski 
/* Stop the parser and propagate @err (a negative errno) to the lower
 * socket so waiters observe the failure.  Idempotent: once stopped,
 * further calls are no-ops.
 */
static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
{
	if (strp->stopped)
		return;

	strp->stopped = 1;

	/* Report an error on the lower socket */
	WRITE_ONCE(strp->sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(strp->sk);
}
2884c61fe1SJakub Kicinski 
/* Free the anchor skb.  Outside of copy mode the frag_list only borrows
 * the TCP queue's skbs, so it must be detached before freeing to avoid
 * dropping references we do not own.
 */
static void tls_strp_anchor_free(struct tls_strparser *strp)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
	if (!strp->copy_mode)
		shinfo->frag_list = NULL;
	consume_skb(strp->anchor);
	strp->anchor = NULL;
}
3984c61fe1SJakub Kicinski 
/* Allocate a new page-frag backed skb and copy @len bytes of @in_skb,
 * starting at @offset, into it.  Header state is copied from @in_skb
 * via skb_copy_header().  Returns NULL on allocation failure.
 */
static struct sk_buff *
tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
		  int offset, int len)
{
	struct sk_buff *skb;
	int i, err;

	skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
				   &err, strp->sk->sk_allocation);
	if (!skb)
		return NULL;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* Frags were sized to cover @len, so the copies can't fail */
		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag),
					   skb_frag_size(frag)));
		offset += skb_frag_size(frag);
	}

	skb->len = len;
	skb->data_len = len;
	skb_copy_header(skb, in_skb);
	return skb;
}
66c1c607b1SJakub Kicinski 
67c1c607b1SJakub Kicinski /* Create a new skb with the contents of input copied to its page frags */
tls_strp_msg_make_copy(struct tls_strparser * strp)68c1c607b1SJakub Kicinski static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
69c1c607b1SJakub Kicinski {
70c1c607b1SJakub Kicinski 	struct strp_msg *rxm;
71c1c607b1SJakub Kicinski 	struct sk_buff *skb;
72c1c607b1SJakub Kicinski 
73c1c607b1SJakub Kicinski 	skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
74c1c607b1SJakub Kicinski 				strp->stm.full_len);
75c1c607b1SJakub Kicinski 	if (!skb)
76c1c607b1SJakub Kicinski 		return NULL;
77c1c607b1SJakub Kicinski 
7884c61fe1SJakub Kicinski 	rxm = strp_msg(skb);
7984c61fe1SJakub Kicinski 	rxm->offset = 0;
80d4e5db64SJakub Kicinski 	return skb;
81d4e5db64SJakub Kicinski }
82d4e5db64SJakub Kicinski 
/* Steal the input skb, input msg is invalid after calling this function */
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx)
{
	struct tls_strparser *strp = &ctx->strp;

#ifdef CONFIG_TLS_DEVICE
	DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted);
#else
	/* This function turns an input into an output,
	 * that can only happen if we have offload.
	 */
	WARN_ON(1);
#endif

	if (strp->copy_mode) {
		struct sk_buff *skb;

		/* Replace anchor with an empty skb, this is a little
		 * dangerous but __tls_cur_msg() warns on empty skbs
		 * so hopefully we'll catch abuses.
		 */
		skb = alloc_skb(0, strp->sk->sk_allocation);
		if (!skb)
			return NULL;

		/* In copy mode the anchor owns its data - hand it out whole */
		swap(strp->anchor, skb);
		return skb;
	}

	/* Anchor only borrows the TCP queue's data - return a private copy */
	return tls_strp_msg_make_copy(strp);
}
11484c61fe1SJakub Kicinski 
/* Force the input skb to be in copy mode. The data ownership remains
 * with the input skb itself (meaning unpause will wipe it) but it can
 * be modified.
 */
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx)
{
	struct tls_strparser *strp = &ctx->strp;
	struct sk_buff *skb;

	if (strp->copy_mode)
		return 0;

	skb = tls_strp_msg_make_copy(strp);
	if (!skb)
		return -ENOMEM;

	/* Drop the old anchor (detaching the borrowed TCP skbs first)
	 * before installing the private copy.
	 */
	tls_strp_anchor_free(strp);
	strp->anchor = skb;

	/* The message bytes have been consumed out of the TCP queue */
	tcp_read_done(strp->sk, strp->stm.full_len);
	strp->copy_mode = 1;

	return 0;
}
1398b3c59a7SJakub Kicinski 
/* Make a clone (in the skb sense) of the input msg to keep a reference
 * to the underlying data. The reference-holding skbs get placed on
 * @dst.
 */
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

	if (strp->copy_mode) {
		struct sk_buff *skb;

		WARN_ON_ONCE(!shinfo->nr_frags);

		/* We can't skb_clone() the anchor, it gets wiped by unpause */
		skb = alloc_skb(0, strp->sk->sk_allocation);
		if (!skb)
			return -ENOMEM;

		/* Queue the old anchor itself; keep parsing into a fresh one */
		__skb_queue_tail(dst, strp->anchor);
		strp->anchor = skb;
	} else {
		struct sk_buff *iter, *clone;
		int chunk, len, offset;

		offset = strp->stm.offset;
		len = strp->stm.full_len;
		iter = shinfo->frag_list;

		/* Walk the borrowed TCP skbs, cloning every one which
		 * overlaps the [offset, offset + full_len) message range.
		 */
		while (len > 0) {
			if (iter->len <= offset) {
				/* Entirely before the message - skip it */
				offset -= iter->len;
				goto next;
			}

			chunk = iter->len - offset;
			offset = 0;

			clone = skb_clone(iter, strp->sk->sk_allocation);
			if (!clone)
				return -ENOMEM;
			__skb_queue_tail(dst, clone);

			len -= chunk;
next:
			iter = iter->next;
		}
	}

	return 0;
}
19084c61fe1SJakub Kicinski 
/* Return the anchor to a clean state after parsing in copy mode:
 * release its pages, free any chained copies and clear the mode flags.
 */
static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
	int i;

	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], false);
	shinfo->nr_frags = 0;
	if (strp->copy_mode) {
		/* Mixed-decrypted input gets chained on the frag_list
		 * by tls_strp_copyin_skb() - those copies are ours to free.
		 */
		kfree_skb_list(shinfo->frag_list);
		shinfo->frag_list = NULL;
	}
	strp->copy_mode = 0;
	strp->mixed_decrypted = 0;
}
20884c61fe1SJakub Kicinski 
/* Copy-mode ingest: append up to @in_len bytes of @in_skb (starting at
 * @offset) into the pre-allocated page frags of @skb (the anchor),
 * determining the record length from the header once enough bytes have
 * arrived.  Returns the number of bytes consumed, or a negative error
 * from tls_rx_msg_size().
 */
static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
				struct sk_buff *in_skb, unsigned int offset,
				size_t in_len)
{
	size_t len, chunk;
	skb_frag_t *frag;
	int sz;

	/* Frags hold up to PAGE_SIZE each; resume at the first partial one */
	frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];

	len = in_len;
	/* First make sure we got the header */
	if (!strp->stm.full_len) {
		/* Assume one page is more than enough for headers */
		chunk =	min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag));
		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag) +
					   skb_frag_size(frag),
					   chunk));

		skb->len += chunk;
		skb->data_len += chunk;
		skb_frag_size_add(frag, chunk);

		sz = tls_rx_msg_size(strp, skb);
		if (sz < 0)
			return sz;

		/* We may have over-read, sz == 0 is guaranteed under-read */
		if (unlikely(sz && sz < skb->len)) {
			int over = skb->len - sz;

			/* Trim the excess off the frag and the skb counters */
			WARN_ON_ONCE(over > chunk);
			skb->len -= over;
			skb->data_len -= over;
			skb_frag_size_add(frag, -over);

			chunk -= over;
		}

		frag++;
		len -= chunk;
		offset += chunk;

		strp->stm.full_len = sz;
		if (!strp->stm.full_len)
			goto read_done;
	}

	/* Load up more data */
	while (len && strp->stm.full_len > skb->len) {
		chunk =	min_t(size_t, len, strp->stm.full_len - skb->len);
		chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag));
		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag) +
					   skb_frag_size(frag),
					   chunk));

		skb->len += chunk;
		skb->data_len += chunk;
		skb_frag_size_add(frag, chunk);
		frag++;
		len -= chunk;
		offset += chunk;
	}

read_done:
	return in_len - len;
}
278eca9bfafSJakub Kicinski 
/* Copy-mode ingest for mixed decrypted/non-decrypted input: copy up to
 * one record's worth of @in_skb into a fresh skb and chain it on the
 * anchor's frag_list (list kept "circular" via first->prev pointing at
 * the tail for O(1) appends).  Returns bytes consumed or a negative
 * error.
 */
static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
			       struct sk_buff *in_skb, unsigned int offset,
			       size_t in_len)
{
	struct sk_buff *nskb, *first, *last;
	struct skb_shared_info *shinfo;
	size_t chunk;
	int sz;

	/* Cap the copy at what the current record still needs; if the
	 * record size isn't known yet, use the worst-case maximum.
	 */
	if (strp->stm.full_len)
		chunk = strp->stm.full_len - skb->len;
	else
		chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
	chunk = min(chunk, in_len);

	/* nskb inherits in_skb's header state via skb_copy_header() */
	nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
	if (!nskb)
		return -ENOMEM;

	shinfo = skb_shinfo(skb);
	if (!shinfo->frag_list) {
		shinfo->frag_list = nskb;
		nskb->prev = nskb;
	} else {
		first = shinfo->frag_list;
		last = first->prev;
		last->next = nskb;
		first->prev = nskb;
	}

	skb->len += chunk;
	skb->data_len += chunk;

	if (!strp->stm.full_len) {
		sz = tls_rx_msg_size(strp, skb);
		if (sz < 0)
			return sz;

		/* We may have over-read, sz == 0 is guaranteed under-read */
		if (unlikely(sz && sz < skb->len)) {
			int over = skb->len - sz;

			WARN_ON_ONCE(over > chunk);
			skb->len -= over;
			skb->data_len -= over;
			/* Excess was all copied into nskb - trim it there */
			__pskb_trim(nskb, nskb->len - over);

			chunk -= over;
		}

		strp->stm.full_len = sz;
	}

	return chunk;
}
334eca9bfafSJakub Kicinski 
/* read_sock() callback for copy mode: track whether the anchor mixes
 * decrypted and non-decrypted data, route each input skb to the frag or
 * frag_list ingest path, and mark the message ready once a full record
 * has accumulated.  Returns bytes consumed; errors are reported through
 * desc->error with 0 returned to stop the read loop.
 */
static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
			   unsigned int offset, size_t in_len)
{
	struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
	struct sk_buff *skb;
	int ret;

	if (strp->msg_ready)
		return 0;

	skb = strp->anchor;
	if (!skb->len)
		/* Empty anchor - adopt the first input's decrypted status */
		skb_copy_decrypted(skb, in_skb);
	else
		strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);

	if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
		ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
	else
		ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
	if (ret < 0) {
		desc->error = ret;
		ret = 0;
	}

	if (strp->stm.full_len && strp->stm.full_len == skb->len) {
		/* Full record accumulated - stop reading and notify */
		desc->count = 0;

		WRITE_ONCE(strp->msg_ready, 1);
		tls_rx_msg_ready(strp);
	}

	return ret;
}
36984c61fe1SJakub Kicinski 
tls_strp_read_copyin(struct tls_strparser * strp)37084c61fe1SJakub Kicinski static int tls_strp_read_copyin(struct tls_strparser *strp)
37184c61fe1SJakub Kicinski {
37284c61fe1SJakub Kicinski 	read_descriptor_t desc;
37384c61fe1SJakub Kicinski 
37484c61fe1SJakub Kicinski 	desc.arg.data = strp;
37584c61fe1SJakub Kicinski 	desc.error = 0;
37684c61fe1SJakub Kicinski 	desc.count = 1; /* give more than one skb per call */
37784c61fe1SJakub Kicinski 
37884c61fe1SJakub Kicinski 	/* sk should be locked here, so okay to do read_sock */
37911863c6dSHannes Reinecke 	tcp_read_sock(strp->sk, &desc, tls_strp_copyin);
38084c61fe1SJakub Kicinski 
38184c61fe1SJakub Kicinski 	return desc.error;
38284c61fe1SJakub Kicinski }
38384c61fe1SJakub Kicinski 
/* Enter copy mode: pre-allocate page frags on the anchor large enough
 * for the message (worst case if the length isn't known yet) and copy
 * data out of the TCP queue.  With @qshort the copy is skipped while
 * the receive queue is still healthy.  Returns 0 or -ENOMEM.
 */
static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
{
	struct skb_shared_info *shinfo;
	struct page *page;
	int need_spc, len;

	/* If the rbuf is small or rcv window has collapsed to 0 we need
	 * to read the data out. Otherwise the connection will stall.
	 * Without pressure threshold of INT_MAX will never be ready.
	 */
	if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX)))
		return 0;

	shinfo = skb_shinfo(strp->anchor);
	shinfo->frag_list = NULL;

	/* If we don't know the length go max plus page for cipher overhead */
	need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;

	for (len = need_spc; len > 0; len -= PAGE_SIZE) {
		page = alloc_page(strp->sk->sk_allocation);
		if (!page) {
			tls_strp_flush_anchor_copy(strp);
			return -ENOMEM;
		}

		skb_fill_page_desc(strp->anchor, shinfo->nr_frags++,
				   page, 0, 0);
	}

	strp->copy_mode = 1;
	strp->stm.offset = 0;

	/* Reset the anchor's accounting; the frags start out empty */
	strp->anchor->len = 0;
	strp->anchor->data_len = 0;
	strp->anchor->truesize = round_up(need_spc, PAGE_SIZE);

	tls_strp_read_copyin(strp);

	return 0;
}
42584c61fe1SJakub Kicinski 
/* Validate the TCP queue skbs covering the current message: TCP
 * sequence numbers must be contiguous and every skb's decrypted status
 * must match the first one's.  Returns false if the caller needs to
 * fall back to copy mode.
 */
static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
{
	unsigned int len = strp->stm.offset + strp->stm.full_len;
	struct sk_buff *first, *skb;
	u32 seq;

	first = skb_shinfo(strp->anchor)->frag_list;
	skb = first;
	seq = TCP_SKB_CB(first)->seq;

	/* Make sure there's no duplicate data in the queue,
	 * and the decrypted status matches.
	 */
	while (skb->len < len) {
		seq += skb->len;
		len -= skb->len;
		skb = skb->next;

		if (TCP_SKB_CB(skb)->seq != seq)
			return false;
		if (skb_cmp_decrypted(first, skb))
			return false;
	}

	return true;
}
4520d87bbd3SJakub Kicinski 
/* Zero-copy setup: point the anchor's frag_list straight at the TCP
 * receive queue and bestow length/offset state covering @len bytes of
 * message data.  The anchor borrows the queue's skbs; it does not own
 * them.
 */
static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
{
	struct tcp_sock *tp = tcp_sk(strp->sk);
	struct sk_buff *first;
	u32 offset;

	first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset);
	if (WARN_ON_ONCE(!first))
		return;

	/* Bestow the state onto the anchor */
	strp->anchor->len = offset + len;
	strp->anchor->data_len = offset + len;
	strp->anchor->truesize = offset + len;

	skb_shinfo(strp->anchor)->frag_list = first;

	skb_copy_header(strp->anchor, first);
	/* Clear any destructor inherited via skb_copy_header() - the
	 * anchor must not run it on data it merely borrows.
	 */
	strp->anchor->destructor = NULL;

	strp->stm.offset = offset;
}
47584c61fe1SJakub Kicinski 
/* Populate the anchor's strp_msg / tls_msg control blocks from parser
 * state so the reader can consume the message.  With @force_refresh
 * (and not in copy mode) re-attach the TCP queue to the anchor first.
 */
void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
{
	struct strp_msg *rxm;
	struct tls_msg *tlm;

	DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready);
	DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);

	if (!strp->copy_mode && force_refresh) {
		/* The full message must still be queued on the socket */
		if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
			return;

		tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
	}

	rxm = strp_msg(strp->anchor);
	rxm->full_len	= strp->stm.full_len;
	rxm->offset	= strp->stm.offset;
	tlm = tls_msg(strp->anchor);
	tlm->control	= strp->mark;
}
49784c61fe1SJakub Kicinski 
/* Called with lock held on lower socket */
static int tls_strp_read_sock(struct tls_strparser *strp)
{
	int sz, inq;

	/* Bytes currently available in the TCP receive queue */
	inq = tcp_inq(strp->sk);
	if (inq < 1)
		return 0;

	if (unlikely(strp->copy_mode))
		return tls_strp_read_copyin(strp);

	/* Record incomplete - maybe copy out to relieve queue pressure */
	if (inq < strp->stm.full_len)
		return tls_strp_read_copy(strp, true);

	if (!strp->stm.full_len) {
		/* Parse the record header straight off the TCP queue */
		tls_strp_load_anchor_with_queue(strp, inq);

		sz = tls_rx_msg_size(strp, strp->anchor);
		if (sz < 0) {
			tls_strp_abort_strp(strp, sz);
			return sz;
		}

		strp->stm.full_len = sz;

		if (!strp->stm.full_len || inq < strp->stm.full_len)
			return tls_strp_read_copy(strp, true);
	}

	/* Queue unusable in place (seq gaps / mixed decrypted) - copy */
	if (!tls_strp_check_queue_ok(strp))
		return tls_strp_read_copy(strp, false);

	WRITE_ONCE(strp->msg_ready, 1);
	tls_rx_msg_ready(strp);

	return 0;
}
53684c61fe1SJakub Kicinski 
tls_strp_check_rcv(struct tls_strparser * strp)53784c61fe1SJakub Kicinski void tls_strp_check_rcv(struct tls_strparser *strp)
53884c61fe1SJakub Kicinski {
53984c61fe1SJakub Kicinski 	if (unlikely(strp->stopped) || strp->msg_ready)
54084c61fe1SJakub Kicinski 		return;
54184c61fe1SJakub Kicinski 
54284c61fe1SJakub Kicinski 	if (tls_strp_read_sock(strp) == -ENOMEM)
54384c61fe1SJakub Kicinski 		queue_work(tls_strp_wq, &strp->work);
54484c61fe1SJakub Kicinski }
54584c61fe1SJakub Kicinski 
54684c61fe1SJakub Kicinski /* Lower sock lock held */
tls_strp_data_ready(struct tls_strparser * strp)54784c61fe1SJakub Kicinski void tls_strp_data_ready(struct tls_strparser *strp)
54884c61fe1SJakub Kicinski {
54984c61fe1SJakub Kicinski 	/* This check is needed to synchronize with do_tls_strp_work.
55084c61fe1SJakub Kicinski 	 * do_tls_strp_work acquires a process lock (lock_sock) whereas
55184c61fe1SJakub Kicinski 	 * the lock held here is bh_lock_sock. The two locks can be
55284c61fe1SJakub Kicinski 	 * held by different threads at the same time, but bh_lock_sock
55384c61fe1SJakub Kicinski 	 * allows a thread in BH context to safely check if the process
55484c61fe1SJakub Kicinski 	 * lock is held. In this case, if the lock is held, queue work.
55584c61fe1SJakub Kicinski 	 */
55684c61fe1SJakub Kicinski 	if (sock_owned_by_user_nocheck(strp->sk)) {
55784c61fe1SJakub Kicinski 		queue_work(tls_strp_wq, &strp->work);
55884c61fe1SJakub Kicinski 		return;
55984c61fe1SJakub Kicinski 	}
56084c61fe1SJakub Kicinski 
56184c61fe1SJakub Kicinski 	tls_strp_check_rcv(strp);
56284c61fe1SJakub Kicinski }
56384c61fe1SJakub Kicinski 
tls_strp_work(struct work_struct * w)56484c61fe1SJakub Kicinski static void tls_strp_work(struct work_struct *w)
56584c61fe1SJakub Kicinski {
56684c61fe1SJakub Kicinski 	struct tls_strparser *strp =
56784c61fe1SJakub Kicinski 		container_of(w, struct tls_strparser, work);
56884c61fe1SJakub Kicinski 
56984c61fe1SJakub Kicinski 	lock_sock(strp->sk);
57084c61fe1SJakub Kicinski 	tls_strp_check_rcv(strp);
57184c61fe1SJakub Kicinski 	release_sock(strp->sk);
57284c61fe1SJakub Kicinski }
57384c61fe1SJakub Kicinski 
/* The reader is done with the current message: release its data
 * (return it to TCP, or free our copies), reset per-message state and
 * immediately check whether the next record can be parsed.
 */
void tls_strp_msg_done(struct tls_strparser *strp)
{
	WARN_ON(!strp->stm.full_len);

	if (likely(!strp->copy_mode))
		tcp_read_done(strp->sk, strp->stm.full_len);
	else
		tls_strp_flush_anchor_copy(strp);

	WRITE_ONCE(strp->msg_ready, 0);
	memset(&strp->stm, 0, sizeof(strp->stm));

	tls_strp_check_rcv(strp);
}
58884c61fe1SJakub Kicinski 
/* Stop parsing; tls_strp_check_rcv() becomes a no-op from here on */
void tls_strp_stop(struct tls_strparser *strp)
{
	strp->stopped = 1;
}
59384c61fe1SJakub Kicinski 
/* Initialize @strp for use on @sk.  Returns 0, or -ENOMEM if the
 * anchor skb cannot be allocated.
 */
int tls_strp_init(struct tls_strparser *strp, struct sock *sk)
{
	memset(strp, 0, sizeof(*strp));

	strp->sk = sk;

	strp->anchor = alloc_skb(0, GFP_KERNEL);
	if (!strp->anchor)
		return -ENOMEM;

	INIT_WORK(&strp->work, tls_strp_work);

	return 0;
}
60884c61fe1SJakub Kicinski 
/* strp must already be stopped so that tls_strp_recv will no longer be called.
 * Note that tls_strp_done is not called with the lower socket held.
 */
void tls_strp_done(struct tls_strparser *strp)
{
	WARN_ON(!strp->stopped);

	/* Flush any queued retry work before freeing the anchor */
	cancel_work_sync(&strp->work);
	tls_strp_anchor_free(strp);
}
61984c61fe1SJakub Kicinski 
tls_strp_dev_init(void)62084c61fe1SJakub Kicinski int __init tls_strp_dev_init(void)
62184c61fe1SJakub Kicinski {
622d11ef9ccSJakub Kicinski 	tls_strp_wq = create_workqueue("tls-strp");
62384c61fe1SJakub Kicinski 	if (unlikely(!tls_strp_wq))
62484c61fe1SJakub Kicinski 		return -ENOMEM;
62584c61fe1SJakub Kicinski 
62684c61fe1SJakub Kicinski 	return 0;
62784c61fe1SJakub Kicinski }
62884c61fe1SJakub Kicinski 
/* Module-teardown counterpart of tls_strp_dev_init() */
void tls_strp_dev_exit(void)
{
	destroy_workqueue(tls_strp_wq);
}
633