xref: /openbmc/linux/net/tls/tls_strp.c (revision eca9bfaf)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */

#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <net/strparser.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/tls.h>

#include "tls.h"

static struct workqueue_struct *tls_strp_wq;

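/* Stop the parser and propagate the error (a negative errno) to the
 * lower TCP socket.
 */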
static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
{
	if (strp->stopped)
		return;

	strp->stopped = 1;

	/* Report an error on the lower socket */
	strp->sk->sk_err = -err;
	sk_error_report(strp->sk);
}

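/* Free the anchor skb. Outside of copy mode the frag_list points at
 * TCP queue skbs the parser does not own, so detach it before freeing.
 */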
static void tls_strp_anchor_free(struct tls_strparser *strp)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
	if (!strp->copy_mode)
		shinfo->frag_list = NULL;
	consume_skb(strp->anchor);
	strp->anchor = NULL;
}

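/* Allocate a new skb backed by page frags and copy @len bytes of @in_skb,
 * starting at @offset, into it.
 */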
static struct sk_buff *
tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
		  int offset, int len)
{
	struct sk_buff *skb;
	int i, err;

	skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
				   &err, strp->sk->sk_allocation);
	if (!skb)
		return NULL;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag),
					   skb_frag_size(frag)));
		offset += skb_frag_size(frag);
	}

	skb->len = len;
	skb->data_len = len;
	skb_copy_header(skb, in_skb);
	return skb;
}

/* Create a new skb with the contents of input copied to its page frags */
static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
{
	struct strp_msg *rxm;
	struct sk_buff *skb;

	skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
				strp->stm.full_len);
	if (!skb)
		return NULL;

	rxm = strp_msg(skb);
	rxm->offset = 0;
	return skb;
}

/* Steal the input skb; the input msg is invalid after calling this function */
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx)
{
	struct tls_strparser *strp = &ctx->strp;

#ifdef CONFIG_TLS_DEVICE
	DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted);
#else
	/* This function turns an input into an output,
	 * that can only happen if we have offload.
	 */
	WARN_ON(1);
#endif

	if (strp->copy_mode) {
		struct sk_buff *skb;

		/* Replace the anchor with an empty skb. This is a little
		 * dangerous, but __tls_cur_msg() warns on empty skbs
		 * so hopefully we'll catch abuses.
		 */
		skb = alloc_skb(0, strp->sk->sk_allocation);
		if (!skb)
			return NULL;

		swap(strp->anchor, skb);
		return skb;
	}

	return tls_strp_msg_make_copy(strp);
}

/* Force the input skb to be in copy mode. The data ownership remains
 * with the input skb itself (meaning unpause will wipe it) but it can
 * be modified.
 */
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx)
{
	struct tls_strparser *strp = &ctx->strp;
	struct sk_buff *skb;

	if (strp->copy_mode)
		return 0;

	skb = tls_strp_msg_make_copy(strp);
	if (!skb)
		return -ENOMEM;

	tls_strp_anchor_free(strp);
	strp->anchor = skb;

	tcp_read_done(strp->sk, strp->stm.full_len);
	strp->copy_mode = 1;

	return 0;
}

/* Make a clone (in the skb sense) of the input msg to keep a reference
 * to the underlying data. The reference-holding skbs get placed on
 * @dst.
 */
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

	if (strp->copy_mode) {
		struct sk_buff *skb;

		WARN_ON_ONCE(!shinfo->nr_frags);

		/* We can't skb_clone() the anchor, it gets wiped by unpause */
		skb = alloc_skb(0, strp->sk->sk_allocation);
		if (!skb)
			return -ENOMEM;

		__skb_queue_tail(dst, strp->anchor);
		strp->anchor = skb;
	} else {
		struct sk_buff *iter, *clone;
		int chunk, len, offset;

		offset = strp->stm.offset;
		len = strp->stm.full_len;
		iter = shinfo->frag_list;

		while (len > 0) {
			if (iter->len <= offset) {
				offset -= iter->len;
				goto next;
			}

			chunk = iter->len - offset;
			offset = 0;

			clone = skb_clone(iter, strp->sk->sk_allocation);
			if (!clone)
				return -ENOMEM;
			__skb_queue_tail(dst, clone);

			len -= chunk;
next:
			iter = iter->next;
		}
	}

	return 0;
}

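/* Drop the pages (and, in copy mode, any frag_list skbs) attached to the
 * anchor and reset the copy-mode state.
 */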
static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
	int i;

	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], false);
	shinfo->nr_frags = 0;
	if (strp->copy_mode) {
		kfree_skb_list(shinfo->frag_list);
		shinfo->frag_list = NULL;
	}
	strp->copy_mode = 0;
	strp->mixed_decrypted = 0;
}

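/* Copy-mode receive for input whose decrypted status is uniform:
 * append data from @in_skb to the page frags pre-allocated on the
 * anchor, determining the record length once the header is complete.
 */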
static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
				struct sk_buff *in_skb, unsigned int offset,
				size_t in_len)
{
	size_t len, chunk;
	skb_frag_t *frag;
	int sz;

	frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];

	len = in_len;
	/* First make sure we got the header */
	if (!strp->stm.full_len) {
		/* Assume one page is more than enough for headers */
		chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag));
		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag) +
					   skb_frag_size(frag),
					   chunk));

		skb->len += chunk;
		skb->data_len += chunk;
		skb_frag_size_add(frag, chunk);

		sz = tls_rx_msg_size(strp, skb);
		if (sz < 0)
			return sz;

		/* We may have over-read; sz == 0 is a guaranteed under-read */
		if (unlikely(sz && sz < skb->len)) {
			int over = skb->len - sz;

			WARN_ON_ONCE(over > chunk);
			skb->len -= over;
			skb->data_len -= over;
			skb_frag_size_add(frag, -over);

			chunk -= over;
		}

		frag++;
		len -= chunk;
		offset += chunk;

		strp->stm.full_len = sz;
		if (!strp->stm.full_len)
			goto read_done;
	}

	/* Load up more data */
	while (len && strp->stm.full_len > skb->len) {
		chunk = min_t(size_t, len, strp->stm.full_len - skb->len);
		chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag));
		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
					   skb_frag_address(frag) +
					   skb_frag_size(frag),
					   chunk));

		skb->len += chunk;
		skb->data_len += chunk;
		skb_frag_size_add(frag, chunk);
		frag++;
		len -= chunk;
		offset += chunk;
	}

read_done:
	return in_len - len;
}

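/* Copy-mode receive for input that mixes decrypted and non-decrypted
 * data: copy each chunk into its own skb and chain it on the anchor's
 * frag_list so the per-skb decrypted marking is preserved.
 */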
static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
			       struct sk_buff *in_skb, unsigned int offset,
			       size_t in_len)
{
	struct sk_buff *nskb, *first, *last;
	struct skb_shared_info *shinfo;
	size_t chunk;
	int sz;

	if (strp->stm.full_len)
		chunk = strp->stm.full_len - skb->len;
	else
		chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
	chunk = min(chunk, in_len);

	nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
	if (!nskb)
		return -ENOMEM;

	shinfo = skb_shinfo(skb);
	if (!shinfo->frag_list) {
		shinfo->frag_list = nskb;
		nskb->prev = nskb;
	} else {
		first = shinfo->frag_list;
		last = first->prev;
		last->next = nskb;
		first->prev = nskb;
	}

	skb->len += chunk;
	skb->data_len += chunk;

	if (!strp->stm.full_len) {
		sz = tls_rx_msg_size(strp, skb);
		if (sz < 0)
			return sz;

		/* We may have over-read; sz == 0 is a guaranteed under-read */
		if (unlikely(sz && sz < skb->len)) {
			int over = skb->len - sz;

			WARN_ON_ONCE(over > chunk);
			skb->len -= over;
			skb->data_len -= over;
			__pskb_trim(nskb, nskb->len - over);

			chunk -= over;
		}

		strp->stm.full_len = sz;
	}

	return chunk;
}

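/* read_sock() callback used in copy mode. Tracks whether the queue mixes
 * decrypted and non-decrypted data, dispatches to the frag- or skb-based
 * copy helper, and marks the message ready once full_len is queued.
 */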
static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
			   unsigned int offset, size_t in_len)
{
	struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
	struct sk_buff *skb;
	int ret;

	if (strp->msg_ready)
		return 0;

	skb = strp->anchor;
	if (!skb->len)
		skb_copy_decrypted(skb, in_skb);
	else
		strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);

	if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
		ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
	else
		ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
	if (ret < 0) {
		desc->error = ret;
		ret = 0;
	}

	if (strp->stm.full_len && strp->stm.full_len == skb->len) {
		desc->count = 0;

		strp->msg_ready = 1;
		tls_rx_msg_ready(strp);
	}

	return ret;
}

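/* Pull data out of the TCP socket via read_sock() while it is locked,
 * feeding it to tls_strp_copyin().
 */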
static int tls_strp_read_copyin(struct tls_strparser *strp)
{
	struct socket *sock = strp->sk->sk_socket;
	read_descriptor_t desc;

	desc.arg.data = strp;
	desc.error = 0;
	desc.count = 1; /* give more than one skb per call */

	/* sk should be locked here, so okay to do read_sock */
	sock->ops->read_sock(strp->sk, &desc, tls_strp_copyin);

	return desc.error;
}

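/* Switch to copy mode: allocate pages on the anchor and copy the record
 * out of the TCP queue instead of referencing it in place.
 */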
static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
{
	struct skb_shared_info *shinfo;
	struct page *page;
	int need_spc, len;

	/* If the rbuf is small or rcv window has collapsed to 0 we need
	 * to read the data out. Otherwise the connection will stall.
	 * Without pressure, a threshold of INT_MAX will never be ready.
	 */
	if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX)))
		return 0;

	shinfo = skb_shinfo(strp->anchor);
	shinfo->frag_list = NULL;

	/* If we don't know the length go max plus page for cipher overhead */
	need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;

	for (len = need_spc; len > 0; len -= PAGE_SIZE) {
		page = alloc_page(strp->sk->sk_allocation);
		if (!page) {
			tls_strp_flush_anchor_copy(strp);
			return -ENOMEM;
		}

		skb_fill_page_desc(strp->anchor, shinfo->nr_frags++,
				   page, 0, 0);
	}

	strp->copy_mode = 1;
	strp->stm.offset = 0;

	strp->anchor->len = 0;
	strp->anchor->data_len = 0;
	strp->anchor->truesize = round_up(need_spc, PAGE_SIZE);

	tls_strp_read_copyin(strp);

	return 0;
}

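/* Check that the skbs covering the record are contiguous in sequence
 * space and share the same decrypted status as the first one.
 */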
static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
{
	unsigned int len = strp->stm.offset + strp->stm.full_len;
	struct sk_buff *first, *skb;
	u32 seq;

	first = skb_shinfo(strp->anchor)->frag_list;
	skb = first;
	seq = TCP_SKB_CB(first)->seq;

	/* Make sure there's no duplicate data in the queue,
	 * and the decrypted status matches.
	 */
	while (skb->len < len) {
		seq += skb->len;
		len -= skb->len;
		skb = skb->next;

		if (TCP_SKB_CB(skb)->seq != seq)
			return false;
		if (skb_cmp_decrypted(first, skb))
			return false;
	}

	return true;
}

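/* Point the anchor's frag_list at the first skb of the TCP receive queue
 * (without taking a reference) and mirror its state onto the anchor.
 */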
static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
{
	struct tcp_sock *tp = tcp_sk(strp->sk);
	struct sk_buff *first;
	u32 offset;

	first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset);
	if (WARN_ON_ONCE(!first))
		return;

	/* Bestow the state onto the anchor */
	strp->anchor->len = offset + len;
	strp->anchor->data_len = offset + len;
	strp->anchor->truesize = offset + len;

	skb_shinfo(strp->anchor)->frag_list = first;

	skb_copy_header(strp->anchor, first);
	strp->anchor->destructor = NULL;

	strp->stm.offset = offset;
}

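/* Populate the strparser/TLS metadata on the anchor for the message that
 * is currently ready, optionally re-attaching the TCP queue first.
 */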
void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
{
	struct strp_msg *rxm;
	struct tls_msg *tlm;

	DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready);
	DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);

	if (!strp->copy_mode && force_refresh) {
		if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
			return;

		tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
	}

	rxm = strp_msg(strp->anchor);
	rxm->full_len	= strp->stm.full_len;
	rxm->offset	= strp->stm.offset;
	tlm = tls_msg(strp->anchor);
	tlm->control	= strp->mark;
}

/* Called with lock held on lower socket */
static int tls_strp_read_sock(struct tls_strparser *strp)
{
	int sz, inq;

	inq = tcp_inq(strp->sk);
	if (inq < 1)
		return 0;

	if (unlikely(strp->copy_mode))
		return tls_strp_read_copyin(strp);

	if (inq < strp->stm.full_len)
		return tls_strp_read_copy(strp, true);

	if (!strp->stm.full_len) {
		tls_strp_load_anchor_with_queue(strp, inq);

		sz = tls_rx_msg_size(strp, strp->anchor);
		if (sz < 0) {
			tls_strp_abort_strp(strp, sz);
			return sz;
		}

		strp->stm.full_len = sz;

		if (!strp->stm.full_len || inq < strp->stm.full_len)
			return tls_strp_read_copy(strp, true);
	}

	if (!tls_strp_check_queue_ok(strp))
		return tls_strp_read_copy(strp, false);

	strp->msg_ready = 1;
	tls_rx_msg_ready(strp);

	return 0;
}

void tls_strp_check_rcv(struct tls_strparser *strp)
{
	if (unlikely(strp->stopped) || strp->msg_ready)
		return;

	if (tls_strp_read_sock(strp) == -ENOMEM)
		queue_work(tls_strp_wq, &strp->work);
}

/* Lower sock lock held */
void tls_strp_data_ready(struct tls_strparser *strp)
{
	/* This check is needed to synchronize with do_tls_strp_work.
	 * do_tls_strp_work acquires a process lock (lock_sock) whereas
	 * the lock held here is bh_lock_sock. The two locks can be
	 * held by different threads at the same time, but bh_lock_sock
	 * allows a thread in BH context to safely check if the process
	 * lock is held. In this case, if the lock is held, queue work.
	 */
	if (sock_owned_by_user_nocheck(strp->sk)) {
		queue_work(tls_strp_wq, &strp->work);
		return;
	}

	tls_strp_check_rcv(strp);
}

static void tls_strp_work(struct work_struct *w)
{
	struct tls_strparser *strp =
		container_of(w, struct tls_strparser, work);

	lock_sock(strp->sk);
	tls_strp_check_rcv(strp);
	release_sock(strp->sk);
}

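/* The consumer is done with the current record: release the underlying
 * data (or the copied pages), clear the parsing state and look for the
 * next record.
 */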
void tls_strp_msg_done(struct tls_strparser *strp)
{
	WARN_ON(!strp->stm.full_len);

	if (likely(!strp->copy_mode))
		tcp_read_done(strp->sk, strp->stm.full_len);
	else
		tls_strp_flush_anchor_copy(strp);

	strp->msg_ready = 0;
	memset(&strp->stm, 0, sizeof(strp->stm));

	tls_strp_check_rcv(strp);
}

void tls_strp_stop(struct tls_strparser *strp)
{
	strp->stopped = 1;
}

int tls_strp_init(struct tls_strparser *strp, struct sock *sk)
{
	memset(strp, 0, sizeof(*strp));

	strp->sk = sk;

	strp->anchor = alloc_skb(0, GFP_KERNEL);
	if (!strp->anchor)
		return -ENOMEM;

	INIT_WORK(&strp->work, tls_strp_work);

	return 0;
}

/* strp must already be stopped so that tls_strp_recv will no longer be called.
 * Note that tls_strp_done is not called with the lower socket held.
 */
void tls_strp_done(struct tls_strparser *strp)
{
	WARN_ON(!strp->stopped);

	cancel_work_sync(&strp->work);
	tls_strp_anchor_free(strp);
}

int __init tls_strp_dev_init(void)
{
	tls_strp_wq = create_workqueue("tls-strp");
	if (unlikely(!tls_strp_wq))
		return -ENOMEM;

	return 0;
}

void tls_strp_dev_exit(void)
{
	destroy_workqueue(tls_strp_wq);
}