xref: /openbmc/linux/net/tls/tls_strp.c (revision 84c61fe1)
1c618db2aSJakub Kicinski // SPDX-License-Identifier: GPL-2.0-only
2*84c61fe1SJakub Kicinski /* Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */
3c618db2aSJakub Kicinski 
4c618db2aSJakub Kicinski #include <linux/skbuff.h>
5*84c61fe1SJakub Kicinski #include <linux/workqueue.h>
6*84c61fe1SJakub Kicinski #include <net/strparser.h>
7*84c61fe1SJakub Kicinski #include <net/tcp.h>
8*84c61fe1SJakub Kicinski #include <net/sock.h>
9*84c61fe1SJakub Kicinski #include <net/tls.h>
10c618db2aSJakub Kicinski 
11c618db2aSJakub Kicinski #include "tls.h"
12c618db2aSJakub Kicinski 
13*84c61fe1SJakub Kicinski static struct workqueue_struct *tls_strp_wq;
14d4e5db64SJakub Kicinski 
15*84c61fe1SJakub Kicinski static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
16*84c61fe1SJakub Kicinski {
17*84c61fe1SJakub Kicinski 	if (strp->stopped)
18*84c61fe1SJakub Kicinski 		return;
19*84c61fe1SJakub Kicinski 
20*84c61fe1SJakub Kicinski 	strp->stopped = 1;
21*84c61fe1SJakub Kicinski 
22*84c61fe1SJakub Kicinski 	/* Report an error on the lower socket */
23*84c61fe1SJakub Kicinski 	strp->sk->sk_err = -err;
24*84c61fe1SJakub Kicinski 	sk_error_report(strp->sk);
25*84c61fe1SJakub Kicinski }
26*84c61fe1SJakub Kicinski 
27*84c61fe1SJakub Kicinski static void tls_strp_anchor_free(struct tls_strparser *strp)
28*84c61fe1SJakub Kicinski {
29*84c61fe1SJakub Kicinski 	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
30*84c61fe1SJakub Kicinski 
31*84c61fe1SJakub Kicinski 	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
32*84c61fe1SJakub Kicinski 	shinfo->frag_list = NULL;
33*84c61fe1SJakub Kicinski 	consume_skb(strp->anchor);
34*84c61fe1SJakub Kicinski 	strp->anchor = NULL;
35*84c61fe1SJakub Kicinski }
36*84c61fe1SJakub Kicinski 
/* Create a new skb with the contents of input copied to its page frags */
static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
{
	struct strp_msg *rxm;
	struct sk_buff *skb;
	int i, err, offset;

	/* Allocate a head-less skb whose page frags can hold the whole
	 * anchor's worth of data.
	 */
	skb = alloc_skb_with_frags(0, strp->anchor->len, TLS_PAGE_ORDER,
				   &err, strp->sk->sk_allocation);
	if (!skb)
		return NULL;

	/* Linearize the record into the new frags, starting at the record
	 * offset within the anchor.
	 * NOTE(review): destination was sized for anchor->len bytes but the
	 * source copy starts at stm.offset, so the last skb_copy_bits()
	 * looks like it could read past the anchor's end when offset > 0 —
	 * confirm callers guarantee enough data, else the WARN fires.
	 */
	offset = strp->stm.offset;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON_ONCE(skb_copy_bits(strp->anchor, offset,
					   skb_frag_address(frag),
					   skb_frag_size(frag)));
		offset += skb_frag_size(frag);
	}

	/* Carry over cb/metadata and reset the record offset — the data
	 * now starts at the head of the copy.
	 */
	skb_copy_header(skb, strp->anchor);
	rxm = strp_msg(skb);
	rxm->offset = 0;
	return skb;
}
64d4e5db64SJakub Kicinski 
/* Steal the input skb, input msg is invalid after calling this function */
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx)
{
	struct tls_strparser *strp = &ctx->strp;

#ifdef CONFIG_TLS_DEVICE
	/* Detaching only makes sense once the device marked the record
	 * decrypted.
	 */
	DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted);
#else
	/* This function turns an input into an output,
	 * that can only happen if we have offload.
	 */
	WARN_ON(1);
#endif

	if (strp->copy_mode) {
		struct sk_buff *skb;

		/* Replace anchor with an empty skb, this is a little
		 * dangerous but __tls_cur_msg() warns on empty skbs
		 * so hopefully we'll catch abuses.
		 */
		skb = alloc_skb(0, strp->sk->sk_allocation);
		if (!skb)
			return NULL;

		/* Hand the caller the data-carrying anchor; keep the
		 * fresh empty skb as the new anchor.
		 */
		swap(strp->anchor, skb);
		return skb;
	}

	/* Non-copy mode: the data still belongs to the TCP queue, so the
	 * caller gets a private copy instead of the anchor itself.
	 */
	return tls_strp_msg_make_copy(strp);
}
96*84c61fe1SJakub Kicinski 
97*84c61fe1SJakub Kicinski /* Force the input skb to be in copy mode. The data ownership remains
98*84c61fe1SJakub Kicinski  * with the input skb itself (meaning unpause will wipe it) but it can
99*84c61fe1SJakub Kicinski  * be modified.
100*84c61fe1SJakub Kicinski  */
1018b3c59a7SJakub Kicinski int tls_strp_msg_cow(struct tls_sw_context_rx *ctx)
1028b3c59a7SJakub Kicinski {
103*84c61fe1SJakub Kicinski 	struct tls_strparser *strp = &ctx->strp;
104*84c61fe1SJakub Kicinski 	struct sk_buff *skb;
1058b3c59a7SJakub Kicinski 
106*84c61fe1SJakub Kicinski 	if (strp->copy_mode)
107*84c61fe1SJakub Kicinski 		return 0;
108*84c61fe1SJakub Kicinski 
109*84c61fe1SJakub Kicinski 	skb = tls_strp_msg_make_copy(strp);
110*84c61fe1SJakub Kicinski 	if (!skb)
111*84c61fe1SJakub Kicinski 		return -ENOMEM;
112*84c61fe1SJakub Kicinski 
113*84c61fe1SJakub Kicinski 	tls_strp_anchor_free(strp);
114*84c61fe1SJakub Kicinski 	strp->anchor = skb;
115*84c61fe1SJakub Kicinski 
116*84c61fe1SJakub Kicinski 	tcp_read_done(strp->sk, strp->stm.full_len);
117*84c61fe1SJakub Kicinski 	strp->copy_mode = 1;
118*84c61fe1SJakub Kicinski 
1198b3c59a7SJakub Kicinski 	return 0;
1208b3c59a7SJakub Kicinski }
1218b3c59a7SJakub Kicinski 
/* Make a clone (in the skb sense) of the input msg to keep a reference
 * to the underlying data. The reference-holding skbs get placed on
 * @dst.
 */
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
{
	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

	if (strp->copy_mode) {
		struct sk_buff *skb;

		WARN_ON_ONCE(!shinfo->nr_frags);

		/* We can't skb_clone() the anchor, it gets wiped by unpause */
		skb = alloc_skb(0, strp->sk->sk_allocation);
		if (!skb)
			return -ENOMEM;

		/* Queue the data-carrying anchor itself and install the
		 * fresh empty skb as the new anchor.
		 */
		__skb_queue_tail(dst, strp->anchor);
		strp->anchor = skb;
	} else {
		struct sk_buff *iter, *clone;
		int chunk, len, offset;

		/* Walk the borrowed TCP skbs on the anchor's frag_list and
		 * clone each one overlapping [offset, offset + full_len).
		 */
		offset = strp->stm.offset;
		len = strp->stm.full_len;
		iter = shinfo->frag_list;

		while (len > 0) {
			/* Skip skbs that end before the record starts */
			if (iter->len <= offset) {
				offset -= iter->len;
				goto next;
			}

			chunk = iter->len - offset;
			offset = 0;

			clone = skb_clone(iter, strp->sk->sk_allocation);
			if (!clone)
				return -ENOMEM;
			__skb_queue_tail(dst, clone);

			len -= chunk;
next:
			iter = iter->next;
		}
	}

	return 0;
}
172*84c61fe1SJakub Kicinski 
173*84c61fe1SJakub Kicinski static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
174*84c61fe1SJakub Kicinski {
175*84c61fe1SJakub Kicinski 	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
176*84c61fe1SJakub Kicinski 	int i;
177*84c61fe1SJakub Kicinski 
178*84c61fe1SJakub Kicinski 	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
179*84c61fe1SJakub Kicinski 
180*84c61fe1SJakub Kicinski 	for (i = 0; i < shinfo->nr_frags; i++)
181*84c61fe1SJakub Kicinski 		__skb_frag_unref(&shinfo->frags[i], false);
182*84c61fe1SJakub Kicinski 	shinfo->nr_frags = 0;
183*84c61fe1SJakub Kicinski 	strp->copy_mode = 0;
184*84c61fe1SJakub Kicinski }
185*84c61fe1SJakub Kicinski 
186*84c61fe1SJakub Kicinski static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
187*84c61fe1SJakub Kicinski 			   unsigned int offset, size_t in_len)
188*84c61fe1SJakub Kicinski {
189*84c61fe1SJakub Kicinski 	struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
190*84c61fe1SJakub Kicinski 	size_t sz, len, chunk;
191*84c61fe1SJakub Kicinski 	struct sk_buff *skb;
192*84c61fe1SJakub Kicinski 	skb_frag_t *frag;
193*84c61fe1SJakub Kicinski 
194*84c61fe1SJakub Kicinski 	if (strp->msg_ready)
195*84c61fe1SJakub Kicinski 		return 0;
196*84c61fe1SJakub Kicinski 
197*84c61fe1SJakub Kicinski 	skb = strp->anchor;
198*84c61fe1SJakub Kicinski 	frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
199*84c61fe1SJakub Kicinski 
200*84c61fe1SJakub Kicinski 	len = in_len;
201*84c61fe1SJakub Kicinski 	/* First make sure we got the header */
202*84c61fe1SJakub Kicinski 	if (!strp->stm.full_len) {
203*84c61fe1SJakub Kicinski 		/* Assume one page is more than enough for headers */
204*84c61fe1SJakub Kicinski 		chunk =	min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag));
205*84c61fe1SJakub Kicinski 		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
206*84c61fe1SJakub Kicinski 					   skb_frag_address(frag) +
207*84c61fe1SJakub Kicinski 					   skb_frag_size(frag),
208*84c61fe1SJakub Kicinski 					   chunk));
209*84c61fe1SJakub Kicinski 
210*84c61fe1SJakub Kicinski 		sz = tls_rx_msg_size(strp, strp->anchor);
211*84c61fe1SJakub Kicinski 		if (sz < 0) {
212*84c61fe1SJakub Kicinski 			desc->error = sz;
213*84c61fe1SJakub Kicinski 			return 0;
214*84c61fe1SJakub Kicinski 		}
215*84c61fe1SJakub Kicinski 
216*84c61fe1SJakub Kicinski 		/* We may have over-read, sz == 0 is guaranteed under-read */
217*84c61fe1SJakub Kicinski 		if (sz > 0)
218*84c61fe1SJakub Kicinski 			chunk =	min_t(size_t, chunk, sz - skb->len);
219*84c61fe1SJakub Kicinski 
220*84c61fe1SJakub Kicinski 		skb->len += chunk;
221*84c61fe1SJakub Kicinski 		skb->data_len += chunk;
222*84c61fe1SJakub Kicinski 		skb_frag_size_add(frag, chunk);
223*84c61fe1SJakub Kicinski 		frag++;
224*84c61fe1SJakub Kicinski 		len -= chunk;
225*84c61fe1SJakub Kicinski 		offset += chunk;
226*84c61fe1SJakub Kicinski 
227*84c61fe1SJakub Kicinski 		strp->stm.full_len = sz;
228*84c61fe1SJakub Kicinski 		if (!strp->stm.full_len)
229*84c61fe1SJakub Kicinski 			goto read_done;
230*84c61fe1SJakub Kicinski 	}
231*84c61fe1SJakub Kicinski 
232*84c61fe1SJakub Kicinski 	/* Load up more data */
233*84c61fe1SJakub Kicinski 	while (len && strp->stm.full_len > skb->len) {
234*84c61fe1SJakub Kicinski 		chunk =	min_t(size_t, len, strp->stm.full_len - skb->len);
235*84c61fe1SJakub Kicinski 		chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag));
236*84c61fe1SJakub Kicinski 		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
237*84c61fe1SJakub Kicinski 					   skb_frag_address(frag) +
238*84c61fe1SJakub Kicinski 					   skb_frag_size(frag),
239*84c61fe1SJakub Kicinski 					   chunk));
240*84c61fe1SJakub Kicinski 
241*84c61fe1SJakub Kicinski 		skb->len += chunk;
242*84c61fe1SJakub Kicinski 		skb->data_len += chunk;
243*84c61fe1SJakub Kicinski 		skb_frag_size_add(frag, chunk);
244*84c61fe1SJakub Kicinski 		frag++;
245*84c61fe1SJakub Kicinski 		len -= chunk;
246*84c61fe1SJakub Kicinski 		offset += chunk;
247*84c61fe1SJakub Kicinski 	}
248*84c61fe1SJakub Kicinski 
249*84c61fe1SJakub Kicinski 	if (strp->stm.full_len == skb->len) {
250*84c61fe1SJakub Kicinski 		desc->count = 0;
251*84c61fe1SJakub Kicinski 
252*84c61fe1SJakub Kicinski 		strp->msg_ready = 1;
253*84c61fe1SJakub Kicinski 		tls_rx_msg_ready(strp);
254*84c61fe1SJakub Kicinski 	}
255*84c61fe1SJakub Kicinski 
256*84c61fe1SJakub Kicinski read_done:
257*84c61fe1SJakub Kicinski 	return in_len - len;
258*84c61fe1SJakub Kicinski }
259*84c61fe1SJakub Kicinski 
260*84c61fe1SJakub Kicinski static int tls_strp_read_copyin(struct tls_strparser *strp)
261*84c61fe1SJakub Kicinski {
262*84c61fe1SJakub Kicinski 	struct socket *sock = strp->sk->sk_socket;
263*84c61fe1SJakub Kicinski 	read_descriptor_t desc;
264*84c61fe1SJakub Kicinski 
265*84c61fe1SJakub Kicinski 	desc.arg.data = strp;
266*84c61fe1SJakub Kicinski 	desc.error = 0;
267*84c61fe1SJakub Kicinski 	desc.count = 1; /* give more than one skb per call */
268*84c61fe1SJakub Kicinski 
269*84c61fe1SJakub Kicinski 	/* sk should be locked here, so okay to do read_sock */
270*84c61fe1SJakub Kicinski 	sock->ops->read_sock(strp->sk, &desc, tls_strp_copyin);
271*84c61fe1SJakub Kicinski 
272*84c61fe1SJakub Kicinski 	return desc.error;
273*84c61fe1SJakub Kicinski }
274*84c61fe1SJakub Kicinski 
/* Less data is queued than a full record needs: decide whether to start
 * copy mode (draining TCP into our own pages) or keep waiting.
 */
static int tls_strp_read_short(struct tls_strparser *strp)
{
	struct skb_shared_info *shinfo;
	struct page *page;
	int need_spc, len;

	/* If the rbuf is small or rcv window has collapsed to 0 we need
	 * to read the data out. Otherwise the connection will stall.
	 * Without pressure threshold of INT_MAX will never be ready.
	 */
	if (likely(!tcp_epollin_ready(strp->sk, INT_MAX)))
		return 0;

	/* Switch the anchor from borrowing the TCP queue (frag_list) to
	 * owning page frags we copy data into.
	 */
	shinfo = skb_shinfo(strp->anchor);
	shinfo->frag_list = NULL;

	/* If we don't know the length go max plus page for cipher overhead */
	need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;

	/* Pre-populate empty (size 0) frags; tls_strp_copyin() grows
	 * them as data arrives.
	 */
	for (len = need_spc; len > 0; len -= PAGE_SIZE) {
		page = alloc_page(strp->sk->sk_allocation);
		if (!page) {
			tls_strp_flush_anchor_copy(strp);
			return -ENOMEM;
		}

		skb_fill_page_desc(strp->anchor, shinfo->nr_frags++,
				   page, 0, 0);
	}

	strp->copy_mode = 1;
	strp->stm.offset = 0;

	/* Anchor holds no data until copyin fills the frags */
	strp->anchor->len = 0;
	strp->anchor->data_len = 0;
	strp->anchor->truesize = round_up(need_spc, PAGE_SIZE);

	tls_strp_read_copyin(strp);

	return 0;
}
316*84c61fe1SJakub Kicinski 
/* Point the anchor at the TCP receive queue without copying: the first
 * queued skb becomes the anchor's frag_list, and the anchor's length
 * fields are set to cover @len bytes of record data.
 */
static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
{
	struct tcp_sock *tp = tcp_sk(strp->sk);
	struct sk_buff *first;
	u32 offset;

	first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset);
	if (WARN_ON_ONCE(!first))
		return;

	/* Bestow the state onto the anchor */
	strp->anchor->len = offset + len;
	strp->anchor->data_len = offset + len;
	strp->anchor->truesize = offset + len;

	skb_shinfo(strp->anchor)->frag_list = first;

	skb_copy_header(strp->anchor, first);
	/* The queued skbs remain owned by TCP; the anchor must not run
	 * their destructor when it is freed.
	 */
	strp->anchor->destructor = NULL;

	/* Record data starts @offset bytes into the first queued skb */
	strp->stm.offset = offset;
}
339*84c61fe1SJakub Kicinski 
/* Publish the parsed record state (length, offset, record type) into the
 * anchor's control buffer for the upper RX path to consume.
 */
void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
{
	struct strp_msg *rxm;
	struct tls_msg *tlm;

	DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready);
	DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);

	/* In non-copy mode the anchor borrows the TCP queue, which may
	 * have changed since msg_ready was set - re-attach when asked.
	 */
	if (!strp->copy_mode && force_refresh) {
		/* The full record must still be queued */
		if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
			return;

		tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
	}

	rxm = strp_msg(strp->anchor);
	rxm->full_len	= strp->stm.full_len;
	rxm->offset	= strp->stm.offset;
	tlm = tls_msg(strp->anchor);
	tlm->control	= strp->mark;
}
361*84c61fe1SJakub Kicinski 
/* Called with lock held on lower socket */
static int tls_strp_read_sock(struct tls_strparser *strp)
{
	int sz, inq;

	inq = tcp_inq(strp->sk);
	if (inq < 1)
		return 0;

	/* Copy mode keeps draining via read_sock into our frags */
	if (unlikely(strp->copy_mode))
		return tls_strp_read_copyin(strp);

	/* Known record length but not all of it queued yet */
	if (inq < strp->stm.full_len)
		return tls_strp_read_short(strp);

	if (!strp->stm.full_len) {
		/* Peek at the queue in place to parse the record header */
		tls_strp_load_anchor_with_queue(strp, inq);

		sz = tls_rx_msg_size(strp, strp->anchor);
		if (sz < 0) {
			tls_strp_abort_strp(strp, sz);
			return sz;
		}

		strp->stm.full_len = sz;

		/* sz == 0 means even the header is incomplete */
		if (!strp->stm.full_len || inq < strp->stm.full_len)
			return tls_strp_read_short(strp);
	}

	/* Full record queued - hand it to the upper layer */
	strp->msg_ready = 1;
	tls_rx_msg_ready(strp);

	return 0;
}
397*84c61fe1SJakub Kicinski 
398*84c61fe1SJakub Kicinski void tls_strp_check_rcv(struct tls_strparser *strp)
399*84c61fe1SJakub Kicinski {
400*84c61fe1SJakub Kicinski 	if (unlikely(strp->stopped) || strp->msg_ready)
401*84c61fe1SJakub Kicinski 		return;
402*84c61fe1SJakub Kicinski 
403*84c61fe1SJakub Kicinski 	if (tls_strp_read_sock(strp) == -ENOMEM)
404*84c61fe1SJakub Kicinski 		queue_work(tls_strp_wq, &strp->work);
405*84c61fe1SJakub Kicinski }
406*84c61fe1SJakub Kicinski 
407*84c61fe1SJakub Kicinski /* Lower sock lock held */
408*84c61fe1SJakub Kicinski void tls_strp_data_ready(struct tls_strparser *strp)
409*84c61fe1SJakub Kicinski {
410*84c61fe1SJakub Kicinski 	/* This check is needed to synchronize with do_tls_strp_work.
411*84c61fe1SJakub Kicinski 	 * do_tls_strp_work acquires a process lock (lock_sock) whereas
412*84c61fe1SJakub Kicinski 	 * the lock held here is bh_lock_sock. The two locks can be
413*84c61fe1SJakub Kicinski 	 * held by different threads at the same time, but bh_lock_sock
414*84c61fe1SJakub Kicinski 	 * allows a thread in BH context to safely check if the process
415*84c61fe1SJakub Kicinski 	 * lock is held. In this case, if the lock is held, queue work.
416*84c61fe1SJakub Kicinski 	 */
417*84c61fe1SJakub Kicinski 	if (sock_owned_by_user_nocheck(strp->sk)) {
418*84c61fe1SJakub Kicinski 		queue_work(tls_strp_wq, &strp->work);
419*84c61fe1SJakub Kicinski 		return;
420*84c61fe1SJakub Kicinski 	}
421*84c61fe1SJakub Kicinski 
422*84c61fe1SJakub Kicinski 	tls_strp_check_rcv(strp);
423*84c61fe1SJakub Kicinski }
424*84c61fe1SJakub Kicinski 
425*84c61fe1SJakub Kicinski static void tls_strp_work(struct work_struct *w)
426*84c61fe1SJakub Kicinski {
427*84c61fe1SJakub Kicinski 	struct tls_strparser *strp =
428*84c61fe1SJakub Kicinski 		container_of(w, struct tls_strparser, work);
429*84c61fe1SJakub Kicinski 
430*84c61fe1SJakub Kicinski 	lock_sock(strp->sk);
431*84c61fe1SJakub Kicinski 	tls_strp_check_rcv(strp);
432*84c61fe1SJakub Kicinski 	release_sock(strp->sk);
433*84c61fe1SJakub Kicinski }
434*84c61fe1SJakub Kicinski 
435*84c61fe1SJakub Kicinski void tls_strp_msg_done(struct tls_strparser *strp)
436*84c61fe1SJakub Kicinski {
437*84c61fe1SJakub Kicinski 	WARN_ON(!strp->stm.full_len);
438*84c61fe1SJakub Kicinski 
439*84c61fe1SJakub Kicinski 	if (likely(!strp->copy_mode))
440*84c61fe1SJakub Kicinski 		tcp_read_done(strp->sk, strp->stm.full_len);
441*84c61fe1SJakub Kicinski 	else
442*84c61fe1SJakub Kicinski 		tls_strp_flush_anchor_copy(strp);
443*84c61fe1SJakub Kicinski 
444*84c61fe1SJakub Kicinski 	strp->msg_ready = 0;
445*84c61fe1SJakub Kicinski 	memset(&strp->stm, 0, sizeof(strp->stm));
446*84c61fe1SJakub Kicinski 
447*84c61fe1SJakub Kicinski 	tls_strp_check_rcv(strp);
448*84c61fe1SJakub Kicinski }
449*84c61fe1SJakub Kicinski 
/* Flag the parser stopped; all entry points check this before new work */
void tls_strp_stop(struct tls_strparser *strp)
{
	strp->stopped = 1;
}
454*84c61fe1SJakub Kicinski 
455*84c61fe1SJakub Kicinski int tls_strp_init(struct tls_strparser *strp, struct sock *sk)
456*84c61fe1SJakub Kicinski {
457*84c61fe1SJakub Kicinski 	memset(strp, 0, sizeof(*strp));
458*84c61fe1SJakub Kicinski 
459*84c61fe1SJakub Kicinski 	strp->sk = sk;
460*84c61fe1SJakub Kicinski 
461*84c61fe1SJakub Kicinski 	strp->anchor = alloc_skb(0, GFP_KERNEL);
462*84c61fe1SJakub Kicinski 	if (!strp->anchor)
463*84c61fe1SJakub Kicinski 		return -ENOMEM;
464*84c61fe1SJakub Kicinski 
465*84c61fe1SJakub Kicinski 	INIT_WORK(&strp->work, tls_strp_work);
466*84c61fe1SJakub Kicinski 
467*84c61fe1SJakub Kicinski 	return 0;
468*84c61fe1SJakub Kicinski }
469*84c61fe1SJakub Kicinski 
/* strp must already be stopped so that tls_strp_recv will no longer be called.
 * Note that tls_strp_done is not called with the lower socket held.
 */
void tls_strp_done(struct tls_strparser *strp)
{
	WARN_ON(!strp->stopped);

	/* Make sure no deferred work is still running before freeing */
	cancel_work_sync(&strp->work);
	tls_strp_anchor_free(strp);
}
480*84c61fe1SJakub Kicinski 
481*84c61fe1SJakub Kicinski int __init tls_strp_dev_init(void)
482*84c61fe1SJakub Kicinski {
483*84c61fe1SJakub Kicinski 	tls_strp_wq = create_singlethread_workqueue("kstrp");
484*84c61fe1SJakub Kicinski 	if (unlikely(!tls_strp_wq))
485*84c61fe1SJakub Kicinski 		return -ENOMEM;
486*84c61fe1SJakub Kicinski 
487*84c61fe1SJakub Kicinski 	return 0;
488*84c61fe1SJakub Kicinski }
489*84c61fe1SJakub Kicinski 
/* Module exit: tear down the deferred-parse workqueue */
void tls_strp_dev_exit(void)
{
	destroy_workqueue(tls_strp_wq);
}
494