/* xref: /openbmc/linux/net/core/skmsg.c (revision af9b2ff010f593d81e2f5fb04155e9fc25b9dfd0) */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <trace/events/sock.h>

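/* msg->sg is a ring; sg.start/sg.end may wrap. Coalescing into the element
 * just before sg.end is only allowed once the ring has advanced past
 * elem_first_coalesce; the two branches below cover the non-wrapped
 * (end > start) and wrapped (end < start) layouts.
 */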
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

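/* Grow @msg to @len bytes using the socket's page_frag allocator,
 * coalescing with the last sg entry when the new bytes are physically
 * contiguous with it. On allocation or memory-accounting failure the
 * msg is trimmed back to its original size.
 */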
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

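/* Share @len bytes starting at offset @off of @src with @dst without
 * copying payload data; both msgs end up referencing the same pages.
 * Bytes contiguous with dst's last entry are merged into it, and the
 * cloned bytes are charged to @sk.
 */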
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

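/* Release memory accounting for the first @bytes of @msg and advance
 * sg.start past fully consumed entries, zeroing their length/offset.
 * Pages are not released here; only the charge against @sk is undone.
 */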
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

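/* Like sk_msg_return_zero(), but walks the whole ring from sg.start to
 * sg.end and only uncharges memory; the sg entries themselves are left
 * untouched.
 */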
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

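/* The helpers below free sg entries in one of two accounting modes:
 * with "charge" the freed bytes are also uncharged from the socket,
 * while the *_nocharge variants only release the pages.
 */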
static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

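/* Trim @msg from the tail down to @len bytes, freeing whole sg entries
 * and shrinking the last remaining one, and pull sg.curr/copybreak back
 * if they now point beyond the trimmed end.
 */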
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

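/* Pin up to @bytes of user memory from @from directly into msg's sg
 * ring (zerocopy). Fails with -EFAULT when more pages are needed than
 * there are free sg slots (MAX_MSG_FRAGS); on error the iterator is
 * reverted.
 */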
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
					     &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in that case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; the msg will need to be trimmed later
	 * if it also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

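/* Copy up to @bytes from @from into the buffers already allocated in
 * @msg, starting at sg.curr/copybreak. Unlike the zerocopy path above,
 * this actually copies data into msg's existing pages.
 */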
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			if (copy)
				copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy) {
				copied = copied ? copied : -EFAULT;
				goto out;
			}

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb) {
					sk_mem_uncharge(sk, copy);
					atomic_sub(copy, &sk->sk_rmem_alloc);
				}
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Don't bother optimizing the peek case; if
				 * copy_page_to_iter didn't copy the entire
				 * length, just break out.
				 */
				if (copy != sge->length)
					goto out;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}
out:
	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	return alloc_sk_msg(GFP_KERNEL);
}

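/* Map @len bytes of @skb (starting at @off) into @msg's sg ring, queue
 * the msg on the psock ingress list and wake the socket. Returns the
 * number of bytes queued, or a negative error.
 */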
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg,
					bool take_ref)
{
	int num_sge, copied;

	/* skb_to_sgvec will fail when the total number of fragments in
	 * frag_list and frags exceeds MAX_MSG_FRAGS. For example, the
	 * caller may aggregate multiple skbs.
	 */
	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (num_sge < 0) {
		/* skb linearize may fail with ENOMEM, but let's simply try
		 * again later if this happens. Under memory pressure we
		 * don't want to drop the skb. We need to linearize the skb
		 * so that the mapping in skb_to_sgvec cannot error.
		 * Note that skb_linearize requires the skb not to be shared.
		 */
		if (skb_linearize(skb))
			return -EAGAIN;

		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
		if (unlikely(num_sge < 0))
			return num_sge;
	}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	psock->ingress_bytes += len;
#endif
	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = take_ref ? skb_get(skb) : skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len, bool take_ref);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * skip memory accounting and owner transition since it is already
	 * set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len, true);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits because the skb
 * is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len, bool take_ref)
{
	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}

	return sk_psock_skb_ingress(psock, skb, off, len);
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->len = len;
		state->off = off;
	}
	spin_unlock_bh(&psock->ingress_lock);
}

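/* Backlog work: drain psock->ingress_skb, either transmitting each skb
 * or moving it to the ingress msg queue. Partial progress is saved in
 * psock->work_state so an -EAGAIN can be retried later without losing
 * or reordering data.
 */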
static void sk_psock_backlog(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	u32 len = 0, off = 0;
	bool ingress;
	int ret;

	/* Increment the psock refcnt to synchronize with the close(fd) path
	 * in sock_map_close(), ensuring we wait for backlog thread completion
	 * before sk_socket is freed. If the refcnt increment fails, it
	 * indicates sock_map_close() completed with sk_socket potentially
	 * already freed.
	 */
	if (!sk_psock_get(psock->sk))
		return;
	mutex_lock(&psock->work_mutex);
	while ((skb = skb_peek(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
			struct strp_msg *stm = strp_msg(skb);

			off = stm->offset;
			len = stm->full_len;
		}

		/* Resume processing from previous partial state */
		if (unlikely(state->len)) {
			len = state->len;
			off = state->off;
		}

		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, len, off);
					/* Restore redir info we cleared before */
					skb_bpf_set_redir(skb, psock->sk, ingress);
					/* Delay slightly to prioritize any
					 * other work that might be here.
					 */
					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
						schedule_delayed_work(&psock->work, 1);
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		/* The entire skb sent, clear state */
		sk_psock_skb_state(psock, state, 0, 0);
		skb = skb_dequeue(&psock->ingress_skb);
		kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
	sk_psock_put(psock->sk, psock);
}

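/* Create and attach a psock to @sk, saving the socket's original proto
 * callbacks so they can be restored when the psock is dropped. Fails if
 * the socket already has a ULP or user data attached.
 */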
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_destroy = prot->destroy;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		if (!msg->skb)
			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_delayed_work_sync(&psock->work);
	__sk_psock_zap_ingress(psock);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	if (psock->sk_pair)
		sock_put(psock->sk_pair);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

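/* Run the attached msg_parser BPF program on @msg and map its verdict.
 * On __SK_REDIRECT, take a reference on the chosen target socket and
 * remember whether the redirect is to ingress.
 */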
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir) {
			sock_put(psock->sk_redir);
			psock->sk_redir = NULL;
		}
		if (!msg->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		psock->redir_ingress = sk_msg_to_ingress(msg);
		psock->sk_redir = msg->sk_redir;
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

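/* Forward @skb to the socket selected by the BPF program. The skb is
 * dropped if no target was set, or if the target's psock is gone, dead,
 * or no longer accepting transmit work.
 */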
static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error means a buggy BPF program returned a redirect
	 * verdict but then didn't set a redirect target.
	 */
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_delayed_work(&psock_other->work, 0);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			goto out_free;

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue the work, otherwise we may get out-of-order
		 * data. Any error from sk_psock_skb_ingress is handled
		 * by retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len, false);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_delayed_work(&psock->work, 0);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0)
				goto out_free;
		}
		break;
	case __SK_REDIRECT:
		tcp_eat_skb(psock->sk, skb);
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		skb_bpf_redirect_clear(skb);
		tcp_eat_skb(psock->sk, skb);
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_delayed_work(&psock->work, 0);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	trace_sk_data_ready(sk);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			read_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			read_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	int ret;

	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	ret = strp_init(&psock->strp, sk, &cb);
	if (!ret)
		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);

	if (sk_is_tcp(sk)) {
		psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
		psock->copied_seq = tcp_sk(sk)->copied_seq;
	}
	return ret;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

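/* ->read_skb() callback for the non-strparser path: run the stream or
 * skb verdict program on each skb and apply the result. Returns the
 * number of bytes consumed, or a negative error.
 */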
static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		tcp_eat_skb(sk, skb);
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
	}
	ret = sk_psock_verdict_apply(psock, skb, ret);
	if (ret < 0)
		len = ret;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	const struct proto_ops *ops;
	int copied;

	trace_sk_data_ready(sk);

	if (unlikely(!sock))
		return;
	ops = READ_ONCE(sock->ops);
	if (!ops || !ops->read_skb)
		return;
	copied = ops->read_skb(sk, sk_psock_verdict_recv);
	if (copied >= 0) {
		struct sk_psock *psock;

		rcu_read_lock();
		psock = sk_psock(sk);
		if (psock)
			sk_psock_data_ready(sk, psock);
		rcu_read_unlock();
	}
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}