// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/util_macros.h>

#include <net/inet_common.h>
#include <net/tls.h>

void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tcp;
	int copied;

	if (!skb || !skb->len || !sk_is_tcp(sk))
		return;

	if (skb_bpf_strparser(skb))
		return;

	tcp = tcp_sk(sk);
	copied = tcp->copied_seq + skb->len;
	WRITE_ONCE(tcp->copied_seq, copied);
	tcp_rcv_space_adjust(sk);
	__tcp_cleanup_rbuf(sk, skb->len);
}

static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	u32 size, copied = 0;
	struct sk_msg *tmp;
	int i, ret = 0;

	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!tmp))
		return -ENOMEM;

	lock_sock(sk);
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
		sge = sk_msg_elem(msg, i);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				ret = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		sk_msg_xfer(tmp, msg, i, size);
		copied += size;
		if (sge->length)
			get_page(sk_msg_page(tmp, i));
		sk_msg_iter_var_next(i);
		tmp->sg.end = i;
		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes) {
				if (sge->length)
					sk_msg_iter_var_prev(i);
				break;
			}
		}
	} while (i != msg->sg.end);

	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);
		kfree(tmp);
	}

	release_sock(sk);
	return ret;
}
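/* Push the data described by @msg to the TCP stack, at most @apply_bytes at
 * a time. Walks the scatterlist from msg->sg.start and hands each page to
 * tcp_sendmsg_locked() as a single-entry MSG_SPLICE_PAGES bio_vec, retrying
 * with the adjusted offset/length on a short write. Caller holds the sock
 * lock; see tcp_bpf_push_locked() for the unlocked-entry wrapper.
 */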
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
			int flags, bool uncharge)
{
	struct msghdr msghdr = {};
	bool apply = apply_bytes;
	struct scatterlist *sge;
	struct page *page;
	int size, ret = 0;
	u32 off;

	while (1) {
		struct bio_vec bvec;
		bool has_tx_ulp;

		sge = sk_msg_elem(msg, msg->sg.start);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		off = sge->offset;
		page = sg_page(sge);

		tcp_rate_check_app_limited(sk);
retry:
		msghdr.msg_flags = flags | MSG_SPLICE_PAGES;
		has_tx_ulp = tls_sw_has_ctx_tx(sk);
		if (has_tx_ulp)
			msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY;

		if (size < sge->length && msg->sg.start != msg->sg.end)
			msghdr.msg_flags |= MSG_MORE;

		bvec_set_page(&bvec, page, size, off);
		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
		ret = tcp_sendmsg_locked(sk, &msghdr, size);
		if (ret <= 0)
			return ret;

		if (apply)
			apply_bytes -= ret;
		msg->sg.size -= ret;
		sge->offset += ret;
		sge->length -= ret;
		if (uncharge)
			sk_mem_uncharge(sk, ret);
		if (ret != size) {
			size -= ret;
			off += ret;
			goto retry;
		}
		if (!sge->length) {
			put_page(page);
			sk_msg_iter_next(msg, start);
			sg_init_table(sge, 1);
			if (msg->sg.start == msg->sg.end)
				break;
		}
		if (apply && !apply_bytes)
			break;
	}

	return 0;
}

static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
			       u32 apply_bytes, int flags, bool uncharge)
{
	int ret;

	lock_sock(sk);
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
	release_sock(sk);
	return ret;
}

int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags)
{
	struct sk_psock *psock = sk_psock_get(sk);
	int ret;

	if (unlikely(!psock))
		return -EPIPE;

	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
	sk_psock_put(sk, psock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_SYSCALL
static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
			     long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}

static bool is_next_msg_fin(struct sk_psock *psock)
{
	struct scatterlist *sge;
	struct sk_msg *msg_rx;
	int i;

	msg_rx = sk_psock_peek_msg(psock);
	i = msg_rx->sg.start;
	sge = sk_msg_elem(msg_rx, i);
	if (!sge->length) {
		struct sk_buff *skb = msg_rx->skb;

		if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
			return true;
	}
	return false;
}
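/* recvmsg handler installed when a stream or skb verdict program is
 * attached. Data delivered to this socket is queued on psock->ingress_msg
 * (see bpf_tcp_ingress() and sk_psock_queue_msg()) rather than on
 * sk_receive_queue, so copy from there via sk_msg_recvmsg() and keep
 * tcp->copied_seq in sync for non-PEEK reads.
 */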
static int tcp_bpf_recvmsg_parser(struct sock *sk,
				  struct msghdr *msg,
				  size_t len,
				  int flags,
				  int *addr_len)
{
	struct tcp_sock *tcp = tcp_sk(sk);
	int peek = flags & MSG_PEEK;
	u32 seq = tcp->copied_seq;
	struct sk_psock *psock;
	int copied = 0;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (!len)
		return 0;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, flags, addr_len);

	lock_sock(sk);

	/* We may have received data on the sk_receive_queue pre-accept and
	 * then we can not use read_skb in this context because we haven't
	 * assigned a sk_socket yet so have no link to the ops. The work-around
	 * is to check the sk_receive_queue and in these cases read skbs off
	 * queue again. The read_skb hook is not running at this point because
	 * of lock_sock so we avoid having multiple runners in read_skb.
	 */
	if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
		tcp_data_ready(sk);
		/* This handles the ENOMEM errors if we both receive data
		 * pre accept and are already under memory pressure. At least
		 * let user know to retry.
		 */
		if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
			copied = -EAGAIN;
			goto out;
		}
	}

msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	/* The typical case for EFAULT is the socket was gracefully
	 * shutdown with a FIN pkt. So check here the other case is
	 * some error on copy_page_to_iter which would be unexpected.
	 * On fin return correct return code to zero.
	 */
	if (copied == -EFAULT) {
		bool is_fin = is_next_msg_fin(psock);

		if (is_fin) {
			copied = 0;
			seq++;
			goto out;
		}
	}
	seq += copied;
	if (!copied) {
		long timeo;
		int data;

		if (sock_flag(sk, SOCK_DONE))
			goto out;

		if (sk->sk_err) {
			copied = sock_error(sk);
			goto out;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			goto out;

		if (sk->sk_state == TCP_CLOSE) {
			copied = -ENOTCONN;
			goto out;
		}

		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
		if (!timeo) {
			copied = -EAGAIN;
			goto out;
		}

		if (signal_pending(current)) {
			copied = sock_intr_errno(timeo);
			goto out;
		}

		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data < 0)
			return data;
		if (data && !sk_psock_queue_empty(psock))
			goto msg_bytes_ready;
		copied = -EAGAIN;
	}
out:
	if (!peek)
		WRITE_ONCE(tcp->copied_seq, seq);
	tcp_rcv_space_adjust(sk);
	if (copied > 0)
		__tcp_cleanup_rbuf(sk, copied);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied;
}

static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (!len)
		return 0;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, flags, addr_len);
	if (!skb_queue_empty(&sk->sk_receive_queue) &&
	    sk_psock_queue_empty(psock)) {
		sk_psock_put(sk, psock);
		return tcp_recvmsg(sk, msg, len, flags, addr_len);
	}
	lock_sock(sk);
msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		long timeo;
		int data;

		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data < 0)
			return data;
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			release_sock(sk);
			sk_psock_put(sk, psock);
			return tcp_recvmsg(sk, msg, len, flags, addr_len);
		}
		copied = -EAGAIN;
	}
	ret = copied;
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}
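/* Run the attached msg verdict program over @msg and carry out its
 * decision: __SK_PASS pushes the data to the local TCP stack, __SK_REDIRECT
 * forwards it to another socket (dropping the sock lock across the
 * redirect), and __SK_DROP frees it and returns -EACCES. Corked messages
 * are stashed on psock->cork until msg->cork_bytes are accounted for.
 */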
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
				struct sk_msg *msg, int *copied, int flags)
{
	bool cork = false, enospc = sk_msg_full(msg), redir_ingress;
	struct sock *sk_redir;
	u32 tosend, origsize, sent, delta = 0;
	u32 eval;
	int ret;

more_data:
	if (psock->eval == __SK_NONE) {
		/* Track delta in msg size to add/subtract it on SK_DROP from
		 * returned to user copied size. This ensures user doesn't
		 * get a positive return code with msg_cut_data and SK_DROP
		 * verdict.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}

	if (msg->cork_bytes &&
	    msg->cork_bytes > msg->sg.size && !enospc) {
		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
		if (!psock->cork) {
			psock->cork = kzalloc(sizeof(*psock->cork),
					      GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork)
				return -ENOMEM;
		}
		memcpy(psock->cork, msg, sizeof(*msg));
		return 0;
	}

	tosend = msg->sg.size;
	if (psock->apply_bytes && psock->apply_bytes < tosend)
		tosend = psock->apply_bytes;
	eval = __SK_NONE;

	switch (psock->eval) {
	case __SK_PASS:
		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
		if (unlikely(ret)) {
			*copied -= sk_msg_free(sk, msg);
			break;
		}
		sk_msg_apply_bytes(psock, tosend);
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		sk_msg_apply_bytes(psock, tosend);
		if (!psock->apply_bytes) {
			/* Clean up before releasing the sock lock. */
			eval = psock->eval;
			psock->eval = __SK_NONE;
			psock->sk_redir = NULL;
		}
		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}
		sk_msg_return(sk, msg, tosend);
		release_sock(sk);

		origsize = msg->sg.size;
		ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    msg, tosend, flags);
		sent = origsize - msg->sg.size;

		if (eval == __SK_REDIRECT)
			sock_put(sk_redir);

		lock_sock(sk);
		if (unlikely(ret < 0)) {
			int free = sk_msg_free_nocharge(sk, msg);

			if (!cork)
				*copied -= free;
		}
		if (cork) {
			sk_msg_free(sk, msg);
			kfree(msg);
			msg = NULL;
			ret = 0;
		}
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, tosend);
		sk_msg_apply_bytes(psock, tosend);
		*copied -= (tosend + delta);
		return -EACCES;
	}

	if (likely(!ret)) {
		if (!psock->apply_bytes) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (msg &&
		    msg->sg.data[msg->sg.start].page_link &&
		    msg->sg.data[msg->sg.start].length) {
			if (eval == __SK_REDIRECT)
				sk_mem_charge(sk, tosend - sent);
			goto more_data;
		}
	}
	return ret;
}
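/* sendmsg handler installed when a msg_parser program is attached. Copies
 * user data into an sk_msg scatterlist (or into the pending psock->cork
 * when corking), then hands the message to tcp_bpf_send_verdict() so the
 * BPF program can decide its fate.
 */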
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sk_msg tmp, *msg_tx = NULL;
	int copied = 0, err = 0;
	struct sk_psock *psock;
	long timeo;
	int flags;

	/* Don't let internal flags through */
	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
	flags |= MSG_NO_SHARED_FRAGS;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendmsg(sk, msg, size);

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	while (msg_data_left(msg)) {
		bool enospc = false;
		u32 copy, osize;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
		if (psock->cork) {
			msg_tx = psock->cork;
		} else {
			msg_tx = &tmp;
			sk_msg_init(msg_tx);
		}

		osize = msg_tx->sg.size;
		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size +
				   copy, msg_tx->sg.end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = msg_tx->sg.size - osize;
		}

		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
					       copy);
		if (err < 0) {
			sk_msg_trim(sk, msg_tx, osize);
			goto out_err;
		}

		copied += copy;
		if (psock->cork_bytes) {
			if (size > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= size;
			if (psock->cork_bytes && !enospc)
				goto out_err;
			/* All cork bytes are accounted, rerun the prog. */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err) {
			if (msg_tx && msg_tx != psock->cork)
				sk_msg_free(sk, msg_tx);
			goto out_err;
		}
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

enum {
	TCP_BPF_IPV4,
	TCP_BPF_IPV6,
	TCP_BPF_NUM_PROTS,
};

enum {
	TCP_BPF_BASE,
	TCP_BPF_TX,
	TCP_BPF_RX,
	TCP_BPF_TXRX,
	TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
				   struct proto *base)
{
	prot[TCP_BPF_BASE]			= *base;
	prot[TCP_BPF_BASE].destroy		= sock_map_destroy;
	prot[TCP_BPF_BASE].close		= sock_map_close;
	prot[TCP_BPF_BASE].recvmsg		= tcp_bpf_recvmsg;
	prot[TCP_BPF_BASE].sock_is_readable	= sk_msg_is_readable;

	prot[TCP_BPF_TX]			= prot[TCP_BPF_BASE];
	prot[TCP_BPF_TX].sendmsg		= tcp_bpf_sendmsg;

	prot[TCP_BPF_RX]			= prot[TCP_BPF_BASE];
	prot[TCP_BPF_RX].recvmsg		= tcp_bpf_recvmsg_parser;

	prot[TCP_BPF_TXRX]			= prot[TCP_BPF_TX];
	prot[TCP_BPF_TXRX].recvmsg		= tcp_bpf_recvmsg_parser;
}

static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(ops != tcpv6_prot_saved)) {
			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
			smp_store_release(&tcpv6_prot_saved, ops);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}
}

static int __init tcp_bpf_v4_build_proto(void)
{
	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
	return 0;
}
late_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
	/* In order to avoid retpoline, we make assumptions when we call
	 * into ops if e.g. a psock is not present. Make sure they are
	 * indeed valid assumptions.
	 */
	return ops->recvmsg == tcp_recvmsg &&
	       ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP;
}
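/* Swap the socket's proto ops for the tcp_bpf variant matching the attached
 * programs: TX when a msg_parser is present, RX or TXRX when a stream/skb
 * verdict program is also attached, BASE otherwise. With @restore set, put
 * back the ops saved in the psock as part of psock teardown.
 */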
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	int config = psock->progs.msg_parser   ? TCP_BPF_TX   : TCP_BPF_BASE;

	if (psock->progs.stream_verdict || psock->progs.skb_verdict) {
		config = (config == TCP_BPF_TX) ?
			 TCP_BPF_TXRX : TCP_BPF_RX;
	}

	if (restore) {
		if (inet_csk_has_ulp(sk)) {
			/* TLS does not have an unhash proto in SW cases,
			 * but we need to ensure we stop using the sock_map
			 * unhash routine because the associated psock is being
			 * removed. So use the original unhash handler.
			 */
			WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
			tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
		} else {
			sk->sk_write_space = psock->saved_write_space;
			/* Pairs with lockless read in sk_clone_lock() */
			sock_replace_proto(sk, psock->sk_proto);
		}
		return 0;
	}

	if (sk->sk_family == AF_INET6) {
		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
			return -EINVAL;

		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
	}

	/* Pairs with lockless read in sk_clone_lock() */
	sock_replace_proto(sk, &tcp_bpf_prots[family][config]);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
	struct proto *prot = newsk->sk_prot;

	if (is_insidevar(prot, tcp_bpf_prots))
		newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */