// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>

#include <net/inet_common.h>
#include <net/tls.h>

static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	u32 size, copied = 0;
	struct sk_msg *tmp;
	int i, ret = 0;

	tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!tmp))
		return -ENOMEM;

	lock_sock(sk);
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
		sge = sk_msg_elem(msg, i);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		if (!sk_wmem_schedule(sk, size)) {
			if (!copied)
				ret = -ENOMEM;
			break;
		}

		sk_mem_charge(sk, size);
		sk_msg_xfer(tmp, msg, i, size);
		copied += size;
		if (sge->length)
			get_page(sk_msg_page(tmp, i));
		sk_msg_iter_var_next(i);
		tmp->sg.end = i;
		if (apply) {
			apply_bytes -= size;
			if (!apply_bytes)
				break;
		}
	} while (i != msg->sg.end);

	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);
		kfree(tmp);
	}

	release_sock(sk);
	return ret;
}

static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
			int flags, bool uncharge)
{
	bool apply = apply_bytes;
	struct scatterlist *sge;
	struct page *page;
	int size, ret = 0;
	u32 off;

	while (1) {
		bool has_tx_ulp;

		sge = sk_msg_elem(msg, msg->sg.start);
		size = (apply && apply_bytes < sge->length) ?
			apply_bytes : sge->length;
		off  = sge->offset;
		page = sg_page(sge);

		tcp_rate_check_app_limited(sk);
retry:
		has_tx_ulp = tls_sw_has_ctx_tx(sk);
		if (has_tx_ulp) {
			flags |= MSG_SENDPAGE_NOPOLICY;
			ret = kernel_sendpage_locked(sk,
						     page, off, size, flags);
		} else {
			ret = do_tcp_sendpages(sk, page, off, size, flags);
		}

		if (ret <= 0)
			return ret;
		if (apply)
			apply_bytes -= ret;
		msg->sg.size -= ret;
		sge->offset += ret;
		sge->length -= ret;
		if (uncharge)
			sk_mem_uncharge(sk, ret);
		if (ret != size) {
			size -= ret;
			off  += ret;
			goto retry;
		}
		if (!sge->length) {
			put_page(page);
			sk_msg_iter_next(msg, start);
			sg_init_table(sge, 1);
			if (msg->sg.start == msg->sg.end)
				break;
		}
		if (apply && !apply_bytes)
			break;
	}

	return 0;
}

static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
			       u32 apply_bytes, int flags, bool uncharge)
{
	int ret;

	lock_sock(sk);
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
	release_sock(sk);
	return ret;
}

int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
			  u32 bytes, int flags)
{
	bool ingress = sk_msg_to_ingress(msg);
	struct sk_psock *psock = sk_psock_get(sk);
	int ret;

	if (unlikely(!psock)) {
		sk_msg_free(sk, msg);
		return 0;
	}
	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
	sk_psock_put(sk, psock);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_SYSCALL
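/* Wait up to @timeo for data to land on either the psock ingress_msg
 * list or the socket receive queue. Returns nonzero when data is
 * available or the receive side has been shut down, zero on timeout.
 */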
static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
			     long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}

static int tcp_bpf_recvmsg_parser(struct sock *sk,
				  struct msghdr *msg,
				  size_t len,
				  int nonblock,
				  int flags,
				  int *addr_len)
{
	struct sk_psock *psock;
	int copied;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);

	lock_sock(sk);
msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		long timeo;
		int data;

		if (sock_flag(sk, SOCK_DONE))
			goto out;

		if (sk->sk_err) {
			copied = sock_error(sk);
			goto out;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			goto out;

		if (sk->sk_state == TCP_CLOSE) {
			copied = -ENOTCONN;
			goto out;
		}

		timeo = sock_rcvtimeo(sk, nonblock);
		if (!timeo) {
			copied = -EAGAIN;
			goto out;
		}

		if (signal_pending(current)) {
			copied = sock_intr_errno(timeo);
			goto out;
		}

		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data && !sk_psock_queue_empty(psock))
			goto msg_bytes_ready;
		copied = -EAGAIN;
	}
out:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied;
}

static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int nonblock, int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	if (!skb_queue_empty(&sk->sk_receive_queue) &&
	    sk_psock_queue_empty(psock)) {
		sk_psock_put(sk, psock);
		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
	}
	lock_sock(sk);
msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		long timeo;
		int data;

		timeo = sock_rcvtimeo(sk, nonblock);
		data = tcp_msg_wait_data(sk, psock, timeo);
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			release_sock(sk);
			sk_psock_put(sk, psock);
			return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
		}
		copied = -EAGAIN;
	}
	ret = copied;
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}

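/* Apply the BPF msg verdict to @msg: __SK_PASS pushes the data out the
 * local socket, __SK_REDIRECT forwards it to the selected redirect
 * socket (egress or ingress), and __SK_DROP frees it and returns
 * -EACCES. When cork_bytes is set, data is stashed on the psock until
 * enough bytes have accumulated to rerun the verdict program.
 */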
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
				struct sk_msg *msg, int *copied, int flags)
{
	bool cork = false, enospc = sk_msg_full(msg);
	struct sock *sk_redir;
	u32 tosend, delta = 0;
	u32 eval = __SK_NONE;
	int ret;

more_data:
	if (psock->eval == __SK_NONE) {
		/* Track delta in msg size to add/subtract it on SK_DROP from
		 * returned to user copied size. This ensures user doesn't
		 * get a positive return code with msg_cut_data and SK_DROP
		 * verdict.
		 */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}

	if (msg->cork_bytes &&
	    msg->cork_bytes > msg->sg.size && !enospc) {
		psock->cork_bytes = msg->cork_bytes - msg->sg.size;
		if (!psock->cork) {
			psock->cork = kzalloc(sizeof(*psock->cork),
					      GFP_ATOMIC | __GFP_NOWARN);
			if (!psock->cork)
				return -ENOMEM;
		}
		memcpy(psock->cork, msg, sizeof(*msg));
		return 0;
	}

	tosend = msg->sg.size;
	if (psock->apply_bytes && psock->apply_bytes < tosend)
		tosend = psock->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
		if (unlikely(ret)) {
			*copied -= sk_msg_free(sk, msg);
			break;
		}
		sk_msg_apply_bytes(psock, tosend);
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		sk_msg_apply_bytes(psock, tosend);
		if (!psock->apply_bytes) {
			/* Clean up before releasing the sock lock. */
			eval = psock->eval;
			psock->eval = __SK_NONE;
			psock->sk_redir = NULL;
		}
		if (psock->cork) {
			cork = true;
			psock->cork = NULL;
		}
		sk_msg_return(sk, msg, msg->sg.size);
		release_sock(sk);

		ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);

		if (eval == __SK_REDIRECT)
			sock_put(sk_redir);

		lock_sock(sk);
		if (unlikely(ret < 0)) {
			int free = sk_msg_free_nocharge(sk, msg);

			if (!cork)
				*copied -= free;
		}
		if (cork) {
			sk_msg_free(sk, msg);
			kfree(msg);
			msg = NULL;
			ret = 0;
		}
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, tosend);
		sk_msg_apply_bytes(psock, tosend);
		*copied -= (tosend + delta);
		return -EACCES;
	}

	if (likely(!ret)) {
		if (!psock->apply_bytes) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (msg &&
		    msg->sg.data[msg->sg.start].page_link &&
		    msg->sg.data[msg->sg.start].length) {
			if (eval == __SK_REDIRECT)
				sk_mem_charge(sk, msg->sg.size);
			goto more_data;
		}
	}
	return ret;
}

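/* sendmsg() replacement installed in tcp_bpf_prots: copy user data into
 * an sk_msg, honour socket memory accounting and cork_bytes, then hand
 * the result to tcp_bpf_send_verdict(). Falls back to tcp_sendmsg()
 * when no psock is attached.
 */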
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct sk_msg tmp, *msg_tx = NULL;
	int copied = 0, err = 0;
	struct sk_psock *psock;
	long timeo;
	int flags;

	/* Don't let internal do_tcp_sendpages() flags through */
	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
	flags |= MSG_NO_SHARED_FRAGS;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendmsg(sk, msg, size);

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	while (msg_data_left(msg)) {
		bool enospc = false;
		u32 copy, osize;

		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		}

		copy = msg_data_left(msg);
		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
		if (psock->cork) {
			msg_tx = psock->cork;
		} else {
			msg_tx = &tmp;
			sk_msg_init(msg_tx);
		}

		osize = msg_tx->sg.size;
		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
		if (err) {
			if (err != -ENOSPC)
				goto wait_for_memory;
			enospc = true;
			copy = msg_tx->sg.size - osize;
		}

		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
					       copy);
		if (err < 0) {
			sk_msg_trim(sk, msg_tx, osize);
			goto out_err;
		}

		copied += copy;
		if (psock->cork_bytes) {
			if (size > psock->cork_bytes)
				psock->cork_bytes = 0;
			else
				psock->cork_bytes -= size;
			if (psock->cork_bytes && !enospc)
				goto out_err;
			/* All cork bytes are accounted, rerun the prog. */
			psock->eval = __SK_NONE;
			psock->cork_bytes = 0;
		}

		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
		if (unlikely(err < 0))
			goto out_err;
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		err = sk_stream_wait_memory(sk, &timeo);
		if (err) {
			if (msg_tx && msg_tx != psock->cork)
				sk_msg_free(sk, msg_tx);
			goto out_err;
		}
	}
out_err:
	if (err < 0)
		err = sk_stream_error(sk, msg->msg_flags, err);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

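/* sendpage() replacement: add the page to an sk_msg (respecting
 * cork_bytes) and run the verdict path. Falls back to tcp_sendpage()
 * when no psock is attached.
 */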
static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
			    size_t size, int flags)
{
	struct sk_msg tmp, *msg = NULL;
	int err = 0, copied = 0;
	struct sk_psock *psock;
	bool enospc = false;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return tcp_sendpage(sk, page, offset, size, flags);

	lock_sock(sk);
	if (psock->cork) {
		msg = psock->cork;
	} else {
		msg = &tmp;
		sk_msg_init(msg);
	}

	/* Catch case where ring is full and sendpage is stalled. */
	if (unlikely(sk_msg_full(msg)))
		goto out_err;

	sk_msg_page_add(msg, page, size, offset);
	sk_mem_charge(sk, size);
	copied = size;
	if (sk_msg_full(msg))
		enospc = true;
	if (psock->cork_bytes) {
		if (size > psock->cork_bytes)
			psock->cork_bytes = 0;
		else
			psock->cork_bytes -= size;
		if (psock->cork_bytes && !enospc)
			goto out_err;
		/* All cork bytes are accounted, rerun the prog. */
		psock->eval = __SK_NONE;
		psock->cork_bytes = 0;
	}

	err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
out_err:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied ? copied : err;
}

enum {
	TCP_BPF_IPV4,
	TCP_BPF_IPV6,
	TCP_BPF_NUM_PROTS,
};

enum {
	TCP_BPF_BASE,
	TCP_BPF_TX,
	TCP_BPF_RX,
	TCP_BPF_TXRX,
	TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
				   struct proto *base)
{
	prot[TCP_BPF_BASE] = *base;
	prot[TCP_BPF_BASE].close = sock_map_close;
	prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
	prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable;

	prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
	prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
	prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;

	prot[TCP_BPF_RX] = prot[TCP_BPF_BASE];
	prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser;

	prot[TCP_BPF_TXRX] = prot[TCP_BPF_TX];
	prot[TCP_BPF_TXRX].recvmsg = tcp_bpf_recvmsg_parser;
}

static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
		spin_lock_bh(&tcpv6_prot_lock);
		if (likely(ops != tcpv6_prot_saved)) {
			tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
			smp_store_release(&tcpv6_prot_saved, ops);
		}
		spin_unlock_bh(&tcpv6_prot_lock);
	}
}

static int __init tcp_bpf_v4_build_proto(void)
{
	tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
	return 0;
}
late_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
	/* In order to avoid retpoline, we make assumptions when we call
	 * into ops if e.g. a psock is not present. Make sure they are
	 * indeed valid assumptions.
	 */
	return ops->recvmsg == tcp_recvmsg &&
	       ops->sendmsg == tcp_sendmsg &&
	       ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}

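/* Swap sk->sk_prot to the tcp_bpf proto variant that matches the
 * attached programs (TX, RX, TXRX or BASE), or restore the original
 * proto when @restore is true. Taking over a socket that already runs
 * a ULP is refused with -EINVAL.
 */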
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

	if (psock->progs.stream_verdict || psock->progs.skb_verdict) {
		config = (config == TCP_BPF_TX) ? TCP_BPF_TXRX : TCP_BPF_RX;
	}

	if (restore) {
		if (inet_csk_has_ulp(sk)) {
			/* TLS does not have an unhash proto in SW cases,
			 * but we need to ensure we stop using the sock_map
			 * unhash routine because the associated psock is being
			 * removed. So use the original unhash handler.
			 */
			WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
			tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
		} else {
			sk->sk_write_space = psock->saved_write_space;
			/* Pairs with lockless read in sk_clone_lock() */
			WRITE_ONCE(sk->sk_prot, psock->sk_proto);
		}
		return 0;
	}

	if (inet_csk_has_ulp(sk))
		return -EINVAL;

	if (sk->sk_family == AF_INET6) {
		if (tcp_bpf_assert_proto_ops(psock->sk_proto))
			return -EINVAL;

		tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
	}

	/* Pairs with lockless read in sk_clone_lock() */
	WRITE_ONCE(sk->sk_prot, &tcp_bpf_prots[family][config]);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
	struct proto *prot = newsk->sk_prot;

	if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
		newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_SYSCALL */