// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	raw_spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK				\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which);
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->max_entries == 0 ||
	    attr->key_size    != 4 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	stab = kzalloc(sizeof(*stab), GFP_USER | __GFP_ACCOUNT);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);
	raw_spin_lock_init(&stab->lock);

	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
				       sizeof(struct sock *),
				       stab->map.numa_node);
	if (!stab->sks) {
		kfree(stab);
		return ERR_PTR(-ENOMEM);
	}

	return &stab->map;
}

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	u32 ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
	fdput(f);
	return ret;
}

int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	u32 ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get(attr->attach_bpf_fd);
	if (IS_ERR(prog)) {
		ret = PTR_ERR(prog);
		goto put_map;
	}

	if (prog->type != ptype) {
		ret = -EINVAL;
		goto put_prog;
	}

	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
put_prog:
	bpf_prog_put(prog);
put_map:
	fdput(f);
	return ret;
}

static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	lock_sock(sk);
	preempt_disable();
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	rcu_read_unlock();
	preempt_enable();
	release_sock(sk);
}

static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
{
	link->link_raw = link_raw;
	link->map = map;
	spin_lock_bh(&psock->link_lock);
	list_add_tail(&link->list, &psock->link);
	spin_unlock_bh(&psock->link_lock);
}
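
/* Unlink @psock from the map entry identified by @link_raw. If the map
 * was supplying a parser or verdict program, also stop the corresponding
 * data_ready hook and let the protocol re-select its sk_prot callbacks.
 */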
static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
{
	bool strp_stop = false, verdict_stop = false;
	struct sk_psock_link *link, *tmp;

	spin_lock_bh(&psock->link_lock);
	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		if (link->link_raw == link_raw) {
			struct bpf_map *map = link->map;
			struct bpf_stab *stab = container_of(map, struct bpf_stab,
							     map);
			if (psock->saved_data_ready && stab->progs.stream_parser)
				strp_stop = true;
			if (psock->saved_data_ready && stab->progs.stream_verdict)
				verdict_stop = true;
			if (psock->saved_data_ready && stab->progs.skb_verdict)
				verdict_stop = true;
			list_del(&link->list);
			sk_psock_free_link(link);
		}
	}
	spin_unlock_bh(&psock->link_lock);
	if (strp_stop || verdict_stop) {
		write_lock_bh(&sk->sk_callback_lock);
		if (strp_stop)
			sk_psock_stop_strp(sk, psock);
		if (verdict_stop)
			sk_psock_stop_verdict(sk, psock);

		if (psock->psock_update_sk_prot)
			psock->psock_update_sk_prot(sk, psock, false);
		write_unlock_bh(&sk->sk_callback_lock);
	}
}

static void sock_map_unref(struct sock *sk, void *link_raw)
{
	struct sk_psock *psock = sk_psock(sk);

	if (likely(psock)) {
		sock_map_del_link(sk, psock, link_raw);
		sk_psock_put(sk, psock);
	}
}

static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
	if (!sk->sk_prot->psock_update_sk_prot)
		return -EINVAL;
	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
}

static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->close != sock_map_close) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}
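
/* Bind the map's current programs to @sk's psock, allocating the psock
 * on first use. A reference is taken on each program up front so a
 * concurrent detach cannot free it mid-update; on success the references
 * are owned by the psock and dropped by its destructor.
 */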
static int sock_map_link(struct bpf_map *map, struct sock *sk)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog *stream_verdict = NULL;
	struct bpf_prog *stream_parser = NULL;
	struct bpf_prog *skb_verdict = NULL;
	struct bpf_prog *msg_parser = NULL;
	struct sk_psock *psock;
	int ret;

	stream_verdict = READ_ONCE(progs->stream_verdict);
	if (stream_verdict) {
		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
		if (IS_ERR(stream_verdict))
			return PTR_ERR(stream_verdict);
	}

	stream_parser = READ_ONCE(progs->stream_parser);
	if (stream_parser) {
		stream_parser = bpf_prog_inc_not_zero(stream_parser);
		if (IS_ERR(stream_parser)) {
			ret = PTR_ERR(stream_parser);
			goto out_put_stream_verdict;
		}
	}

	msg_parser = READ_ONCE(progs->msg_parser);
	if (msg_parser) {
		msg_parser = bpf_prog_inc_not_zero(msg_parser);
		if (IS_ERR(msg_parser)) {
			ret = PTR_ERR(msg_parser);
			goto out_put_stream_parser;
		}
	}

	skb_verdict = READ_ONCE(progs->skb_verdict);
	if (skb_verdict) {
		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
		if (IS_ERR(skb_verdict)) {
			ret = PTR_ERR(skb_verdict);
			goto out_put_msg_parser;
		}
	}

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock)) {
		ret = PTR_ERR(psock);
		goto out_progs;
	}

	if (psock) {
		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
			sk_psock_put(sk, psock);
			ret = -EBUSY;
			goto out_progs;
		}
	} else {
		psock = sk_psock_init(sk, map->numa_node);
		if (IS_ERR(psock)) {
			ret = PTR_ERR(psock);
			goto out_progs;
		}
	}

	if (msg_parser)
		psock_set_prog(&psock->progs.msg_parser, msg_parser);
	if (stream_parser)
		psock_set_prog(&psock->progs.stream_parser, stream_parser);
	if (stream_verdict)
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
	if (skb_verdict)
		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);

	/* References to the msg_* and stream_* programs are tracked in the
	 * psock after this point. Reference dec and cleanup will occur
	 * through the psock destructor.
	 */
	ret = sock_map_init_proto(sk, psock);
	if (ret < 0) {
		sk_psock_put(sk, psock);
		goto out;
	}

	write_lock_bh(&sk->sk_callback_lock);
	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
		ret = sk_psock_init_strp(sk, psock);
		if (ret) {
			write_unlock_bh(&sk->sk_callback_lock);
			sk_psock_put(sk, psock);
			goto out;
		}
		sk_psock_start_strp(sk, psock);
	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
out_progs:
	if (skb_verdict)
		bpf_prog_put(skb_verdict);
out_put_msg_parser:
	if (msg_parser)
		bpf_prog_put(msg_parser);
out_put_stream_parser:
	if (stream_parser)
		bpf_prog_put(stream_parser);
out_put_stream_verdict:
	if (stream_verdict)
		bpf_prog_put(stream_verdict);
out:
	return ret;
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk the map and remove entries without risking a
	 * race in the EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock **psk = &stab->sks[i];
		struct sock *sk;

		sk = xchg(psk, NULL);
		if (sk) {
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(stab->sks);
	kfree(stab);
}

static void sock_map_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
}

static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(key >= map->max_entries))
		return NULL;
	return READ_ONCE(stab->sks[key]);
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
{
	struct sock *sk;
	int err = 0;

	raw_spin_lock_bh(&stab->lock);
	sk = *psk;
	if (!sk_test || sk_test == sk)
		sk = xchg(psk, NULL);

	if (likely(sk))
		sock_map_unref(sk, psk);
	else
		err = -EINVAL;

	raw_spin_unlock_bh(&stab->lock);
	return err;
}

static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	__sock_map_delete(stab, sk, link_raw);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = *(u32 *)key;
	struct sock **psk;

	if (unlikely(i >= map->max_entries))
		return -EINVAL;

	psk = &stab->sks[i];
	return __sock_map_delete(stab, NULL, psk);
}
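
/* Iterate slots in index order; a NULL or out-of-range key restarts the
 * walk from slot zero.
 */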
static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *key_next = next;

	if (i == stab->map.max_entries - 1)
		return -ENOENT;
	if (i >= stab->map.max_entries)
		*key_next = 0;
	else
		*key_next = i + 1;
	return 0;
}

static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct sk_psock_link *link;
	struct sk_psock *psock;
	struct sock *osk;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(idx >= map->max_entries))
		return -E2BIG;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	raw_spin_lock_bh(&stab->lock);
	osk = stab->sks[idx];
	if (osk && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!osk && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, &stab->sks[idx]);
	stab->sks[idx] = sk;
	if (osk)
		sock_map_unref(osk, &stab->sks[idx]);
	raw_spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	raw_spin_unlock_bh(&stab->lock);
	if (psock)
		sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}

static bool sock_map_redirect_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return sk->sk_state != TCP_LISTEN;
	else
		return sk->sk_state == TCP_ESTABLISHED;
}

static bool sock_map_sk_is_suitable(const struct sock *sk)
{
	return !!sk->sk_prot->psock_update_sk_prot;
}

static bool sock_map_sk_state_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
	return true;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags);

int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
			     u64 flags)
{
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	sock_map_sk_release(sk);
out:
	sockfd_put(sock);
	return ret;
}
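
/* Update path used from BPF program context: runs under RCU and cannot
 * sleep, so the socket is pinned with the non-sleeping bottom-half lock
 * rather than lock_sock().
 */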
static int sock_map_update_elem(struct bpf_map *map, void *key,
				void *value, u64 flags)
{
	struct sock *sk = (struct sock *)value;
	int ret;

	if (unlikely(!sk || !sk_fullsock(sk)))
		return -EINVAL;

	if (!sock_map_sk_is_suitable(sk))
		return -EOPNOTSUPP;

	local_bh_disable();
	bh_lock_sock(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	bh_unlock_sock(sk);
	local_bh_enable();
	return ret;
}

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_map_update_common(map, *(u32 *)key, sops->sk,
					      flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func		= bpf_sk_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func		= bpf_msg_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
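
/* bpf_iter support for sockmap: walk the map under RCU and hand each
 * (key, sk) pair to the iterator program, with a final NULL pass to
 * signal the end of the walk.
 */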
struct sock_map_seq_info {
	struct bpf_map *map;
	struct sock *sk;
	u32 index;
};

struct bpf_iter__sockmap {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(struct sock *, sk);
};

DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key,
		     struct sock *sk)

static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
{
	if (unlikely(info->index >= info->map->max_entries))
		return NULL;

	info->sk = __sock_map_lookup_elem(info->map, info->index);

	/* can't return sk directly, since that might be NULL */
	return info;
}

static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_map_seq_stop */
	rcu_read_lock();
	return sock_map_seq_lookup_elem(info);
}

static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	++*pos;
	++info->index;

	return sock_map_seq_lookup_elem(info);
}

static int sock_map_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !v);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;
		ctx.sk = info->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_map_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_map_seq_show(seq, NULL);

	/* pairs with sock_map_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_map_seq_ops = {
	.start	= sock_map_seq_start,
	.next	= sock_map_seq_next,
	.stop	= sock_map_seq_stop,
	.show	= sock_map_seq_show,
};

static int sock_map_init_seq_private(void *priv_data,
				     struct bpf_iter_aux_info *aux)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	return 0;
}

static void sock_map_fini_seq_private(void *priv_data)
{
	struct sock_map_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
	.seq_ops		= &sock_map_seq_ops,
	.init_seq_private	= sock_map_init_seq_private,
	.fini_seq_private	= sock_map_fini_seq_private,
	.seq_priv_size		= sizeof(struct sock_map_seq_info),
};

BTF_ID_LIST_SINGLE(sock_map_btf_ids, struct, bpf_stab)
const struct bpf_map_ops sock_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_map_alloc,
	.map_free		= sock_map_free,
	.map_get_next_key	= sock_map_get_next_key,
	.map_lookup_elem_sys_only = sock_map_lookup_sys,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_map_delete_elem,
	.map_lookup_elem	= sock_map_lookup,
	.map_release_uref	= sock_map_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_btf_id		= &sock_map_btf_ids[0],
	.iter_seq_info		= &sock_map_iter_seq_info,
};
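
/* SOCKHASH: the same psock plumbing as sockmap, but keyed by a
 * caller-chosen fixed-size key hashed into per-bucket RCU hlists, with
 * each bucket guarded by a raw spinlock.
 */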
struct bpf_shtab_elem {
	struct rcu_head rcu;
	u32 hash;
	struct sock *sk;
	struct hlist_node node;
	u8 key[];
};

struct bpf_shtab_bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_shtab {
	struct bpf_map map;
	struct bpf_shtab_bucket *buckets;
	u32 buckets_num;
	u32 elem_size;
	struct sk_psock_progs progs;
	atomic_t count;
};

static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
{
	return jhash(key, len, 0);
}

static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
							 u32 hash)
{
	return &htab->buckets[hash & (htab->buckets_num - 1)];
}

static struct bpf_shtab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
{
	struct bpf_shtab_elem *elem;

	hlist_for_each_entry_rcu(elem, head, node) {
		if (elem->hash == hash &&
		    !memcmp(&elem->key, key, key_size))
			return elem;
	}

	return NULL;
}

static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);

	return elem ? elem->sk : NULL;
}

static void sock_hash_free_elem(struct bpf_shtab *htab,
				struct bpf_shtab_elem *elem)
{
	atomic_dec(&htab->count);
	kfree_rcu(elem, rcu);
}

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
	struct bpf_shtab_bucket *bucket;

	WARN_ON_ONCE(!rcu_read_lock_held());
	bucket = sock_hash_select_bucket(htab, elem->hash);

	/* elem may be deleted in parallel from the map, but access here
	 * is okay since it's going away only after RCU grace period.
	 * However, we need to check whether it's still present.
	 */
	raw_spin_lock_bh(&bucket->lock);
	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
					       elem->key, map->key_size);
	if (elem_probe && elem_probe == elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	raw_spin_unlock_bh(&bucket->lock);
}

static int sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	int ret = -ENOENT;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	raw_spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	raw_spin_unlock_bh(&bucket->lock);
	return ret;
}

static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
						   void *key, u32 key_size,
						   u32 hash, struct sock *sk,
						   struct bpf_shtab_elem *old)
{
	struct bpf_shtab_elem *new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
				   GFP_ATOMIC | __GFP_NOWARN,
				   htab->map.numa_node);
	if (!new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(new->key, key, key_size);
	new->sk = sk;
	new->hash = hash;
	return new;
}
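
/* Update path shared by the syscall and BPF helpers: link @sk's psock
 * to the map, then insert the new element at the bucket head so a
 * concurrent lookup finds it before any element it replaces.
 */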
static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_elem *elem, *elem_new;
	struct bpf_shtab_bucket *bucket;
	struct sk_psock_link *link;
	struct sk_psock *psock;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	raw_spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!elem && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
	if (IS_ERR(elem_new)) {
		ret = PTR_ERR(elem_new);
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, elem_new);
	/* Add new element to the head of the list, so that
	 * concurrent search will find it before old elem.
	 */
	hlist_add_head_rcu(&elem_new->node, &bucket->head);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	raw_spin_unlock_bh(&bucket->lock);
	return 0;
out_unlock:
	raw_spin_unlock_bh(&bucket->lock);
	sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}
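
/* Walk elements bucket by bucket; a NULL or no-longer-present key
 * restarts the walk from the first bucket.
 */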
static int sock_hash_get_next_key(struct bpf_map *map, void *key,
				  void *key_next)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem, *elem_next;
	u32 hash, key_size = map->key_size;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first_elem;
	hash = sock_hash_bucket_hash(key, key_size);
	head = &sock_hash_select_bucket(htab, hash)->head;
	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
	if (!elem)
		goto find_first_elem;

	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
				     struct bpf_shtab_elem, node);
	if (elem_next) {
		memcpy(key_next, elem_next->key, key_size);
		return 0;
	}

	i = hash & (htab->buckets_num - 1);
	i++;
find_first_elem:
	for (; i < htab->buckets_num; i++) {
		head = &sock_hash_select_bucket(htab, i)->head;
		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
					     struct bpf_shtab_elem, node);
		if (elem_next) {
			memcpy(key_next, elem_next->key, key_size);
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_shtab *htab;
	int i, err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->max_entries == 0 ||
	    attr->key_size    == 0 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);
	if (attr->key_size > MAX_BPF_STACK)
		return ERR_PTR(-E2BIG);

	htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct bpf_shtab_elem) +
			  round_up(htab->map.key_size, 8);
	if (htab->buckets_num == 0 ||
	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
		err = -EINVAL;
		goto free_htab;
	}

	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
					   sizeof(struct bpf_shtab_bucket),
					   htab->map.numa_node);
	if (!htab->buckets) {
		err = -ENOMEM;
		goto free_htab;
	}

	for (i = 0; i < htab->buckets_num; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_bucket *bucket;
	struct hlist_head unlink_list;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk the map and remove entries without risking a
	 * race in the EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < htab->buckets_num; i++) {
		bucket = sock_hash_select_bucket(htab, i);

		/* We are racing with sock_hash_delete_from_link to
		 * enter the spin-lock critical section. Every socket on
		 * the list is still linked to sockhash. Since link
		 * exists, psock exists and holds a ref to socket. That
		 * lets us grab a socket ref too.
		 */
		raw_spin_lock_bh(&bucket->lock);
		hlist_for_each_entry(elem, &bucket->head, node)
			sock_hold(elem->sk);
		hlist_move_list(&bucket->head, &unlink_list);
		raw_spin_unlock_bh(&bucket->lock);

		/* Process removed entries out of atomic context so we
		 * can block on the socket lock before deleting the
		 * psock's link to sockhash.
		 */
		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
			hlist_del(&elem->node);
			lock_sock(elem->sk);
			rcu_read_lock();
			sock_map_unref(elem->sk, elem);
			rcu_read_unlock();
			release_sock(elem->sk);
			sock_put(elem->sk);
			sock_hash_free_elem(htab, elem);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static void *sock_hash_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void sock_hash_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
}

BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_hash_update_common(map, key, sops->sk, flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func		= bpf_sock_hash_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};
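
/* Redirect helpers for sockhash: look up the target socket in the map
 * and stash it for the skb/msg redirect machinery. Only sockets that
 * pass sock_map_redirect_allowed() are valid targets.
 */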
BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
	.func		= bpf_sk_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
	.func		= bpf_msg_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

struct sock_hash_seq_info {
	struct bpf_map *map;
	struct bpf_shtab *htab;
	u32 bucket_id;
};

static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
				     struct bpf_shtab_elem *prev_elem)
{
	const struct bpf_shtab *htab = info->htab;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;

		/* no more elements, continue in the next bucket */
		info->bucket_id++;
	}

	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
		bucket = &htab->buckets[info->bucket_id];
		node = rcu_dereference(hlist_first_rcu(&bucket->head));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;
	}

	return NULL;
}

static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_hash_seq_stop */
	rcu_read_lock();
	return sock_hash_seq_find_next(info, NULL);
}

static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	++*pos;
	return sock_hash_seq_find_next(info, v);
}

static int sock_hash_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_shtab_elem *elem = v;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !elem);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (elem) {
		ctx.key = elem->key;
		ctx.sk = elem->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_hash_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_hash_seq_show(seq, NULL);

	/* pairs with sock_hash_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_hash_seq_ops = {
	.start	= sock_hash_seq_start,
	.next	= sock_hash_seq_next,
	.stop	= sock_hash_seq_stop,
	.show	= sock_hash_seq_show,
};

static int sock_hash_init_seq_private(void *priv_data,
				      struct bpf_iter_aux_info *aux)
{
	struct sock_hash_seq_info *info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	info->map = aux->map;
	info->htab = container_of(aux->map, struct bpf_shtab, map);
	return 0;
}

static void sock_hash_fini_seq_private(void *priv_data)
{
	struct sock_hash_seq_info *info = priv_data;

	bpf_map_put_with_uref(info->map);
}

static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
	.seq_ops		= &sock_hash_seq_ops,
	.init_seq_private	= sock_hash_init_seq_private,
	.fini_seq_private	= sock_hash_fini_seq_private,
	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
};

BTF_ID_LIST_SINGLE(sock_hash_map_btf_ids, struct, bpf_shtab)
const struct bpf_map_ops sock_hash_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_hash_alloc,
	.map_free		= sock_hash_free,
	.map_get_next_key	= sock_hash_get_next_key,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_hash_delete_elem,
	.map_lookup_elem	= sock_hash_lookup,
	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
	.map_release_uref	= sock_hash_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_btf_id		= &sock_hash_map_btf_ids[0],
	.iter_seq_info		= &sock_hash_iter_seq_info,
};

static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return &container_of(map, struct bpf_stab, map)->progs;
	case BPF_MAP_TYPE_SOCKHASH:
		return &container_of(map, struct bpf_shtab, map)->progs;
	default:
		break;
	}

	return NULL;
}

static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
				u32 which)
{
	struct sk_psock_progs *progs = sock_map_progs(map);

	if (!progs)
		return -EOPNOTSUPP;

	switch (which) {
	case BPF_SK_MSG_VERDICT:
		*pprog = &progs->msg_parser;
		break;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	case BPF_SK_SKB_STREAM_PARSER:
		*pprog = &progs->stream_parser;
		break;
#endif
	case BPF_SK_SKB_STREAM_VERDICT:
		if (progs->skb_verdict)
			return -EBUSY;
		*pprog = &progs->stream_verdict;
		break;
	case BPF_SK_SKB_VERDICT:
		if (progs->stream_verdict)
			return -EBUSY;
		*pprog = &progs->skb_verdict;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which)
{
	struct bpf_prog **pprog;
	int ret;

	ret = sock_map_prog_lookup(map, &pprog, which);
	if (ret)
		return ret;

	if (old)
		return psock_replace_prog(pprog, prog, old);

	psock_set_prog(pprog, prog);
	return 0;
}
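
/* BPF_PROG_QUERY for sockmap attach points: at most one program can be
 * attached per hook, so the reported count is either zero or one.
 */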
int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
	struct bpf_prog **pprog;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	u32 id = 0;
	int ret;

	if (attr->query.query_flags)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	rcu_read_lock();

	ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
	if (ret)
		goto end;

	prog = *pprog;
	prog_cnt = !prog ? 0 : 1;

	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
		goto end;

	/* we do not hold the refcnt, the bpf prog may be released
	 * asynchronously and the id would be set to 0.
	 */
	id = data_race(prog->aux->id);
	if (id == 0)
		prog_cnt = 0;

end:
	rcu_read_unlock();

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
		ret = -EFAULT;

	fdput(f);
	return ret;
}

static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
{
	switch (link->map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return sock_map_delete_from_link(link->map, sk,
						 link->link_raw);
	case BPF_MAP_TYPE_SOCKHASH:
		return sock_hash_delete_from_link(link->map, sk,
						  link->link_raw);
	default:
		break;
	}
}

static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_link *link;

	while ((link = sk_psock_link_pop(psock))) {
		sock_map_unlink(sk, link);
		sk_psock_free_link(link);
	}
}

void sock_map_unhash(struct sock *sk)
{
	void (*saved_unhash)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		if (sk->sk_prot->unhash)
			sk->sk_prot->unhash(sk);
		return;
	}

	saved_unhash = psock->saved_unhash;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	saved_unhash(sk);
}
EXPORT_SYMBOL_GPL(sock_map_unhash);

void sock_map_destroy(struct sock *sk)
{
	void (*saved_destroy)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		if (sk->sk_prot->destroy)
			sk->sk_prot->destroy(sk);
		return;
	}

	saved_destroy = psock->saved_destroy;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	sk_psock_stop(psock, false);
	sk_psock_put(sk, psock);
	saved_destroy(sk);
}
EXPORT_SYMBOL_GPL(sock_map_destroy);

void sock_map_close(struct sock *sk, long timeout)
{
	void (*saved_close)(struct sock *sk, long timeout);
	struct sk_psock *psock;

	lock_sock(sk);
	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		release_sock(sk);
		return sk->sk_prot->close(sk, timeout);
	}

	saved_close = psock->saved_close;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	sk_psock_stop(psock, true);
	sk_psock_put(sk, psock);
	release_sock(sk);
	saved_close(sk, timeout);
}
EXPORT_SYMBOL_GPL(sock_map_close);
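
/* bpf_iter attach target: hold a uref on the map for the iterator's
 * lifetime and reject programs that would read more key bytes than the
 * map's key_size.
 */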
static int sock_map_iter_attach_target(struct bpf_prog *prog,
				       union bpf_iter_link_info *linfo,
				       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->key_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static struct bpf_iter_reg sock_map_iter_reg = {
	.target			= "sockmap",
	.attach_target		= sock_map_iter_attach_target,
	.detach_target		= sock_map_iter_detach_target,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__sockmap, key),
		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
		{ offsetof(struct bpf_iter__sockmap, sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
};

static int __init bpf_sockmap_iter_init(void)
{
	sock_map_iter_reg.ctx_arg_info[1].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&sock_map_iter_reg);
}
late_initcall(bpf_sockmap_iter_init);