// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	raw_spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK				\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which);
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->max_entries == 0 ||
	    attr->key_size    != 4 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	stab = kzalloc(sizeof(*stab), GFP_USER | __GFP_ACCOUNT);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);
	raw_spin_lock_init(&stab->lock);

	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
				       sizeof(struct sock *),
				       stab->map.numa_node);
	if (!stab->sks) {
		kfree(stab);
		return ERR_PTR(-ENOMEM);
	}

	return &stab->map;
}

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	u32 ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
	fdput(f);
	return ret;
}

int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	u32 ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get(attr->attach_bpf_fd);
	if (IS_ERR(prog)) {
		ret = PTR_ERR(prog);
		goto put_map;
	}

	if (prog->type != ptype) {
		ret = -EINVAL;
		goto put_prog;
	}

	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
put_prog:
	bpf_prog_put(prog);
put_map:
	fdput(f);
	return ret;
}

static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	lock_sock(sk);
	preempt_disable();
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	rcu_read_unlock();
	preempt_enable();
	release_sock(sk);
}

static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
{
	link->link_raw = link_raw;
	link->map = map;
	spin_lock_bh(&psock->link_lock);
	list_add_tail(&link->list, &psock->link);
	spin_unlock_bh(&psock->link_lock);
}

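/* Drop every psock link that points back at the given map slot (link_raw).
 * If the departing map was the one supplying the strparser or verdict
 * program, also unhook the saved ->sk_data_ready path so no further
 * parsing runs against this socket.
 */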
static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
{
	bool strp_stop = false, verdict_stop = false;
	struct sk_psock_link *link, *tmp;

	spin_lock_bh(&psock->link_lock);
	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		if (link->link_raw == link_raw) {
			struct bpf_map *map = link->map;
			struct bpf_stab *stab = container_of(map, struct bpf_stab,
							     map);
			if (psock->saved_data_ready && stab->progs.stream_parser)
				strp_stop = true;
			if (psock->saved_data_ready && stab->progs.stream_verdict)
				verdict_stop = true;
			if (psock->saved_data_ready && stab->progs.skb_verdict)
				verdict_stop = true;
			list_del(&link->list);
			sk_psock_free_link(link);
		}
	}
	spin_unlock_bh(&psock->link_lock);
	if (strp_stop || verdict_stop) {
		write_lock_bh(&sk->sk_callback_lock);
		if (strp_stop)
			sk_psock_stop_strp(sk, psock);
		else
			sk_psock_stop_verdict(sk, psock);
		write_unlock_bh(&sk->sk_callback_lock);
	}
}

static void sock_map_unref(struct sock *sk, void *link_raw)
{
	struct sk_psock *psock = sk_psock(sk);

	if (likely(psock)) {
		sock_map_del_link(sk, psock, link_raw);
		sk_psock_put(sk, psock);
	}
}

static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
	if (!sk->sk_prot->psock_update_sk_prot)
		return -EINVAL;
	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
}

static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->close != sock_map_close) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}

static int sock_map_link(struct bpf_map *map, struct sock *sk)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog *stream_verdict = NULL;
	struct bpf_prog *stream_parser = NULL;
	struct bpf_prog *skb_verdict = NULL;
	struct bpf_prog *msg_parser = NULL;
	struct sk_psock *psock;
	int ret;

	stream_verdict = READ_ONCE(progs->stream_verdict);
	if (stream_verdict) {
		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
		if (IS_ERR(stream_verdict))
			return PTR_ERR(stream_verdict);
	}

	stream_parser = READ_ONCE(progs->stream_parser);
	if (stream_parser) {
		stream_parser = bpf_prog_inc_not_zero(stream_parser);
		if (IS_ERR(stream_parser)) {
			ret = PTR_ERR(stream_parser);
			goto out_put_stream_verdict;
		}
	}

	msg_parser = READ_ONCE(progs->msg_parser);
	if (msg_parser) {
		msg_parser = bpf_prog_inc_not_zero(msg_parser);
		if (IS_ERR(msg_parser)) {
			ret = PTR_ERR(msg_parser);
			goto out_put_stream_parser;
		}
	}

	skb_verdict = READ_ONCE(progs->skb_verdict);
	if (skb_verdict) {
		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
		if (IS_ERR(skb_verdict)) {
			ret = PTR_ERR(skb_verdict);
			goto out_put_msg_parser;
		}
	}

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock)) {
		ret = PTR_ERR(psock);
		goto out_progs;
	}

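	/* A socket can already carry a psock when it is a member of another
	 * sockmap/sockhash. Reuse that psock only if none of the program
	 * slots we are about to fill are occupied; stream_verdict and
	 * skb_verdict are mutually exclusive on a psock.
	 */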
	if (psock) {
		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
			sk_psock_put(sk, psock);
			ret = -EBUSY;
			goto out_progs;
		}
	} else {
		psock = sk_psock_init(sk, map->numa_node);
		if (IS_ERR(psock)) {
			ret = PTR_ERR(psock);
			goto out_progs;
		}
	}

	if (msg_parser)
		psock_set_prog(&psock->progs.msg_parser, msg_parser);

	ret = sock_map_init_proto(sk, psock);
	if (ret < 0)
		goto out_drop;

	write_lock_bh(&sk->sk_callback_lock);
	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
		ret = sk_psock_init_strp(sk, psock);
		if (ret)
			goto out_unlock_drop;
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
		psock_set_prog(&psock->progs.stream_parser, stream_parser);
		sk_psock_start_strp(sk, psock);
	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
		sk_psock_start_verdict(sk, psock);
	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
		sk_psock_start_verdict(sk, psock);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
out_unlock_drop:
	write_unlock_bh(&sk->sk_callback_lock);
out_drop:
	sk_psock_put(sk, psock);
out_progs:
	if (skb_verdict)
		bpf_prog_put(skb_verdict);
out_put_msg_parser:
	if (msg_parser)
		bpf_prog_put(msg_parser);
out_put_stream_parser:
	if (stream_parser)
		bpf_prog_put(stream_parser);
out_put_stream_verdict:
	if (stream_verdict)
		bpf_prog_put(stream_verdict);
	return ret;
}

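/* map_free callback: runs once the last map reference is gone, so no new
 * updates or deletes can start; the RCU sync below flushes the ones that
 * already have.
 */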
static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock **psk = &stab->sks[i];
		struct sock *sk;

		sk = xchg(psk, NULL);
		if (sk) {
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(stab->sks);
	kfree(stab);
}

static void sock_map_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
}

static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(key >= map->max_entries))
		return NULL;
	return READ_ONCE(stab->sks[key]);
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
{
	struct sock *sk;
	int err = 0;

	raw_spin_lock_bh(&stab->lock);
	sk = *psk;
	if (!sk_test || sk_test == sk)
		sk = xchg(psk, NULL);

	if (likely(sk))
		sock_map_unref(sk, psk);
	else
		err = -EINVAL;

	raw_spin_unlock_bh(&stab->lock);
	return err;
}

static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	__sock_map_delete(stab, sk, link_raw);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = *(u32 *)key;
	struct sock **psk;

	if (unlikely(i >= map->max_entries))
		return -EINVAL;

	psk = &stab->sks[i];
	return __sock_map_delete(stab, NULL, psk);
}

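/* Userspace iteration helper: a NULL or out-of-range key restarts the walk
 * at index 0, and asking for the successor of the last slot ends it with
 * -ENOENT.
 */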
static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *key_next = next;

	if (i == stab->map.max_entries - 1)
		return -ENOENT;
	if (i >= stab->map.max_entries)
		*key_next = 0;
	else
		*key_next = i + 1;
	return 0;
}

static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct sk_psock_link *link;
	struct sk_psock *psock;
	struct sock *osk;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(idx >= map->max_entries))
		return -E2BIG;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	raw_spin_lock_bh(&stab->lock);
	osk = stab->sks[idx];
	if (osk && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!osk && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, &stab->sks[idx]);
	stab->sks[idx] = sk;
	if (osk)
		sock_map_unref(osk, &stab->sks[idx]);
	raw_spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	raw_spin_unlock_bh(&stab->lock);
	if (psock)
		sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}

static bool sock_map_redirect_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return sk->sk_state != TCP_LISTEN;
	else
		return sk->sk_state == TCP_ESTABLISHED;
}

static bool sock_map_sk_is_suitable(const struct sock *sk)
{
	return !!sk->sk_prot->psock_update_sk_prot;
}

static bool sock_map_sk_state_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
	return true;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags);

int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
			     u64 flags)
{
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	sock_map_sk_release(sk);
out:
	sockfd_put(sock);
	return ret;
}

static int sock_map_update_elem(struct bpf_map *map, void *key,
				void *value, u64 flags)
{
	struct sock *sk = (struct sock *)value;
	int ret;

	if (unlikely(!sk || !sk_fullsock(sk)))
		return -EINVAL;

	if (!sock_map_sk_is_suitable(sk))
		return -EOPNOTSUPP;

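	/* Update coming from a BPF program (the value is a socket pointer,
	 * unlike the fd-based sys path above): we are in an RCU read side
	 * and may run in softirq, so we cannot sleep in lock_sock(). The
	 * bottom-half socket spinlock is sufficient here.
	 */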
	local_bh_disable();
	bh_lock_sock(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	bh_unlock_sock(sk);
	local_bh_enable();
	return ret;
}

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_map_update_common(map, *(u32 *)key, sops->sk,
					      flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func		= bpf_sk_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func		= bpf_msg_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

struct sock_map_seq_info {
	struct bpf_map *map;
	struct sock *sk;
	u32 index;
};

struct bpf_iter__sockmap {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(struct sock *, sk);
};

DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key,
		     struct sock *sk)

static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
{
	if (unlikely(info->index >= info->map->max_entries))
		return NULL;

	info->sk = __sock_map_lookup_elem(info->map, info->index);

	/* can't return sk directly, since that might be NULL */
	return info;
}

static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_map_seq_stop */
	rcu_read_lock();
	return sock_map_seq_lookup_elem(info);
}

static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	++*pos;
	++info->index;

	return sock_map_seq_lookup_elem(info);
}

static int sock_map_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !v);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;
		ctx.sk = info->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_map_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_map_seq_show(seq, NULL);

	/* pairs with sock_map_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_map_seq_ops = {
	.start	= sock_map_seq_start,
	.next	= sock_map_seq_next,
	.stop	= sock_map_seq_stop,
	.show	= sock_map_seq_show,
};

static int sock_map_init_seq_private(void *priv_data,
				     struct bpf_iter_aux_info *aux)
{
	struct sock_map_seq_info *info = priv_data;

	info->map = aux->map;
	return 0;
}

static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
	.seq_ops		= &sock_map_seq_ops,
	.init_seq_private	= sock_map_init_seq_private,
	.seq_priv_size		= sizeof(struct sock_map_seq_info),
};

static int sock_map_btf_id;
const struct bpf_map_ops sock_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_map_alloc,
	.map_free		= sock_map_free,
	.map_get_next_key	= sock_map_get_next_key,
	.map_lookup_elem_sys_only = sock_map_lookup_sys,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_map_delete_elem,
	.map_lookup_elem	= sock_map_lookup,
	.map_release_uref	= sock_map_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_btf_name		= "bpf_stab",
	.map_btf_id		= &sock_map_btf_id,
	.iter_seq_info		= &sock_map_iter_seq_info,
};

struct bpf_shtab_elem {
	struct rcu_head rcu;
	u32 hash;
	struct sock *sk;
	struct hlist_node node;
	u8 key[];
};

struct bpf_shtab_bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_shtab {
	struct bpf_map map;
	struct bpf_shtab_bucket *buckets;
	u32 buckets_num;
	u32 elem_size;
	struct sk_psock_progs progs;
	atomic_t count;
};

static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
{
	return jhash(key, len, 0);
}

static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
							u32 hash)
{
	return &htab->buckets[hash & (htab->buckets_num - 1)];
}

static struct bpf_shtab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
{
	struct bpf_shtab_elem *elem;

	hlist_for_each_entry_rcu(elem, head, node) {
		if (elem->hash == hash &&
		    !memcmp(&elem->key, key, key_size))
			return elem;
	}

	return NULL;
}

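/* buckets_num is a power of two, so masking with (buckets_num - 1) in
 * sock_hash_select_bucket() picks the bucket; lookups then walk the
 * RCU-protected chain without taking the bucket lock.
 */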
static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);

	return elem ? elem->sk : NULL;
}

static void sock_hash_free_elem(struct bpf_shtab *htab,
				struct bpf_shtab_elem *elem)
{
	atomic_dec(&htab->count);
	kfree_rcu(elem, rcu);
}

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
	struct bpf_shtab_bucket *bucket;

	WARN_ON_ONCE(!rcu_read_lock_held());
	bucket = sock_hash_select_bucket(htab, elem->hash);

	/* elem may be deleted in parallel from the map, but access here
	 * is okay since it's going away only after RCU grace period.
	 * However, we need to check whether it's still present.
	 */
	raw_spin_lock_bh(&bucket->lock);
	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
					       elem->key, map->key_size);
	if (elem_probe && elem_probe == elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	raw_spin_unlock_bh(&bucket->lock);
}

static int sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	int ret = -ENOENT;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	raw_spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	raw_spin_unlock_bh(&bucket->lock);
	return ret;
}

static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
						   void *key, u32 key_size,
						   u32 hash, struct sock *sk,
						   struct bpf_shtab_elem *old)
{
	struct bpf_shtab_elem *new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
				   GFP_ATOMIC | __GFP_NOWARN,
				   htab->map.numa_node);
	if (!new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(new->key, key, key_size);
	new->sk = sk;
	new->hash = hash;
	return new;
}

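/* Insert or replace under the bucket lock. flags follows the usual BPF map
 * convention: BPF_ANY always writes, BPF_NOEXIST fails with -EEXIST when
 * the key is present, BPF_EXIST fails with -ENOENT when it is not.
 */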
static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_elem *elem, *elem_new;
	struct bpf_shtab_bucket *bucket;
	struct sk_psock_link *link;
	struct sk_psock *psock;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	raw_spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!elem && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
	if (IS_ERR(elem_new)) {
		ret = PTR_ERR(elem_new);
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, elem_new);
	/* Add new element to the head of the list, so that
	 * concurrent search will find it before old elem.
	 */
	hlist_add_head_rcu(&elem_new->node, &bucket->head);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	raw_spin_unlock_bh(&bucket->lock);
	return 0;
out_unlock:
	raw_spin_unlock_bh(&bucket->lock);
	sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static int sock_hash_get_next_key(struct bpf_map *map, void *key,
				  void *key_next)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem, *elem_next;
	u32 hash, key_size = map->key_size;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first_elem;
	hash = sock_hash_bucket_hash(key, key_size);
	head = &sock_hash_select_bucket(htab, hash)->head;
	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
	if (!elem)
		goto find_first_elem;

	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
				     struct bpf_shtab_elem, node);
	if (elem_next) {
		memcpy(key_next, elem_next->key, key_size);
		return 0;
	}

	i = hash & (htab->buckets_num - 1);
	i++;
find_first_elem:
	for (; i < htab->buckets_num; i++) {
		head = &sock_hash_select_bucket(htab, i)->head;
		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
					     struct bpf_shtab_elem, node);
		if (elem_next) {
			memcpy(key_next, elem_next->key, key_size);
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_shtab *htab;
	int i, err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->max_entries == 0 ||
	    attr->key_size    == 0 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);
	if (attr->key_size > MAX_BPF_STACK)
		return ERR_PTR(-E2BIG);

	htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct bpf_shtab_elem) +
			  round_up(htab->map.key_size, 8);
	if (htab->buckets_num == 0 ||
	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
		err = -EINVAL;
		goto free_htab;
	}

	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
					   sizeof(struct bpf_shtab_bucket),
					   htab->map.numa_node);
	if (!htab->buckets) {
		err = -ENOMEM;
		goto free_htab;
	}

	for (i = 0; i < htab->buckets_num; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

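/* map_free callback for sockhash. Sockets may still be closing and thus
 * unlinking themselves concurrently; see the bucket-lock comments below.
 */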
static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_bucket *bucket;
	struct hlist_head unlink_list;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < htab->buckets_num; i++) {
		bucket = sock_hash_select_bucket(htab, i);

		/* We are racing with sock_hash_delete_from_link to
		 * enter the spin-lock critical section. Every socket on
		 * the list is still linked to sockhash. Since link
		 * exists, psock exists and holds a ref to socket. That
		 * lets us to grab a socket ref too.
		 */
		raw_spin_lock_bh(&bucket->lock);
		hlist_for_each_entry(elem, &bucket->head, node)
			sock_hold(elem->sk);
		hlist_move_list(&bucket->head, &unlink_list);
		raw_spin_unlock_bh(&bucket->lock);

		/* Process removed entries out of atomic context to
		 * block for socket lock before deleting the psock's
		 * link to sockhash.
		 */
		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
			hlist_del(&elem->node);
			lock_sock(elem->sk);
			rcu_read_lock();
			sock_map_unref(elem->sk, elem);
			rcu_read_unlock();
			release_sock(elem->sk);
			sock_put(elem->sk);
			sock_hash_free_elem(htab, elem);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static void *sock_hash_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void sock_hash_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
}

BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_hash_update_common(map, key, sops->sk, flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func		= bpf_sock_hash_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

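/* skb redirect helper for sockhash: same contract as bpf_sk_redirect_map()
 * above, but the target socket is looked up by arbitrary-sized key.
 */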
BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
	.func		= bpf_sk_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
	.func		= bpf_msg_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

struct sock_hash_seq_info {
	struct bpf_map *map;
	struct bpf_shtab *htab;
	u32 bucket_id;
};

static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
				     struct bpf_shtab_elem *prev_elem)
{
	const struct bpf_shtab *htab = info->htab;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;

		/* no more elements, continue in the next bucket */
		info->bucket_id++;
	}

	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
		bucket = &htab->buckets[info->bucket_id];
		node = rcu_dereference(hlist_first_rcu(&bucket->head));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;
	}

	return NULL;
}

static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_hash_seq_stop */
	rcu_read_lock();
	return sock_hash_seq_find_next(info, NULL);
}

static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	++*pos;
	return sock_hash_seq_find_next(info, v);
}

static int sock_hash_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_shtab_elem *elem = v;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !elem);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (elem) {
		ctx.key = elem->key;
		ctx.sk = elem->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_hash_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_hash_seq_show(seq, NULL);

	/* pairs with sock_hash_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_hash_seq_ops = {
	.start	= sock_hash_seq_start,
	.next	= sock_hash_seq_next,
	.stop	= sock_hash_seq_stop,
	.show	= sock_hash_seq_show,
};

static int sock_hash_init_seq_private(void *priv_data,
				      struct bpf_iter_aux_info *aux)
{
	struct sock_hash_seq_info *info = priv_data;

	info->map = aux->map;
	info->htab = container_of(aux->map, struct bpf_shtab, map);
	return 0;
}

static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
	.seq_ops		= &sock_hash_seq_ops,
	.init_seq_private	= sock_hash_init_seq_private,
	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
};

static int sock_hash_map_btf_id;
const struct bpf_map_ops sock_hash_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_hash_alloc,
	.map_free		= sock_hash_free,
	.map_get_next_key	= sock_hash_get_next_key,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_hash_delete_elem,
	.map_lookup_elem	= sock_hash_lookup,
	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
	.map_release_uref	= sock_hash_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_btf_name		= "bpf_shtab",
	.map_btf_id		= &sock_hash_map_btf_id,
	.iter_seq_info		= &sock_hash_iter_seq_info,
};

static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return &container_of(map, struct bpf_stab, map)->progs;
	case BPF_MAP_TYPE_SOCKHASH:
		return &container_of(map, struct bpf_shtab, map)->progs;
	default:
		break;
	}

	return NULL;
}

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog **pprog;

	if (!progs)
		return -EOPNOTSUPP;

	switch (which) {
	case BPF_SK_MSG_VERDICT:
		pprog = &progs->msg_parser;
		break;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	case BPF_SK_SKB_STREAM_PARSER:
		pprog = &progs->stream_parser;
		break;
#endif
	case BPF_SK_SKB_STREAM_VERDICT:
		if (progs->skb_verdict)
			return -EBUSY;
		pprog = &progs->stream_verdict;
		break;
	case BPF_SK_SKB_VERDICT:
		if (progs->stream_verdict)
			return -EBUSY;
		pprog = &progs->skb_verdict;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (old)
		return psock_replace_prog(pprog, prog, old);

	psock_set_prog(pprog, prog);
	return 0;
}

static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
{
	switch (link->map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return sock_map_delete_from_link(link->map, sk,
						 link->link_raw);
	case BPF_MAP_TYPE_SOCKHASH:
		return sock_hash_delete_from_link(link->map, sk,
						  link->link_raw);
	default:
		break;
	}
}

static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_link *link;

	while ((link = sk_psock_link_pop(psock))) {
		sock_map_unlink(sk, link);
		sk_psock_free_link(link);
	}
}

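/* The psock swaps in unhash/close handlers when a socket joins a map (via
 * psock_update_sk_prot). On teardown, remove all map links first, then
 * hand off to the protocol's original callback saved on the psock.
 */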
void sock_map_unhash(struct sock *sk)
{
	void (*saved_unhash)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		if (sk->sk_prot->unhash)
			sk->sk_prot->unhash(sk);
		return;
	}

	saved_unhash = psock->saved_unhash;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	saved_unhash(sk);
}
EXPORT_SYMBOL_GPL(sock_map_unhash);

void sock_map_close(struct sock *sk, long timeout)
{
	void (*saved_close)(struct sock *sk, long timeout);
	struct sk_psock *psock;

	lock_sock(sk);
	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		release_sock(sk);
		return sk->sk_prot->close(sk, timeout);
	}

	saved_close = psock->saved_close;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	sk_psock_stop(psock, true);
	sk_psock_put(sk, psock);
	release_sock(sk);
	saved_close(sk, timeout);
}
EXPORT_SYMBOL_GPL(sock_map_close);

static int sock_map_iter_attach_target(struct bpf_prog *prog,
				       union bpf_iter_link_info *linfo,
				       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->key_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static struct bpf_iter_reg sock_map_iter_reg = {
	.target			= "sockmap",
	.attach_target		= sock_map_iter_attach_target,
	.detach_target		= sock_map_iter_detach_target,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__sockmap, key),
		  PTR_TO_RDONLY_BUF_OR_NULL },
		{ offsetof(struct bpf_iter__sockmap, sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
};

static int __init bpf_sockmap_iter_init(void)
{
	sock_map_iter_reg.ctx_arg_info[1].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&sock_map_iter_reg);
}
late_initcall(bpf_sockmap_iter_init);