// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}
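
/* The timer helpers above are meant to be used in a fixed pattern, which the
 * test-run functions below all follow.  Illustrative sketch only (error
 * handling elided); real call sites pass their own per-loop iteration counts:
 *
 *	struct bpf_test_timer t = { .mode = NO_MIGRATE };
 *	u32 duration;
 *	int err;
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		// run the program under test once (or a whole batch)
 *	} while (bpf_test_timer_continue(&t, 1, repeat, &err, &duration));
 *	bpf_test_timer_leave(&t);
 *	// on success, 'duration' holds the mean runtime per iteration in ns
 */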

/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	struct xdp_frame frm;
	u8 data[];
};

struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	struct xdp_mem_info mem;
	u32 batch_size;
	u32 frame_cnt;
};

#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

static void xdp_test_run_init_page(struct page *page, void *arg)
{
	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = &head->frm;
	data = &head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem = new_ctx->rxq->mem;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	xdp_unreg_mem_model(&xdp->mem);
	page_pool_destroy(xdp->pp);
	kvfree(xdp->frames);
	kvfree(xdp->skbs);
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
	       head->orig_ctx.data_meta != head->ctx.data_meta ||
	       head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, &head->frm);
}

static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = &head->frm;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	local_bh_enable();
	return err;
}

static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)
{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}
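
/* From user space, the live-frames mode above is driven through
 * BPF_PROG_TEST_RUN with BPF_F_TEST_XDP_LIVE_FRAMES set.  A rough libbpf
 * sketch, assuming a libbpf version that exposes 'batch_size' in
 * bpf_test_run_opts ('pkt' is a caller-built Ethernet/IP packet):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .data_in = pkt,
 *		    .data_size_in = sizeof(pkt),
 *		    .repeat = 1000000,
 *		    .batch_size = 64,
 *		    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
 *	);
 *	int err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// XDP_TX/XDP_REDIRECT frames are actually transmitted in this mode,
 *	// so data_out and ctx_out are rejected (see bpf_prog_test_run_xdp()).
 */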

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
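
/* bpf_test_finish() clamps the copy to the user-supplied data_size_out and
 * always reports the full packet size back, signalling truncation with
 * -ENOSPC.  Hedged user-space sketch of consuming that contract with libbpf
 * (buffer name and sizes are made up for illustration):
 *
 *	char out[256];
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .data_in = pkt,
 *		    .data_size_in = pkt_len,
 *		    .data_out = out,
 *		    .data_size_out = sizeof(out),
 *		    .repeat = 1,
 *	);
 *	int err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on truncation the kernel returns -ENOSPC (surfaced as err or errno,
 *	// depending on libbpf version) and opts.data_size_out holds the full
 *	// length the program produced, not the number of bytes copied.
 */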

/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);
ALLOW_ERROR_INJECTION(bpf_fentry_test1, ERRNO);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

struct prog_test_member1 {
	int a;
};

struct prog_test_member {
	struct prog_test_member1 m;
	int c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
	refcount_t cnt;
};

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

noinline struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
	if (!p)
		return;

	refcount_dec(&p->cnt);
}

noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

noinline int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next 2 cannot really be used for testing, except to ensure that the
 * verifier rejects the call.
 * Acquire functions must return struct pointers, so these ones fail.
 */
noinline int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

noinline void bpf_kfunc_call_int_mem_release(int *p)
{
}

noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
	struct prog_test_ref_kfunc *p = READ_ONCE(*pp);

	if (!p)
		return NULL;
	refcount_inc(&p->cnt);
	return p;
}

struct prog_test_pass1 {
	int x0;
	struct {
		int x1;
		struct {
			int x2;
			struct {
				int x3;
			};
		};
	};
};

struct prog_test_pass2 {
	int len;
	short arr1[4];
	struct {
		char arr2[4];
		unsigned long arr3[8];
	} x;
};

struct prog_test_fail1 {
	void *p;
	int x;
};

struct prog_test_fail2 {
	int x8;
	struct prog_test_pass1 x;
};

struct prog_test_fail3 {
	int len;
	char arr1[2];
	char arr2[];
};

noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

noinline void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
}

noinline void bpf_kfunc_call_test_destructive(void)
{
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
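
/* The kfuncs above are exercised from BPF programs in the selftests; roughly,
 * a program declares them as kernel symbols and pairs every acquire with a
 * release.  Sketch only (BPF-side C, not part of this file):
 *
 *	extern struct prog_test_ref_kfunc *
 *	bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym;
 *	extern void
 *	bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
 *
 *	SEC("tc")
 *	int kfunc_demo(struct __sk_buff *skb)
 *	{
 *		unsigned long sp = 0;
 *		struct prog_test_ref_kfunc *p;
 *
 *		p = bpf_kfunc_call_test_acquire(&sp);
 *		if (!p)
 *			return 0;
 *		// ... use p->a, p->b ...
 *		bpf_kfunc_call_test_release(p);
 *		return 0;
 *	}
 */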

BTF_SET8_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_SET8_END(test_sk_check_kfunc_ids)

static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}
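
/* BPF_F_TEST_RUN_ON_CPU lets user space pin the raw_tp test run above to one
 * CPU.  Hedged libbpf sketch (the ctx layout depends on the tracepoint the
 * program was written against):
 *
 *	__u64 args[2] = { 1, 2 };
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .ctx_in = args,
 *		    .ctx_size_in = sizeof(args),
 *		    .flags = BPF_F_TEST_RUN_ON_CPU,
 *		    .cpu = 3,
 *	);
 *	int err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// fails with -ENXIO if CPU 3 is offline or out of range
 */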

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!skb->len)
		return -EINVAL;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}
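
/* User space can steer bpf_prog_test_run_skb() through a struct __sk_buff
 * passed as ctx_in; only the fields whitelisted in convert___skb_to_skb()
 * may be non-zero.  Hedged sketch ('pkt' is a caller-built Ethernet frame):
 *
 *	struct __sk_buff skb_ctx = {
 *		.mark = 42,
 *		.priority = 1,
 *		.ifindex = 1,		// loopback; > 1 triggers a dev lookup
 *	};
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .data_in = pkt,
 *		    .data_size_in = pkt_len,
 *		    .ctx_in = &skb_ctx,
 *		    .ctx_size_in = sizeof(skb_ctx),
 *		    .ctx_out = &skb_ctx,
 *		    .ctx_size_out = sizeof(skb_ctx),
 *		    .repeat = 1,
 *	);
 *	int err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on success, skb_ctx is written back with mark/priority/wire_len etc.
 */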

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user-provided data before the metadata */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Metadata is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];
			__skb_frag_set_page(frag, page);

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_size_set(frag, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}
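
/* For the sk_lookup runner above, the test is driven purely through ctx_in/
 * ctx_out; packet data is rejected.  Hedged sketch:
 *
 *	struct bpf_sk_lookup lookup_ctx = {
 *		.family = AF_INET,
 *		.protocol = IPPROTO_TCP,
 *		.remote_ip4 = htonl(0x7f000001),	// 127.0.0.1
 *		.remote_port = htons(12345),		// network byte order
 *		.local_ip4 = htonl(0x7f000001),
 *		.local_port = 80,			// host byte order
 *	};
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .ctx_in = &lookup_ctx,
 *		    .ctx_size_in = sizeof(lookup_ctx),
 *		    .ctx_out = &lookup_ctx,
 *		    .ctx_size_out = sizeof(lookup_ctx),
 *	);
 *	int err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on success, lookup_ctx.cookie identifies the socket the program
 *	// selected (0 if none)
 */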

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &test_sk_check_kfunc_ids,
};

BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(struct, prog_test_member)
BTF_ID(func, bpf_kfunc_call_memb_release)

static int __init bpf_prog_test_run_init(void)
{
	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
		{
			.btf_id = bpf_prog_test_dtor_kfunc_ids[0],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
		},
		{
			.btf_id = bpf_prog_test_dtor_kfunc_ids[2],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
		},
	};
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
						  THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);
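
/* bpf_prog_test_run_syscall() above is how BPF_PROG_TYPE_SYSCALL programs are
 * invoked: the context is an arbitrary user-defined struct that is copied in,
 * passed to the program, and copied back out through ctx_in.  Hedged sketch
 * with a made-up context layout:
 *
 *	struct args { __u64 a; __u64 ret; } ctx = { .a = 1 };
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .ctx_in = &ctx,
 *		    .ctx_size_in = sizeof(ctx),
 *	);
 *	int err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// opts.retval holds the program's return value and 'ctx' reflects any
 *	// writes the program made to its context
 */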