// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

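/* The three helpers above form a measurement loop wrapped around the actual
 * program invocation. The pattern below, mirroring the test runners further
 * down in this file, is illustrative:
 *
 *	struct bpf_test_timer t = { .mode = NO_MIGRATE };
 *	u32 duration;
 *	int ret;
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		retval = bpf_prog_run(prog, ctx);
 *	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
 *	bpf_test_timer_leave(&t);
 *
 * On completion 'duration' holds the average runtime per iteration in
 * nanoseconds, and the loop breaks out early on pending signals.
 */
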
/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	struct xdp_frame frm;
	u8 data[];
};

struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	struct xdp_mem_info mem;
	u32 batch_size;
	u32 frame_cnt;
};

#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

static void xdp_test_run_init_page(struct page *page, void *arg)
{
	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = &head->frm;
	data = &head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem = new_ctx->rxq->mem;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

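/* Resulting per-page layout after xdp_test_run_init_page() (illustrative):
 *
 *	+----------------------+ <- page start
 *	| struct xdp_page_head | orig_ctx, ctx and frm live here
 *	+----------------------+ <- head->data
 *	| headroom             | XDP_PACKET_HEADROOM - meta_len
 *	+----------------------+ <- ctx->data_meta
 *	| metadata             | meta_len bytes copied from orig_ctx
 *	+----------------------+ <- ctx->data
 *	| packet data          | frm_len - meta_len bytes
 *	+----------------------+ <- ctx->data_end
 *	| tailroom             | remainder of TEST_XDP_FRAME_SIZE
 *	+----------------------+
 */
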
static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	xdp_unreg_mem_model(&xdp->mem);
	page_pool_destroy(xdp->pp);
	kvfree(xdp->frames);
	kvfree(xdp->skbs);
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
	       head->orig_ctx.data_meta != head->ctx.data_meta ||
	       head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, &head->frm);
}

static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = &head->frm;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	local_bh_enable();
	return err;
}

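/* The live-mode driver below calls xdp_test_run_batch() until 'repeat' frames
 * have been processed in total; each call handles at most
 * min(frames remaining, batch_size) frames. E.g. (illustrative numbers) with
 * repeat = 1000 and batch_size = 64, the batch runner is invoked 16 times:
 * 15 full batches of 64 frames plus a final batch of 40.
 */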
static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)
{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

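/* Truncation semantics of bpf_test_finish() (illustrative): if the test run
 * produced a 256-byte packet but userspace set data_size_out = 128, only the
 * first 128 bytes are copied to data_out, data_size_out is rewritten with the
 * full size of 256, and the run returns -ENOSPC so the caller can retry with
 * a larger buffer. retval and duration are still copied out in that case.
 */
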
/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);
ALLOW_ERROR_INJECTION(bpf_fentry_test1, ERRNO);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

struct prog_test_member1 {
	int a;
};

struct prog_test_member {
	struct prog_test_member1 m;
	int c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
	refcount_t cnt;
};

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

noinline struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
	if (!p)
		return;

	refcount_dec(&p->cnt);
}

noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
	struct prog_test_ref_kfunc *p = READ_ONCE(*pp);

	if (!p)
		return NULL;
	refcount_inc(&p->cnt);
	return p;
}

struct prog_test_pass1 {
	int x0;
	struct {
		int x1;
		struct {
			int x2;
			struct {
				int x3;
			};
		};
	};
};

struct prog_test_pass2 {
	int len;
	short arr1[4];
	struct {
		char arr2[4];
		unsigned long arr3[8];
	} x;
};

struct prog_test_fail1 {
	void *p;
	int x;
};

struct prog_test_fail2 {
	int x8;
	struct prog_test_pass1 x;
};

struct prog_test_fail3 {
	int len;
	char arr1[2];
	char arr2[];
};

noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

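/* For the kfuncs below, the '__sz' suffix on 'mem__sz' marks the argument as
 * the size of the preceding 'mem' pointer, so the verifier can check the
 * whole (pointer, size) memory range. A plain 'len' argument gets no such
 * treatment, which is exactly what the _fail variants exercise.
 */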
noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

noinline void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

BTF_SET8_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
BTF_SET8_END(test_sk_check_kfunc_ids)

static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

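	/* Pack the result: side_effect in the upper 16 bits, the return value
	 * of the test function in the lower 16. side_effect == 1 means the
	 * traced function actually ran and incremented 'b'; a
	 * BPF_MODIFY_RETURN program that overrides the return value skips the
	 * function body, leaving b == 2 and side_effect == 0.
	 */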
	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

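/* Raw tracepoint test runs can be pinned to a CPU: with
 * flags = BPF_F_TEST_RUN_ON_CPU the program is executed on kattr->test.cpu
 * via smp_call_function_single() (illustrative: flags = BPF_F_TEST_RUN_ON_CPU,
 * cpu = 3 runs the program on CPU 3). Without the flag, cpu must be 0 and the
 * program runs on the current CPU.
 */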
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, repeat or batch_size */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there are no non-zero bytes in the buffer
 * in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

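/* convert___skb_to_skb() below uses range_is_zero() to enforce a whitelist:
 * for every writable __sk_buff field, the gap between the end of the previous
 * allowed field (offsetofend()) and the start of the next one (offsetof())
 * must be all-zero, so userspace cannot smuggle in values for fields the test
 * runner does not support.
 */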
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!skb->len)
		return -EINVAL;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

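/* Each skb test run gets a dummy socket allocated from the minimal proto
 * below, so that programs dereferencing skb->sk see a valid struct sock
 * rather than NULL; it is freed again once the run finishes.
 */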
static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

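/* xdp_convert_md_to_buff() below maps a user-supplied xdp_md context onto the
 * kernel xdp_buff: a non-zero ingress_ifindex/rx_queue_index pair
 * (illustrative: ingress_ifindex = 2, rx_queue_index = 0) is resolved to that
 * device's registered XDP RX queue, and xdp_md->data sets the length of the
 * metadata area in front of the packet.
 */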
static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

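	/* Input data beyond what fits in the linear area is attached below as
	 * page fragments, one page per fragment up to MAX_SKB_FRAGS, turning
	 * the buff into a multi-buffer (frags) xdp_buff.
	 */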
	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];
			__skb_frag_set_page(frag, page);

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_size_set(frag, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

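/* The flow dissector runner below feeds the supplied packet to
 * bpf_flow_dissect() starting at the L3 header (nhoff = ETH_HLEN), with the
 * initial protocol taken from the Ethernet header; the resulting
 * bpf_flow_keys are returned to userspace as the output data.
 */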
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

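/* BPF_PROG_TYPE_SYSCALL test runs execute exactly once (no repeat support)
 * under rcu_read_lock_trace(), and the context buffer, which the program may
 * modify, is copied back into the user's ctx_in buffer afterwards.
 */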
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat, flags or batch_size */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &test_sk_check_kfunc_ids,
};

BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(struct, prog_test_member)
BTF_ID(func, bpf_kfunc_call_memb_release)

static int __init bpf_prog_test_run_init(void)
{
	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
		{
			.btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
		},
		{
			.btf_id       = bpf_prog_test_dtor_kfunc_ids[2],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
		},
	};
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
						  THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);